class SFTPUrlHandling(Transport):
    """Mix-in that does common handling of SSH/SFTP URLs."""

    def __init__(self, base):
        self._parse_url(base)
        base = self._unparse_url(self._path)
        super(SFTPUrlHandling, self).__init__(base)

    def _parse_url(self, url):
        (self._scheme,
         self._username, self._password,
         self._host, self._port, self._path) = self._split_url(url)

    def _unparse_url(self, path):
        """Return a URL for a path relative to this transport."""
        path = urllib.quote(path)
        # handle homedir paths
        if not path.startswith('/'):
            path = '/~/' + path
        netloc = urllib.quote(self._host)
        if self._username is not None:
            netloc = '%s@%s' % (urllib.quote(self._username), netloc)
        if self._port is not None:
            netloc = '%s:%d' % (netloc, self._port)
        return urlparse.urlunparse((self._scheme, netloc, path, '', '', ''))
    def _split_url(self, url):
        (scheme, username, password, host, port, path) = split_url(url)
        ## assert scheme == 'sftp'

        # The initial slash should be removed from the path, and treated
        # as a homedir-relative path (the path begins with a double slash
        # if it is absolute).
        # See draft-ietf-secsh-scp-sftp-ssh-uri-03.txt.
        # RBC 20060118 we are not using this as it is too user-hostile.
        # Instead we are following lftp and using /~/foo to mean '~/foo'.
        # vila--20070602 and leaving absolute paths to begin with a single
        # slash.
        # handle homedir paths
        if path.startswith('/~/'):
            path = path[3:]
        elif path == '/~':
            path = ''
        return (scheme, username, password, host, port, path)
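    # An illustrative mapping of the convention above (hypothetical URLs):
    #   sftp://user@host/~/work   -> path 'work'      (homedir relative)
    #   sftp://user@host/srv/repo -> path '/srv/repo' (absolute)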
    def abspath(self, relpath):
        """Return the full url to the given relative path.

        @param relpath: the relative path or path components
        @type relpath: str or list
        """
        return self._unparse_url(self._remote_path(relpath))

    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        return self._combine_paths(self._path, relpath)
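    # For example (hypothetical base): with base 'sftp://host/~/work',
    # abspath('src') combines paths to 'work/src', which _unparse_url turns
    # back into 'sftp://host/~/work/src'.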
class _SFTPReadvHelper(object):
    """A class to help with managing the state of a readv request."""

    # See _get_requests for an explanation.
    _max_request_size = 32768

    def __init__(self, original_offsets, relpath, _report_activity):
        """Create a new readv helper.

        :param original_offsets: The original requests given by the caller of
            readv()
        :param relpath: The name of the file (if known)
        :param _report_activity: A Transport._report_activity bound method,
            to be called as data arrives.
        """
        self.original_offsets = list(original_offsets)
        self.relpath = relpath
        self._report_activity = _report_activity
    def _get_requests(self):
        """Break up the offsets into individual requests over sftp.

        The SFTP spec only requires implementers to support 32kB requests. We
        could try something larger (openssh supports 64kB), but then we have
        to handle requests that fail.
        So instead, we just break up our maximum chunks into 32kB chunks, and
        request them asynchronously.

        Newer versions of paramiko would do the chunking for us, but we want
        to start processing results right away, so we do it ourselves.
        """
        # TODO: Because we issue async requests, we don't 'fudge' any extra
        #       data. I'm not 100% sure that is the best choice.

        # The first thing we do is to collapse the individual requests as much
        # as possible, so we don't issue requests <32kB
        sorted_offsets = sorted(self.original_offsets)
        coalesced = list(ConnectedTransport._coalesce_offsets(sorted_offsets,
                                                    limit=0, fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # Break this up into 32kB requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size
        if 'sftp' in debug.debug_flags:
            mutter('SFTP.readv(%s) %s offsets => %s coalesced => %s requests',
                   self.relpath, len(sorted_offsets), len(coalesced),
                   len(requests))
        return requests
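    # A worked example of the chunking above (hypothetical offsets): a single
    # coalesced range of 70000 bytes starting at 0 becomes three requests,
    #   [(0, 32768), (32768, 32768), (65536, 4464)]
    # since 70000 = 32768 + 32768 + 4464.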
    def request_and_yield_offsets(self, fp):
        """Request the data from the remote machine, yielding the results.

        :param fp: A Paramiko SFTPFile object that supports readv.
        :return: Yield the data requested by the original readv caller, one by
            one.
        """
        requests = self._get_requests()
        offset_iter = iter(self.original_offsets)
        cur_offset, cur_size = offset_iter.next()
        # paramiko .readv() yields strings that are in the order of the
        # requests. So we track the current request to know where the next
        # data is being returned from.
        input_start = None
        last_end = None
        buffered_data = []
        buffered_len = 0

        # This is used to buffer chunks which we couldn't process yet
        # It is (start, end, data) tuples.
        data_chunks = []
        # Create an 'unlimited' data stream, so we stop based on requests,
        # rather than just because the data stream ended. This lets us detect
        # short reads.
        data_stream = itertools.chain(fp.readv(requests),
                                      itertools.repeat(None))
        for (start, length), data in itertools.izip(requests, data_stream):
            if data is None:
                raise errors.ShortReadvError(self.relpath,
                                             start, length, 0)
            if len(data) != length:
                raise errors.ShortReadvError(self.relpath,
                                             start, length, len(data))
            self._report_activity(length, 'read')
            if last_end is None:
                # This is the first request, just buffer it
                buffered_data = [data]
                buffered_len = length
                input_start = start
            elif start == last_end:
                # The data we are reading fits neatly on the previous
                # buffer, so this is all part of a larger coalesced range.
                buffered_data.append(data)
                buffered_len += length
            else:
                # We have an 'interrupt' in the data stream. So we know we are
                # at a request boundary.
                if buffered_len > 0:
                    # We haven't consumed the buffer so far, so put it into
                    # data_chunks, and continue.
                    buffered = ''.join(buffered_data)
                    data_chunks.append((input_start, buffered))
                input_start = start
                buffered_data = [data]
                buffered_len = length
            last_end = start + length
            if input_start == cur_offset and cur_size <= buffered_len:
                # Simplify the next steps a bit by transforming buffered_data
                # into a single string. We also have the nice property that
                # when there is only one string ''.join([x]) == x, so there is
                # no data copied.
                buffered = ''.join(buffered_data)
                # Clean out buffered data so that we keep memory
                # consumption low
                del buffered_data[:]
                buffered_offset = 0
                # TODO: We *could* also consider the case where cur_offset is
                #       in the buffered range, even though it doesn't *start*
                #       the buffered range. But for packs we pretty much always
                #       read in order, so you won't get any extra data in the
                #       middle.
                while (input_start == cur_offset
                       and (buffered_offset + cur_size) <= buffered_len):
                    # We've buffered enough data to process this request, spit
                    # it out
                    cur_data = buffered[buffered_offset:buffered_offset + cur_size]
                    # move the direct pointer into our buffered data
                    buffered_offset += cur_size
                    # Move the start-of-buffer pointer
                    input_start += cur_size
                    # Yield the requested data
                    yield cur_offset, cur_data
                    cur_offset, cur_size = offset_iter.next()
                # at this point, we've consumed as much of buffered as we can,
                # so break off the portion that we consumed
                if buffered_offset == len(buffered):
                    # No tail to leave behind
                    buffered_data = []
                    buffered_len = 0
                else:
                    buffered = buffered[buffered_offset:]
                    buffered_data = [buffered]
                    buffered_len = len(buffered)
        # now that the data stream is done, close the handle
        fp.close()
        if buffered_len:
            buffered = ''.join(buffered_data)
            data_chunks.append((input_start, buffered))
        if data_chunks:
            if 'sftp' in debug.debug_flags:
                mutter('SFTP readv left with %d out-of-order bytes',
                       sum(map(lambda x: len(x[1]), data_chunks)))
            # We've processed all the readv data, at this point, anything we
            # couldn't process is in data_chunks. This doesn't happen often,
            # so this code path isn't optimized.
            # We use an interesting process for data_chunks:
            # Specifically if we have "bisect_left([(start, len, entries)],
            #                                      (qstart,))"
            # If start == qstart, then we get the specific node. Otherwise we
            # get the previous node
            while True:
                idx = bisect.bisect_left(data_chunks, (cur_offset,))
                if idx < len(data_chunks) and data_chunks[idx][0] == cur_offset:
                    # The data starts here
                    data = data_chunks[idx][1][:cur_size]
                elif idx > 0:
                    # The data is in a portion of a previous page
                    idx -= 1
                    sub_offset = cur_offset - data_chunks[idx][0]
                    data = data_chunks[idx][1]
                    data = data[sub_offset:sub_offset + cur_size]
                else:
                    # We are missing the page where the data should be found,
                    # which means we are past the end of the data
                    return
                if len(data) != cur_size:
                    raise AssertionError('We must have miscalculated.'
                        ' We expected %d bytes, but only found %d'
                        % (cur_size, len(data)))
                yield cur_offset, data
                cur_offset, cur_size = offset_iter.next()
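    # A worked example of the bisect lookup above (hypothetical chunks): with
    # data_chunks = [(0, 'abcdef'), (10, 'xyz')] and cur_offset = 2,
    # bisect_left(data_chunks, (2,)) returns 1; since data_chunks[1][0] != 2
    # we step back to the chunk at 0 and slice with sub_offset = 2, so a
    # 3-byte request yields 'cde'.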
314
class SFTPTransport(ConnectedTransport):
200
315
"""Transport implementation for SFTP access."""
202
317
_do_prefetch = _default_do_prefetch
217
332
# up the request itself, rather than us having to worry about it
218
333
_max_request_size = 32768
220
def __init__(self, base, clone_from=None):
221
super(SFTPTransport, self).__init__(base)
222
if clone_from is None:
225
# use the same ssh connection, etc
226
self._sftp = clone_from._sftp
227
# super saves 'self.base'
229
def should_cache(self):
231
Return True if the data pulled across should be cached locally.
235
def clone(self, offset=None):
237
Return a new SFTPTransport with root at self.base + offset.
238
We share the same SFTP session between such transports, because it's
239
fairly expensive to set them up.
242
return SFTPTransport(self.base, self)
244
return SFTPTransport(self.abspath(offset), self)
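    # For example (hypothetical URL), cloning shares the SSH connection:
    #   t = SFTPTransport('sftp://host/srv/repo')
    #   sub = t.clone('branches')  # same SFTP session, rooted one level down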
    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        relpath is a urlencoded string.

        :return: a path prefixed with / for regular abspath-based urls, or a
            path that does not begin with / for urls which begin with /~/.
        """
        # how does this work?
        # it processes relpath with respect to self._path:
        # firstly we create a path to evaluate:
        # if relpath is an abspath or homedir path, its the entire thing
        # otherwise we join our base with relpath
        # then we eliminate all empty segments (double //'s) outside the first
        # two elements of the list. This avoids problems with trailing
        # slashes, or other abnormalities.
        # finally we evaluate the entire path in a single pass:
        # '.' segments are stripped, and
        # '..' segments result in popping the left-most already
        # processed path (which can never be empty because of the check for
        # abspath and homedir meaning that its not, or that we've used our
        # path. If the pop would pop the root, we ignore it.
        #
        # Specific case examinations:
        # remove the special case for ~: if the current root is ~/ popping of
        # it = / thus our seed for a ~ based path is ['', '~']
        # and if we end up with [''] then we had basically ('', '..') (which is
        # '/..' so we append '' if the length is one, and assert that the first
        # element is still ''. Lastly, if we end with ['', '~'] as a prefix for
        # the output, we've got a homedir path, so we strip that prefix before
        # '/' joining the resulting list.
        #
        # case one: '/' -> ['', ''] cannot shrink
        # case two: '/' + '../foo' -> ['', 'foo'] (take '', '', '..', 'foo')
        #           and pop the second '' for the '..', append 'foo'
        # case three: '/~/' -> ['', '~', '']
        # case four: '/~/' + '../foo' -> ['', '~', '', '..', 'foo'],
        #            and we want to get '/foo' - the empty path in the middle
        #            needs to be stripped, then normal path manipulation will
        #            work.
        # case five: '/..' ['', '..'], we want ['', '']
        #            stripping '' outside the first two is ok
        #            ignore .. if its too high up
        #
        # lastly this code is possibly reusable by FTP, but not reusable by
        # local paths (~ is resolvable correctly there), nor by HTTP or the
        # smart server: ~ is resolved remotely.
        #
        # however, a version of this that acts on self.base is possible to be
        # written which manipulates the URL in canonical form, and would be
        # reusable for all transports, if a flag for allowing ~/ at all was
        # needed.
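        # A walk-through of case four above: to_process keeps the leading ''
        # and drops the interior '', giving ['', '~', '..', 'foo']; '' and '~'
        # are appended to output_path, '..' pops the '~', and 'foo' is
        # appended, leaving ['', 'foo'], which '/'-joins to '/foo'.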
        assert isinstance(relpath, basestring)
        relpath = urlutils.unescape(relpath)

        if relpath.startswith('/'):
            # abspath - normal split is fine.
            current_path = relpath.split('/')
        elif relpath.startswith('~/'):
            # root is homedir based: normal split, and prefix '' to give the
            # ['', '~'] seed described above
            current_path = [''] + relpath.split('/')
338
:param relpath: is a urlencoded string.
340
remote_path = self._parsed_url.clone(relpath).path
341
# the initial slash should be removed from the path, and treated as a
342
# homedir relative path (the path begins with a double slash if it is
343
# absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
344
# RBC 20060118 we are not using this as its too user hostile. instead
345
# we are following lftp and using /~/foo to mean '~/foo'
346
# vila--20070602 and leave absolute paths begin with a single slash.
347
if remote_path.startswith('/~/'):
348
remote_path = remote_path[3:]
349
elif remote_path == '/~':
353
def _create_connection(self, credentials=None):
354
"""Create a new connection with the provided credentials.
356
:param credentials: The credentials needed to establish the connection.
358
:return: The created connection and its associated credentials.
360
The credentials are only the password as it may have been entered
361
interactively by the user and may be different from the one provided
362
in base url at transport creation time.
364
if credentials is None:
365
password = self._parsed_url.password
311
# root is from the current directory:
312
if self._path.startswith('/'):
313
# abspath, take the regular split
316
# homedir based, add the '', '~' not present in self._path
317
current_path = ['', '~']
318
# add our current dir
319
current_path.extend(self._path.split('/'))
320
# add the users relpath
321
current_path.extend(relpath.split('/'))
322
# strip '' segments that are not in the first one - the leading /.
323
to_process = current_path[:1]
324
for segment in current_path[1:]:
326
to_process.append(segment)
328
# process '.' and '..' segments into output_path.
330
for segment in to_process:
332
# directory pop. Remove a directory
333
# as long as we are not at the root
334
if len(output_path) > 1:
337
# cannot pop beyond the root, so do nothing
339
continue # strip the '.' from the output.
341
# this will append '' to output_path for the root elements,
342
# which is appropriate: its why we strip '' in the first pass.
343
output_path.append(segment)
345
# check output special cases:
346
if output_path == ['']:
348
output_path = ['', '']
349
elif output_path[:2] == ['', '~']:
350
# ['', '~', ...] -> ...
351
output_path = output_path[2:]
352
path = '/'.join(output_path)
355
def relpath(self, abspath):
356
scheme, username, password, host, port, path = self._split_url(abspath)
358
if (username != self._username):
359
error.append('username mismatch')
360
if (host != self._host):
361
error.append('host mismatch')
362
if (port != self._port):
363
error.append('port mismatch')
364
if (not path.startswith(self._path)):
365
error.append('path mismatch')
367
extra = ': ' + ', '.join(error)
368
raise PathNotChild(abspath, self.base, extra=extra)
370
return path[pl:].strip('/')
367
password = credentials
369
vendor = ssh._get_ssh_vendor()
370
user = self._parsed_url.user
372
auth = config.AuthenticationConfig()
373
user = auth.get_user('ssh', self._parsed_url.host,
374
self._parsed_url.port)
375
connection = vendor.connect_sftp(self._parsed_url.user, password,
376
self._parsed_url.host, self._parsed_url.port)
377
return connection, (user, password)
    def disconnect(self):
        connection = self._get_connection()
        if connection is not None:
            connection.close()

    def _get_sftp(self):
        """Ensures that a connection is established"""
        connection = self._get_connection()
        if connection is None:
            # First connection ever
            connection, credentials = self._create_connection()
            self._set_connection(connection, credentials)
        return connection
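    # A sketch of the lazy-connection pattern above: the first _get_sftp()
    # call finds no cached connection and triggers _create_connection();
    # subsequent calls reuse the connection stored via _set_connection().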
    def has(self, relpath):
        """
        Does the target location exist?
        """
        try:
            self._get_sftp().stat(self._remote_path(relpath))
            # stat result is about 20 bytes, let's say
            self._report_activity(20, 'read')
            return True
        except IOError:
            return False
    def get(self, relpath):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        """
        try:
            path = self._remote_path(relpath)
            f = self._get_sftp().file(path, mode='rb')
            if self._do_prefetch and (getattr(f, 'prefetch', None) is not None):
                f.prefetch()
            return f
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving',
                failure_exc=errors.ReadError)
    def get_bytes(self, relpath):
        # reimplement this here so that we can report how many bytes came back
        f = self.get(relpath)
        try:
            bytes = f.read()
            self._report_activity(len(bytes), 'read')
            return bytes
        finally:
            f.close()
    def _readv(self, relpath, offsets):
        """See Transport.readv()"""
        # We overload the default readv() because we want to use a file
        # that does not have prefetch enabled.
        try:
            path = self._remote_path(relpath)
            fp = self._get_sftp().file(path, mode='rb')
            readv = getattr(fp, 'readv', None)
            if readv:
                return self._sftp_readv(fp, offsets, relpath)
            if 'sftp' in debug.debug_flags:
                mutter('seek and read %s offsets', len(offsets))
            return self._seek_and_read(fp, offsets, relpath)
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving')
    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For SFTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
        return 64 * 1024
    def _sftp_readv(self, fp, offsets, relpath):
        """Use the readv() member of fp to do async readv.

        Then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants.
        """
        # The algorithm works as follows:
        # 1) Coalesce nearby reads into a single chunk
        #    This generates a list of combined regions, the total size
        #    and the size of the sub regions. This coalescing step is limited
        #    in the number of nearby chunks to combine, and is allowed to
        #    skip small breaks in the requests. Limiting it makes sure that
        #    we can start yielding some data earlier, and skipping means we
        #    make fewer requests. (Beneficial even when using async)
        # 2) Break up these combined regions into chunks that are smaller
        #    than 64KiB. Technically the limit is 65536, but we are a
        #    little bit conservative. This is because sftp has a maximum
        #    return chunk size of 64KiB (max size of an unsigned short)
        # 3) Issue a readv() to paramiko to create an async request for
        #    all of this data
        # 4) Read in the data as it comes back, until we've read one
        #    continuous section as determined in step 1
        # 5) Break up the full sections into hunks for the original requested
        #    offsets. And put them in a cache
        # 6) Check if the next request is in the cache, and if it is, remove
        #    it from the cache, and yield its data. Continue until no more
        #    entries are in the cache.
        # 7) loop back to step 4 until all data has been read
        #
        # TODO: jam 20060725 This could be optimized one step further, by
        #       attempting to yield whatever data we have read, even before
        #       the first coalesced section has been fully processed.
        helper = _SFTPReadvHelper(offsets, relpath, self._report_activity)
        return helper.request_and_yield_offsets(fp)
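    # An illustrative call (hypothetical file and offsets): readv() drives
    # the helper above and yields (offset, data) pairs in request order:
    #   for offset, data in t.readv('inventory', [(0, 100), (4096, 32)]):
    #       process(offset, data)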

    def put_file(self, relpath, f, mode=None):

# ------------- server test implementation --------------

from bzrlib.tests.stub_sftp import StubServer, StubSFTPServer

STUB_SERVER_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
-----END RSA PRIVATE KEY-----
"""
class SocketListener(threading.Thread):

    def __init__(self, callback):
        threading.Thread.__init__(self)
        self._callback = callback
        self._socket = socket.socket()
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(('localhost', 0))
        self._socket.listen(1)
        self.port = self._socket.getsockname()[1]
        self._stop_event = threading.Event()

    def stop(self):
        # called from outside this thread
        self._stop_event.set()
        # use a timeout here, because if the test fails, the server thread may
        # never notice the stop_event.
        self.join(5.0)
        self._socket.close()

    def run(self):
        while True:
            readable, writable_unused, exception_unused = \
                select.select([self._socket], [], [], 0.1)
            if self._stop_event.isSet():
                return
            if len(readable) == 0:
                continue
            try:
                s, addr_unused = self._socket.accept()
                # because the loopback socket is inline, and transports are
                # never explicitly closed, best to launch a new thread.
                threading.Thread(target=self._callback, args=(s,)).start()
            except socket.error, x:
                sys.excepthook(*sys.exc_info())
                warning('Socket error during accept() within unit test server'
                        ' thread: %r' % x)
            except Exception, x:
                # probably a failed test; unit test thread will log the
                # failure/error
                sys.excepthook(*sys.exc_info())
                warning('Exception from within unit test server thread: %r' %
                        x)
class SocketDelay(object):
    """A socket decorator to make TCP appear slower.

    This changes recv, send, and sendall to add a fixed latency to each python
    call if a new roundtrip is detected. That is, when a recv is called and the
    flag new_roundtrip is set, latency is charged. Every send and send_all
    sets the new_roundtrip flag.

    In addition every send, sendall and recv sleeps a bit per character send to
    simulate bandwidth.

    Not all methods are implemented, this is deliberate as this class is not a
    replacement for the builtin sockets layer. fileno is not implemented to
    prevent the proxy being bypassed.
    """

    simulated_time = 0
    _proxied_arguments = dict.fromkeys([
        "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
        "setblocking", "setsockopt", "settimeout", "shutdown"])

    def __init__(self, sock, latency, bandwidth=1.0,
                 really_sleep=True):
        """
        :param bandwidth: simulated bandwidth (MegaBit)
        :param really_sleep: If set to false, the SocketDelay will just
            increase a counter, instead of calling time.sleep. This is useful
            for unittesting the SocketDelay.
        """
        self.sock = sock
        self.latency = latency
        self.really_sleep = really_sleep
        self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
        self.new_roundtrip = False

    def sleep(self, s):
        if self.really_sleep:
            time.sleep(s)
        else:
            SocketDelay.simulated_time += s

    def __getattr__(self, attr):
        if attr in SocketDelay._proxied_arguments:
            return getattr(self.sock, attr)
        raise AttributeError("'SocketDelay' object has no attribute %r" %
                             attr)

    def dup(self):
        return SocketDelay(self.sock.dup(), self.latency, self.time_per_byte,
                           self.really_sleep)

    def recv(self, *args):
        data = self.sock.recv(*args)
        if data and self.new_roundtrip:
            self.new_roundtrip = False
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return data

    def sendall(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return self.sock.sendall(data, flags)

    def send(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        bytes_sent = self.sock.send(data, flags)
        self.sleep(bytes_sent * self.time_per_byte)
        return bytes_sent
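
# A minimal usage sketch of SocketDelay (hypothetical values): wrap an
# accepted socket so each new roundtrip costs 100ms and throughput is about
# 1 megabit/s. At bandwidth=1.0, time_per_byte = 1 / (1.0 / 8.0 * 1024 *
# 1024), roughly 7.6 microseconds per byte.
#   slow_sock = SocketDelay(sock, latency=0.1, bandwidth=1.0)
#   slow_sock.sendall('hello')  # charges latency once, then per-byte time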
class SFTPServer(Server):
    """Common code for SFTP server facilities."""

    def __init__(self, server_interface=StubServer):
        self._original_vendor = None
        self._homedir = None
        self._server_homedir = None
        self._listener = None
        self._root = None
        self._vendor = ssh.ParamikoVendor()
        self._server_interface = server_interface
        self.logs = []
        self.add_latency = 0

    def _get_sftp_url(self, path):
        """Calculate an sftp url to this server for path."""
        return 'sftp://foo:bar@localhost:%d/%s' % (self._listener.port, path)

    def log(self, message):
        """StubServer uses this to log when a new server is created."""
        self.logs.append(message)
    def _run_server_entry(self, sock):
        """Entry point for all implementations of _run_server.

        If self.add_latency is > 0.000001 then sock is given a latency adding
        decorator.
        """
        if self.add_latency > 0.000001:
            sock = SocketDelay(sock, self.add_latency)
        return self._run_server(sock)
    def _run_server(self, s):
        ssh_server = paramiko.Transport(s)
        key_file = pathjoin(self._homedir, 'test_rsa.key')
        f = open(key_file, 'w')
        f.write(STUB_SERVER_KEY)
        f.close()
        host_key = paramiko.RSAKey.from_private_key_file(key_file)
        ssh_server.add_server_key(host_key)
        server = self._server_interface(self)
        ssh_server.set_subsystem_handler('sftp', paramiko.SFTPServer,
                                         StubSFTPServer, root=self._root,
                                         home=self._server_homedir)
        event = threading.Event()
        ssh_server.start_server(event, server)
        event.wait(5.0)
    def setUp(self):
        self._original_vendor = ssh._ssh_vendor
        ssh._ssh_vendor = self._vendor
        if sys.platform == 'win32':
            # Win32 needs to use the UNICODE api
            self._homedir = getcwd()
        else:
            # But Linux SFTP servers should just deal in bytestreams
            self._homedir = os.getcwd()
        if self._server_homedir is None:
            self._server_homedir = self._homedir
        self._root = '/'
        if sys.platform == 'win32':
            self._root = ''
        self._listener = SocketListener(self._run_server_entry)
        self._listener.setDaemon(True)
        self._listener.start()

    def tearDown(self):
        """See bzrlib.transport.Server.tearDown."""
        self._listener.stop()
        ssh._ssh_vendor = self._original_vendor
    def get_bogus_url(self):
        """See bzrlib.transport.Server.get_bogus_url."""
        # this is chosen to try to prevent trouble with proxies, weird dns,
        # etc: we bind a random socket, so that we get a guaranteed unused
        # port; we just never listen on that port
        s = socket.socket()
        s.bind(('localhost', 0))
        return 'sftp://%s:%s/' % s.getsockname()
class SFTPFullAbsoluteServer(SFTPServer):
    """A test server for sftp transports, using absolute urls and ssh."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url(urlutils.escape(self._homedir[1:]))
class SFTPServerWithoutSSH(SFTPServer):
    """An SFTP server that uses a simple TCP socket pair rather than SSH."""

    def __init__(self):
        super(SFTPServerWithoutSSH, self).__init__()
        self._vendor = ssh.LoopbackVendor()
    def _run_server(self, sock):
        # Re-import these as locals, so that they're still accessible during
        # interpreter shutdown (when all module globals get set to None,
        # leading to confusing errors like "'NoneType' object has no
        # attribute 'error'").
        import socket, errno

        class FakeChannel(object):
            def get_transport(self):
                return self
            def get_log_channel(self):
                return 'paramiko'
            def get_name(self):
                return '1'
            def get_hexdump(self):
                return False
            def close(self):
                pass

        server = paramiko.SFTPServer(FakeChannel(), 'sftp', StubServer(self), StubSFTPServer,
                                     root=self._root, home=self._server_homedir)
        try:
            server.start_subsystem('sftp', None, sock)
        except socket.error, e:
            if (len(e.args) > 0) and (e.args[0] == errno.EPIPE):
                # it's okay for the client to disconnect abruptly
                # (bug in paramiko 1.6: it should absorb this exception)
                pass
            else:
                raise
        except Exception, e:
            import sys; sys.stderr.write('\nEXCEPTION %r\n\n' % e.__class__)
        server.finish_subsystem()
class SFTPAbsoluteServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using absolute urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        if sys.platform == 'win32':
            return self._get_sftp_url(urlutils.escape(self._homedir))
        else:
            return self._get_sftp_url(urlutils.escape(self._homedir[1:]))
class SFTPHomeDirServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using homedir relative urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url("~/")
class SFTPSiblingAbsoluteServer(SFTPAbsoluteServer):
    """A test server for sftp transports, using absolute urls to non-home."""

    def setUp(self):
        self._server_homedir = '/dev/noone/runs/tests/here'
        super(SFTPSiblingAbsoluteServer, self).setUp()
def _sftp_connect(host, port, username, password):
    """Connect to the remote sftp server.

    :raises: a TransportError 'could not connect'.

    :returns: a paramiko.sftp_client.SFTPClient

    TODO: Raise a more reasonable ConnectionFailed exception
    """
    idx = (host, port, username)
    try:
        return _connected_hosts[idx]
    except KeyError:
        pass

    sftp = _sftp_connect_uncached(host, port, username, password)
    _connected_hosts[idx] = sftp
    return sftp


def _sftp_connect_uncached(host, port, username, password):
    vendor = ssh._get_ssh_vendor()
    sftp = vendor.connect_sftp(username, password, host, port)
    return sftp
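
# Illustrative behaviour of the cache above (hypothetical host): repeated
# calls with the same (host, port, username) return the cached SFTPClient:
#   c1 = _sftp_connect('example.com', 22, 'user', None)
#   c2 = _sftp_connect('example.com', 22, 'user', None)
#   assert c1 is c2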
def get_test_permutations():
    """Return the permutations to be used in testing."""
    from bzrlib.tests import stub_sftp
    return [(SFTPTransport, stub_sftp.SFTPAbsoluteServer),
            (SFTPTransport, stub_sftp.SFTPHomeDirServer),
            (SFTPTransport, stub_sftp.SFTPSiblingAbsoluteServer),
            ]