class _SFTPReadvHelper(object):
    """A class to help with managing the state of a readv request."""

    # See _get_requests for an explanation.
    _max_request_size = 32768

    def __init__(self, original_offsets, relpath, _report_activity):
        """Create a new readv helper.

        :param original_offsets: The original requests given by the caller of
            readv()
        :param relpath: The name of the file (if known)
        :param _report_activity: A Transport._report_activity bound method,
            to be called as data arrives.
        """
        self.original_offsets = list(original_offsets)
        self.relpath = relpath
        self._report_activity = _report_activity
    def _get_requests(self):
        """Break up the offsets into individual requests over sftp.

        The SFTP spec only requires implementers to support 32kB requests. We
        could try something larger (openssh supports 64kB), but then we have to
        handle requests that fail.
        So instead, we just break up our maximum chunks into 32kB chunks, and
        asynchronously request them.
        Newer versions of paramiko would do the chunking for us, but we want to
        start processing results right away, so we do it ourselves.
        """
        # TODO: Because we issue async requests, we don't 'fudge' any extra
        #       data. I'm not 100% sure that is the best choice.

        # The first thing we do is to collapse the individual requests as much
        # as possible, so we don't issue requests <32kB
        sorted_offsets = sorted(self.original_offsets)
        coalesced = list(ConnectedTransport._coalesce_offsets(sorted_offsets,
                                                              limit=0,
                                                              fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # Break this up into 32kB requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size
        if 'sftp' in debug.debug_flags:
            mutter('SFTP.readv(%s) %s offsets => %s coalesced => %s requests',
                   self.relpath, len(sorted_offsets), len(coalesced),
                   len(requests))
        return requests
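    # Illustrative sketch (assumed values, not part of the original module):
    # three caller offsets (0, 40000), (40000, 5000) and (100000, 1000)
    # coalesce into two ranges, (0, 45000) and (100000, 1000); the first is
    # then capped at _max_request_size bytes per request, so _get_requests()
    # would return [(0, 32768), (32768, 12232), (100000, 1000)].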
    def request_and_yield_offsets(self, fp):
        """Request the data from the remote machine, yielding the results.

        :param fp: A Paramiko SFTPFile object that supports readv.
        :return: Yield the data requested by the original readv caller, one by
            one.
        """
        requests = self._get_requests()
        offset_iter = iter(self.original_offsets)
        cur_offset, cur_size = offset_iter.next()
        # paramiko .readv() yields strings that are in the order of the requests
        # So we track the current request to know where the next data is
        # being returned from.
        input_start = None
        last_end = None
        buffered_data = []
        buffered_len = 0

        # This is used to buffer chunks which we couldn't process yet
        # It is (start, end, data) tuples.
        data_chunks = []
        # Create an 'unlimited' data stream, so we stop based on requests,
        # rather than just because the data stream ended. This lets us detect
        # short reads.
        data_stream = itertools.chain(fp.readv(requests),
                                      itertools.repeat(None))
        for (start, length), data in itertools.izip(requests, data_stream):
            if data is None:
                # The data stream ran out before all requests were satisfied;
                # the server returned less data than was asked for.
                raise errors.ShortReadvError(self.relpath,
                                             start, length, 0)
            if len(data) != length:
                raise errors.ShortReadvError(self.relpath,
                                             start, length, len(data))
            self._report_activity(length, 'read')
            if last_end is None:
                # This is the first request, just buffer it
                buffered_data = [data]
                buffered_len = length
                input_start = start
            elif start == last_end:
                # The data we are reading fits neatly on the previous
                # buffer, so this is all part of a larger coalesced range.
                buffered_data.append(data)
                buffered_len += length
            else:
                # We have an 'interrupt' in the data stream. So we know we are
                # at a request boundary.
                if buffered_len > 0:
                    # We haven't consumed the buffer so far, so put it into
                    # data_chunks, and continue.
                    buffered = ''.join(buffered_data)
                    data_chunks.append((input_start, buffered))
                input_start = start
                buffered_data = [data]
                buffered_len = length
            last_end = start + length
            if input_start == cur_offset and cur_size <= buffered_len:
                # Simplify the next steps a bit by transforming buffered_data
                # into a single string. We also have the nice property that
                # when there is only one string ''.join([x]) == x, so there is
                # no data copying.
                buffered = ''.join(buffered_data)
                # Clean out buffered data so that we keep memory
                # consumption low
                del buffered_data[:]
                buffered_offset = 0
                # TODO: We *could* also consider the case where cur_offset is
                #       in the buffered range, even though it doesn't *start*
                #       the buffered range. But for packs we pretty much always
                #       read in order, so you won't get any extra data in the
                #       middle.
                while (input_start == cur_offset
                       and (buffered_offset + cur_size) <= buffered_len):
                    # We've buffered enough data to process this request, spit
                    # it out
                    cur_data = buffered[buffered_offset:buffered_offset + cur_size]
                    # move the direct pointer into our buffered data
                    buffered_offset += cur_size
                    # Move the start-of-buffer pointer
                    input_start += cur_size
                    # Yield the requested data
                    yield cur_offset, cur_data
                    cur_offset, cur_size = offset_iter.next()
                # at this point, we've consumed as much of buffered as we can,
                # so break off the portion that we consumed
                if buffered_offset == len(buffered):
                    # No tail to leave behind
                    buffered_data = []
                    buffered_len = 0
                else:
                    buffered = buffered[buffered_offset:]
                    buffered_data = [buffered]
                    buffered_len = len(buffered)
        # now that the data stream is done, close the handle
        fp.close()
        if buffered_len:
            buffered = ''.join(buffered_data)
            del buffered_data[:]
            data_chunks.append((input_start, buffered))
        if data_chunks:
            if 'sftp' in debug.debug_flags:
                mutter('SFTP readv left with %d out-of-order bytes',
                       sum(map(lambda x: len(x[1]), data_chunks)))
            # We've processed all the readv data, at this point, anything we
            # couldn't process is in data_chunks. This doesn't happen often, so
            # this code path isn't optimized
            # We use an interesting process for data_chunks
            # Specifically if we have "bisect_left([(start, len, entries)],
            #                                      (qstart,))"
            # If start == qstart, then we get the specific node. Otherwise we
            # get the previous node
            while True:
                idx = bisect.bisect_left(data_chunks, (cur_offset,))
                if idx < len(data_chunks) and data_chunks[idx][0] == cur_offset:
                    # The data starts here
                    data = data_chunks[idx][1][:cur_size]
                elif idx > 0:
                    # The data is in a portion of a previous page
                    idx -= 1
                    sub_offset = cur_offset - data_chunks[idx][0]
                    data = data_chunks[idx][1]
                    data = data[sub_offset:sub_offset + cur_size]
                else:
                    # We are missing the page where the data should be found,
                    # which means the data was never read.
                    raise AssertionError('missing data for offset %s'
                                         % (cur_offset,))
                if len(data) != cur_size:
                    raise AssertionError('We must have miscalculated.'
                                         ' We expected %d bytes, but only found %d'
                                         % (cur_size, len(data)))
                yield cur_offset, data
                cur_offset, cur_size = offset_iter.next()
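    # Lookup sketch for the out-of-order path above (illustrative values,
    # not from the original source): with
    # data_chunks == [(0, 'abcd'), (10, 'wxyz')],
    #   bisect.bisect_left(data_chunks, (10,)) == 1, an exact start match, and
    #   bisect.bisect_left(data_chunks, (12,)) == 2, so idx - 1 selects the
    #   chunk starting at 10 and sub_offset == 2 slices into 'wxyz'.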
class SFTPUrlHandling(Transport):
    """Mix-in that does common handling of SSH/SFTP URLs."""

    def __init__(self, base):
        self._parse_url(base)
        base = self._unparse_url(self._path)
        if base[-1] != '/':
            base += '/'
        super(SFTPUrlHandling, self).__init__(base)

    def _parse_url(self, url):
        (self._scheme,
         self._username, self._password,
         self._host, self._port, self._path) = self._split_url(url)

    def _unparse_url(self, path):
        """Return a URL for a path relative to this transport.
        """
        path = urllib.quote(path)
        # handle homedir paths
        if not path.startswith('/'):
            path = "/~/" + path
        netloc = urllib.quote(self._host)
        if self._username is not None:
            netloc = '%s@%s' % (urllib.quote(self._username), netloc)
        if self._port is not None:
            netloc = '%s:%d' % (netloc, self._port)
        return urlparse.urlunparse((self._scheme, netloc, path, '', '', ''))

    def _split_url(self, url):
        (scheme, username, password, host, port, path) = split_url(url)
        ## assert scheme == 'sftp'

        # the initial slash should be removed from the path, and treated
        # as a homedir relative path (the path begins with a double slash
        # if it is absolute).
        # see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as it's too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'.
        # handle homedir paths
        if path.startswith('/~/'):
            path = path[3:]
        elif path == '/~':
            path = ''
        return (scheme, username, password, host, port, path)

    def abspath(self, relpath):
        """Return the full url to the given relative path.

        @param relpath: the relative path or path components
        @type relpath: str or list
        """
        return self._unparse_url(self._remote_path(relpath))

    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        return self._combine_paths(self._path, relpath)
class SFTPTransport(SFTPUrlHandling):
    """Transport implementation for SFTP access."""

    _do_prefetch = _default_do_prefetch
    # TODO: newer versions of paramiko would be able to break
    #       up the request itself, rather than us having to worry about it
    _max_request_size = 32768
    def __init__(self, base, clone_from=None):
        super(SFTPTransport, self).__init__(base)
        if clone_from is None:
            self._sftp_connect()
        else:
            # use the same ssh connection, etc
            self._sftp = clone_from._sftp
        # super saves 'self.base'

    def should_cache(self):
        """
        Return True if the data pulled across should be cached locally.
        """
        return True

    def clone(self, offset=None):
        """
        Return a new SFTPTransport with root at self.base + offset.
        We share the same SFTP session between such transports, because it's
        fairly expensive to set them up.
        """
        if offset is None:
            return SFTPTransport(self.base, self)
        return SFTPTransport(self.abspath(offset), self)
    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        remote_path = self._parsed_url.clone(relpath).path
        # the initial slash should be removed from the path, and treated as a
        # homedir relative path (the path begins with a double slash if it is
        # absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as it's too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'
        # vila--20070602 and leave absolute paths begin with a single slash.
        if remote_path.startswith('/~/'):
            remote_path = remote_path[3:]
        elif remote_path == '/~':
            remote_path = '/'
        return remote_path

    def _create_connection(self, credentials=None):
        """Create a new connection with the provided credentials.

        :param credentials: The credentials needed to establish the connection.

        :return: The created connection and its associated credentials.

        The credentials are only the password as it may have been entered
        interactively by the user and may be different from the one provided
        in base url at transport creation time.
        """
        if credentials is None:
            password = self._parsed_url.password
        else:
            password = credentials

        vendor = ssh._get_ssh_vendor()
        user = self._parsed_url.user
        if user is None:
            auth = config.AuthenticationConfig()
            user = auth.get_user('ssh', self._parsed_url.host,
                                 self._parsed_url.port)
        connection = vendor.connect_sftp(self._parsed_url.user, password,
                                         self._parsed_url.host,
                                         self._parsed_url.port)
        return connection, (user, password)

    def disconnect(self):
        connection = self._get_connection()
        if connection is not None:
            connection.close()

    def _get_sftp(self):
        """Ensures that a connection is established"""
        connection = self._get_connection()
        if connection is None:
            # First connection ever
            connection, credentials = self._create_connection()
            self._set_connection(connection, credentials)
        return connection

    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        relpath is a urlencoded string.

        :return: a path prefixed with / for regular abspath-based urls, or a
            path that does not begin with / for urls which begin with /~/.
        """
        # how does this work?
        # it processes relpath with respect to
        # the current base path (self._path)
        # firstly we create a path to evaluate:
        # if relpath is an abspath or homedir path, its the entire thing
        # otherwise we join our base with relpath
        # then we eliminate all empty segments (double //'s) outside the first
        # two elements of the list. This avoids problems with trailing
        # slashes, or other abnormalities.
        # finally we evaluate the entire path in a single pass
        # '.' segments are stripped,
        # '..' result in popping the left most already
        # processed path (which can never be empty because of the check for
        # abspath and homedir meaning that its not, or that we've used our
        # path. If the pop would pop the root, we ignore it.
        #
        # Specific case examinations:
        # remove the special case for ~: if the current root is ~/ popping of it
        # = / thus our seed for a ~ based path is ['', '~']
        # and if we end up with [''] then we had basically ('', '..') (which is
        # '/..' so we append '' if the length is one, and assert that the first
        # element is still ''. Lastly, if we end with ['', '~'] as a prefix for
        # the output, we've got a homedir path, so we strip that prefix before
        # '/' joining the resulting list.
        #
        # case one: '/' -> ['', ''] cannot shrink
        # case two: '/' + '../foo' -> ['', 'foo'] (take '', '', '..', 'foo')
        #           and pop the second '' for the '..', append 'foo'
        # case three: '/~/' -> ['', '~', '']
        # case four: '/~/' + '../foo' -> ['', '~', '', '..', 'foo'],
        #           and we want to get '/foo' - the empty path in the middle
        #           needs to be stripped, then normal path manipulation will
        #           work.
        # case five: '/..' ['', '..'], we want ['', '']
        #           stripping '' outside the first two is ok
        #           ignore .. if its too high up
        #
        # lastly this code is possibly reusable by FTP, but not reusable by
        # local paths: ~ is resolvable correctly, nor by HTTP or the smart
        # server: ~ is resolved remotely.
        #
        # however, a version of this that acts on self.base is possible to be
        # written which manipulates the URL in canonical form, and would be
        # reusable for all transports, if a flag for allowing ~/ at all was
        # given.
        assert isinstance(relpath, basestring)
        relpath = urlutils.unescape(relpath)

        if relpath.startswith('/'):
            # abspath - normal split is fine.
            current_path = relpath.split('/')
        elif relpath.startswith('~/'):
            # root is homedir based: normal split and prefix '' to remove the
            # special case
            current_path = [''] + relpath.split('/')
        else:
            # root is from the current directory:
            if self._path.startswith('/'):
                # abspath, take the regular split
                current_path = []
            else:
                # homedir based, add the '', '~' not present in self._path
                current_path = ['', '~']
            # add our current dir
            current_path.extend(self._path.split('/'))
            # add the users relpath
            current_path.extend(relpath.split('/'))
        # strip '' segments that are not in the first one - the leading /.
        to_process = current_path[:1]
        for segment in current_path[1:]:
            if segment != '':
                to_process.append(segment)

        # process '.' and '..' segments into output_path.
        output_path = []
        for segment in to_process:
            if segment == '..':
                # directory pop. Remove a directory
                # as long as we are not at the root
                if len(output_path) > 1:
                    output_path.pop()
                # cannot pop beyond the root, so do nothing
            elif segment == '.':
                continue # strip the '.' from the output.
            else:
                # this will append '' to output_path for the root elements,
                # which is appropriate: its why we strip '' in the first pass.
                output_path.append(segment)

        # check output special cases:
        if output_path == ['']:
            # [''] -> ['', '']
            output_path = ['', '']
        elif output_path[:2] == ['', '~']:
            # ['', '~', ...] -> ...
            output_path = output_path[2:]
        path = '/'.join(output_path)
        return path
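    # Worked examples for the path handling above (assumed transport state,
    # for illustration only):
    #   with self._path == '/srv/repo':  _remote_path('foo')    => '/srv/repo/foo'
    #                                    _remote_path('../foo') => '/srv/foo'
    #   with self._path == 'base' (from a /~/ homedir url):
    #                                    _remote_path('foo')    => 'base/foo'
    # A result without a leading '/' is resolved by the server relative to
    # the user's home directory, matching the /~/ convention described above.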
    def relpath(self, abspath):
        scheme, username, password, host, port, path = self._split_url(abspath)
        error = []
        if (username != self._username):
            error.append('username mismatch')
        if (host != self._host):
            error.append('host mismatch')
        if (port != self._port):
            error.append('port mismatch')
        if (not path.startswith(self._path)):
            error.append('path mismatch')
        if error:
            extra = ': ' + ', '.join(error)
            raise PathNotChild(abspath, self.base, extra=extra)
        pl = len(self._path)
        return path[pl:].strip('/')
    def has(self, relpath):
        """
        Does the target location exist?
        """
        try:
            self._get_sftp().stat(self._remote_path(relpath))
            # stat result is about 20 bytes, let's say
            self._report_activity(20, 'read')
            return True
        except IOError:
            return False

    def get(self, relpath):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        """
        try:
            path = self._remote_path(relpath)
            f = self._get_sftp().file(path, mode='rb')
            if self._do_prefetch and (getattr(f, 'prefetch', None) is not None):
                f.prefetch()
            return f
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving',
                failure_exc=errors.ReadError)

    def get_bytes(self, relpath):
        # reimplement this here so that we can report how many bytes came back
        f = self.get(relpath)
        try:
            bytes = f.read()
            self._report_activity(len(bytes), 'read')
            return bytes
        finally:
            f.close()
    def _readv(self, relpath, offsets):
        """See Transport.readv()"""
        # We overload the default readv() because we want to use a file
        # that does not have prefetch enabled.
        if not offsets:
            return

        try:
            path = self._remote_path(relpath)
            fp = self._get_sftp().file(path, mode='rb')
            readv = getattr(fp, 'readv', None)
            if readv:
                return self._sftp_readv(fp, offsets, relpath)
            if 'sftp' in debug.debug_flags:
                mutter('seek and read %s offsets', len(offsets))
            return self._seek_and_read(fp, offsets, relpath)
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving')
    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For SFTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
        return 64 * 1024
    def _sftp_readv(self, fp, offsets, relpath):
        """Use the readv() member of fp to do async readv.

        Then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants.
        """
        helper = _SFTPReadvHelper(offsets, relpath, self._report_activity)
        return helper.request_and_yield_offsets(fp)
    def _sftp_readv(self, fp, offsets, relpath='<unknown>'):
        """Use the readv() member of fp to do async readv.

        And then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants
        """
        offsets = list(offsets)
        sorted_offsets = sorted(offsets)

        # The algorithm works as follows:
        # 1) Coalesce nearby reads into a single chunk
        #    This generates a list of combined regions, the total size
        #    and the size of the sub regions. This coalescing step is limited
        #    in the number of nearby chunks to combine, and is allowed to
        #    skip small breaks in the requests. Limiting it makes sure that
        #    we can start yielding some data earlier, and skipping means we
        #    make fewer requests. (Beneficial even when using async)
        # 2) Break up these combined regions into chunks that are smaller
        #    than 64KiB. Technically the limit is 65536, but we are a
        #    little bit conservative. This is because sftp has a maximum
        #    return chunk size of 64KiB (max size of an unsigned short)
        # 3) Issue a readv() to paramiko to create an async request for
        #    all of this data
        # 4) Read in the data as it comes back, until we've read one
        #    continuous section as determined in step 1
        # 5) Break up the full sections into hunks for the original requested
        #    offsets. And put them in a cache
        # 6) Check if the next request is in the cache, and if it is, remove
        #    it from the cache, and yield its data. Continue until no more
        #    entries are in the cache.
        # 7) loop back to step 4 until all data has been read
        #
        # TODO: jam 20060725 This could be optimized one step further, by
        #       attempting to yield whatever data we have read, even before
        #       the first coalesced section has been fully processed.

        # When coalescing for use with readv(), we don't really need to
        # use any fudge factor, because the requests are made asynchronously
        coalesced = list(self._coalesce_offsets(sorted_offsets,
                                                limit=self._max_readv_combine,
                                                fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # We need to break this up into multiple requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size
        if 'sftp' in debug.debug_flags:
            mutter('SFTP.readv() %s offsets => %s coalesced => %s requests',
                   len(offsets), len(coalesced), len(requests))

        # Queue the current read until we have read the full coalesced section
        cur_data = []
        cur_data_len = 0
        cur_coalesced_stack = iter(coalesced)
        cur_coalesced = cur_coalesced_stack.next()

        # Cache the results, but only until they have been fulfilled
        data_map = {}
        # turn the list of offsets into a stack
        offset_stack = iter(offsets)
        cur_offset_and_size = offset_stack.next()

        for data in fp.readv(requests):
            cur_data.append(data)
            cur_data_len += len(data)

            if cur_data_len < cur_coalesced.length:
                continue
            assert cur_data_len == cur_coalesced.length, \
                "Somehow we read too much: %s != %s" % (cur_data_len,
                                                        cur_coalesced.length)
            all_data = ''.join(cur_data)
            cur_data = []
            cur_data_len = 0

            for suboffset, subsize in cur_coalesced.ranges:
                key = (cur_coalesced.start+suboffset, subsize)
                data_map[key] = all_data[suboffset:suboffset+subsize]

            # Now that we've read some data, see if we can yield anything back
            while cur_offset_and_size in data_map:
                this_data = data_map.pop(cur_offset_and_size)
                yield cur_offset_and_size[0], this_data
                cur_offset_and_size = offset_stack.next()

            # We read a coalesced entry, so mark it as done
            cur_coalesced = None
            # Now that we've read all of the data for this coalesced section
            # on to the next
            cur_coalesced = cur_coalesced_stack.next()

        if cur_coalesced is not None:
            raise errors.ShortReadvError(relpath, cur_coalesced.start,
                                         cur_coalesced.length, len(data))
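    # Worked sketch of the flow above (illustrative values): offsets
    # [(0, 10), (10, 10), (4096, 10)] coalesce into two sections, one with
    # start=0, length=20, ranges=[(0, 10), (10, 10)] and one with start=4096,
    # length=10, ranges=[(0, 10)]. Once the first section's data arrives,
    # data_map holds {(0, 10): ..., (10, 10): ...} and both offsets are
    # yielded immediately, in the order the caller originally requested them.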
    def put_file(self, relpath, f, mode=None):

# ------------- server test implementation --------------

from bzrlib.tests.stub_sftp import StubServer, StubSFTPServer
STUB_SERVER_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
-----END RSA PRIVATE KEY-----
"""
class SocketListener(threading.Thread):

    def __init__(self, callback):
        threading.Thread.__init__(self)
        self._callback = callback
        self._socket = socket.socket()
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(('localhost', 0))
        self._socket.listen(1)
        self.port = self._socket.getsockname()[1]
        self._stop_event = threading.Event()

    def stop(self):
        # called from outside this thread
        self._stop_event.set()
        # use a timeout here, because if the test fails, the server thread may
        # never notice the stop_event.
        self.join(5.0)
        self._socket.close()

    def run(self):
        while True:
            readable, writable_unused, exception_unused = \
                select.select([self._socket], [], [], 0.1)
            if self._stop_event.isSet():
                return
            if len(readable) == 0:
                continue
            try:
                s, addr_unused = self._socket.accept()
                # because the loopback socket is inline, and transports are
                # never explicitly closed, best to launch a new thread.
                threading.Thread(target=self._callback, args=(s,)).start()
            except socket.error, x:
                sys.excepthook(*sys.exc_info())
                warning('Socket error during accept() within unit test server'
                        ' thread: %r' % x)
            except Exception, x:
                # probably a failed test; unit test thread will log the
                # failure/error
                sys.excepthook(*sys.exc_info())
                warning('Exception from within unit test server thread: %r' %
                        x)
class SocketDelay(object):
    """A socket decorator to make TCP appear slower.

    This changes recv, send, and sendall to add a fixed latency to each python
    call if a new roundtrip is detected. That is, when a recv is called and the
    flag new_roundtrip is set, latency is charged. Every send and send_all
    sets this flag.

    In addition every send, sendall and recv sleeps a bit per character sent to
    simulate bandwidth.

    Not all methods are implemented, this is deliberate as this class is not a
    replacement for the builtin sockets layer. fileno is not implemented to
    prevent the proxy being bypassed.
    """

    simulated_time = 0
    _proxied_arguments = dict.fromkeys([
        "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
        "setblocking", "setsockopt", "settimeout", "shutdown"])

    def __init__(self, sock, latency, bandwidth=1.0,
                 really_sleep=True):
        """
        :param bandwidth: simulated bandwidth (MegaBit)
        :param really_sleep: If set to false, the SocketDelay will just
            increase a counter, instead of calling time.sleep. This is useful for
            unittesting the SocketDelay.
        """
        self.sock = sock
        self.latency = latency
        self.really_sleep = really_sleep
        self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
        self.new_roundtrip = False

    def sleep(self, s):
        if self.really_sleep:
            time.sleep(s)
        else:
            SocketDelay.simulated_time += s

    def __getattr__(self, attr):
        if attr in SocketDelay._proxied_arguments:
            return getattr(self.sock, attr)
        raise AttributeError("'SocketDelay' object has no attribute %r" %
                             attr)

    def dup(self):
        return SocketDelay(self.sock.dup(), self.latency, self.time_per_byte,
                           self.really_sleep)

    def recv(self, *args):
        data = self.sock.recv(*args)
        if data and self.new_roundtrip:
            self.new_roundtrip = False
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return data

    def sendall(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return self.sock.sendall(data, flags)

    def send(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        bytes_sent = self.sock.send(data, flags)
        self.sleep(bytes_sent * self.time_per_byte)
        return bytes_sent
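# Illustrative use of SocketDelay (not from the original module): wrap a
# socket to simulate a 100ms roundtrip on a 1 megabit link without actually
# sleeping, then inspect the accumulated virtual delay.
#
#   delayed = SocketDelay(sock, latency=0.1, bandwidth=1.0, really_sleep=False)
#   delayed.sendall('x' * 1024)  # charges latency plus 1024 bytes of transfer
#   print SocketDelay.simulated_time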
class SFTPServer(Server):
    """Common code for SFTP server facilities."""

    def __init__(self, server_interface=StubServer):
        self._original_vendor = None
        self._homedir = None
        self._server_homedir = None
        self._listener = None
        self._root = None
        self._vendor = ssh.ParamikoVendor()
        self._server_interface = server_interface
        self.logs = []
        self.add_latency = 0

    def _get_sftp_url(self, path):
        """Calculate an sftp url to this server for path."""
        return 'sftp://foo:bar@localhost:%d/%s' % (self._listener.port, path)

    def log(self, message):
        """StubServer uses this to log when a new server is created."""
        self.logs.append(message)
    def _run_server_entry(self, sock):
        """Entry point for all implementations of _run_server.

        If self.add_latency is > 0.000001 then sock is given a latency adding
        decorator.
        """
        if self.add_latency > 0.000001:
            sock = SocketDelay(sock, self.add_latency)
        return self._run_server(sock)

    def _run_server(self, s):
        ssh_server = paramiko.Transport(s)
        key_file = pathjoin(self._homedir, 'test_rsa.key')
        f = open(key_file, 'w')
        f.write(STUB_SERVER_KEY)
        f.close()
        host_key = paramiko.RSAKey.from_private_key_file(key_file)
        ssh_server.add_server_key(host_key)
        server = self._server_interface(self)
        ssh_server.set_subsystem_handler('sftp', paramiko.SFTPServer,
                                         StubSFTPServer, root=self._root,
                                         home=self._server_homedir)
        event = threading.Event()
        ssh_server.start_server(event, server)
        event.wait(5.0)
    def setUp(self, backing_server=None):
        # XXX: TODO: make sftpserver back onto backing_server rather than local
        # disk.
        assert (backing_server is None or
                isinstance(backing_server, local.LocalURLServer)), (
            "backing_server should not be %r, because this can only serve the "
            "local current working directory." % (backing_server,))
        self._original_vendor = ssh._ssh_vendor_manager._cached_ssh_vendor
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._vendor
        if sys.platform == 'win32':
            # Win32 needs to use the UNICODE api
            self._homedir = getcwd()
        else:
            # But Linux SFTP servers should just deal in bytestreams
            self._homedir = os.getcwd()
        if self._server_homedir is None:
            self._server_homedir = self._homedir
        self._root = '/'
        if sys.platform == 'win32':
            self._root = ''
        self._listener = SocketListener(self._run_server_entry)
        self._listener.setDaemon(True)
        self._listener.start()

    def tearDown(self):
        """See bzrlib.transport.Server.tearDown."""
        self._listener.stop()
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._original_vendor
    def get_bogus_url(self):
        """See bzrlib.transport.Server.get_bogus_url."""
        # this is chosen to try to prevent trouble with proxies, weird dns, etc
        # we bind a random socket, so that we get a guaranteed unused port
        # we just never listen on that port
        s = socket.socket()
        s.bind(('localhost', 0))
        return 'sftp://%s:%s/' % s.getsockname()
class SFTPFullAbsoluteServer(SFTPServer):
    """A test server for sftp transports, using absolute urls and ssh."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        homedir = self._homedir
        if sys.platform != 'win32':
            # Remove the initial '/' on all platforms but win32
            homedir = homedir[1:]
        return self._get_sftp_url(urlutils.escape(homedir))
class SFTPServerWithoutSSH(SFTPServer):
    """An SFTP server that uses a simple TCP socket pair rather than SSH."""

    def __init__(self):
        super(SFTPServerWithoutSSH, self).__init__()
        self._vendor = ssh.LoopbackVendor()

    def _run_server(self, sock):
        # Re-import these as locals, so that they're still accessible during
        # interpreter shutdown (when all module globals get set to None, leading
        # to confusing errors like "'NoneType' object has no attribute 'error'".
        class FakeChannel(object):
            def get_transport(self):
                return self
            def get_log_channel(self):
                return 'paramiko'
            def get_name(self):
                return '1'
            def get_hexdump(self):
                return False
            def close(self):
                pass

        server = paramiko.SFTPServer(FakeChannel(), 'sftp', StubServer(self), StubSFTPServer,
                                     root=self._root, home=self._server_homedir)
        try:
            server.start_subsystem('sftp', None, sock)
        except socket.error, e:
            if (len(e.args) > 0) and (e.args[0] == errno.EPIPE):
                # it's okay for the client to disconnect abruptly
                # (bug in paramiko 1.6: it should absorb this exception)
                pass
            else:
                raise
        except Exception, e:
            # This typically seems to happen during interpreter shutdown, so
            # most of the useful ways to report this error won't work.
            # Writing the exception type, and then the text of the exception,
            # seems to be the best we can do.
            import sys
            sys.stderr.write('\nEXCEPTION %r: ' % (e.__class__,))
            sys.stderr.write('%s\n\n' % (e,))
        server.finish_subsystem()
class SFTPAbsoluteServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using absolute urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        homedir = self._homedir
        if sys.platform != 'win32':
            # Remove the initial '/' on all platforms but win32
            homedir = homedir[1:]
        return self._get_sftp_url(urlutils.escape(homedir))
class SFTPHomeDirServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using homedir relative urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url("~/")
class SFTPSiblingAbsoluteServer(SFTPAbsoluteServer):
    """A test server for sftp transports where only absolute paths will work.

    It does this by serving from a deeply-nested directory that doesn't exist.
    """

    def setUp(self, backing_server=None):
        self._server_homedir = '/dev/noone/runs/tests/here'
        super(SFTPSiblingAbsoluteServer, self).setUp(backing_server)
def _sftp_connect(host, port, username, password):
    """Connect to the remote sftp server.

    :raises: a TransportError 'could not connect'.

    :returns: a paramiko.sftp_client.SFTPClient

    TODO: Raise a more reasonable ConnectionFailed exception
    """
    idx = (host, port, username)
    try:
        return _connected_hosts[idx]
    except KeyError:
        pass

    sftp = _sftp_connect_uncached(host, port, username, password)
    _connected_hosts[idx] = sftp
    return sftp

def _sftp_connect_uncached(host, port, username, password):
    vendor = ssh._get_ssh_vendor()
    sftp = vendor.connect_sftp(username, password, host, port)
    return sftp
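# Cache behaviour sketch (illustrative, not from the original source):
# repeated connections to the same (host, port, username) triple reuse a
# single SFTPClient.
#
#   a = _sftp_connect('example.com', 22, 'user', 'secret')
#   b = _sftp_connect('example.com', 22, 'user', 'secret')
#   assert a is b  # the second call is served from _connected_hosts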
def get_test_permutations():
    """Return the permutations to be used in testing."""
    return [(SFTPTransport, SFTPAbsoluteServer),
            (SFTPTransport, SFTPHomeDirServer),
            (SFTPTransport, SFTPSiblingAbsoluteServer),
            ]