class _SFTPReadvHelper(object):
    """A class to help with managing the state of a readv request."""

    # See _get_requests for an explanation.
    _max_request_size = 32768

    def __init__(self, original_offsets, relpath, _report_activity):
        """Create a new readv helper.

        :param original_offsets: The original requests given by the caller of
            readv()
        :param relpath: The name of the file (if known)
        :param _report_activity: A Transport._report_activity bound method,
            to be called as data arrives.
        """
        self.original_offsets = list(original_offsets)
        self.relpath = relpath
        self._report_activity = _report_activity
    def _get_requests(self):
        """Break up the offsets into individual requests over sftp.

        The SFTP spec only requires implementers to support 32kB requests. We
        could try something larger (openssh supports 64kB), but then we have to
        handle requests that fail.
        So instead, we just break up our maximum chunks into 32kB chunks, and
        asynchronously request them.
        Newer versions of paramiko would do the chunking for us, but we want to
        start processing results right away, so we do it ourselves.
        """
        # TODO: Because we issue async requests, we don't 'fudge' any extra
        #       data. I'm not 100% sure that is the best choice.

        # The first thing we do is to collapse the individual requests as much
        # as possible, so we don't issue requests <32kB
        sorted_offsets = sorted(self.original_offsets)
        coalesced = list(ConnectedTransport._coalesce_offsets(sorted_offsets,
            limit=0, fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # Break this up into 32kB requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size
        if 'sftp' in debug.debug_flags:
            mutter('SFTP.readv(%s) %s offsets => %s coalesced => %s requests',
                   self.relpath, len(sorted_offsets), len(coalesced),
                   len(requests))
        return requests
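    # Illustrative example (values are hypothetical, not from the original
    # source): with _max_request_size = 32768, a single coalesced range of
    # (start=0, length=100000) is broken into
    #     [(0, 32768), (32768, 32768), (65536, 32768), (98304, 1696)]
    # i.e. full 32kB chunks plus one short tail request.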
    def request_and_yield_offsets(self, fp):
        """Request the data from the remote machine, yielding the results.

        :param fp: A Paramiko SFTPFile object that supports readv.
        :return: Yield the data requested by the original readv caller, one by
            one.
        """
        requests = self._get_requests()
        offset_iter = iter(self.original_offsets)
        cur_offset, cur_size = offset_iter.next()
        # paramiko .readv() yields strings that are in the order of the
        # requests. So we track the current request to know where the next
        # data is being returned from.
        input_start = None
        last_end = None
        buffered_data = []
        buffered_len = 0

        # This is used to buffer chunks which we couldn't process yet
        # It is (start, end, data) tuples.
        data_chunks = []
        # Create an 'unlimited' data stream, so we stop based on requests,
        # rather than just because the data stream ended. This lets us detect
        # short readvs.
        data_stream = itertools.chain(fp.readv(requests),
                                      itertools.repeat(None))
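        # Sketch of the sentinel idiom above (explanatory note, stdlib only):
        # chaining itertools.repeat(None) after the finite fp.readv() iterator
        # means izip() keeps pairing requests with None once the real data
        # runs out, so a short server response surfaces as data is None
        # instead of as a silently shorter loop.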
        for (start, length), data in itertools.izip(requests, data_stream):
            if data is None:
                if cur_coalesced is not None:
                    raise errors.ShortReadvError(self.relpath,
                        start, length, len(data))
            if len(data) != length:
                raise errors.ShortReadvError(self.relpath,
                    start, length, len(data))
            self._report_activity(length, 'read')
            if last_end is None:
                # This is the first request, just buffer it
                buffered_data = [data]
                buffered_len = length
                input_start = start
            elif start == last_end:
                # The data we are reading fits neatly on the previous
                # buffer, so this is all part of a larger coalesced range.
                buffered_data.append(data)
                buffered_len += length
            else:
                # We have an 'interrupt' in the data stream. So we know we are
                # at a request boundary.
                if buffered_len > 0:
                    # We haven't consumed the buffer so far, so put it into
                    # data_chunks, and continue.
                    buffered = ''.join(buffered_data)
                    data_chunks.append((input_start, buffered))
                input_start = start
                buffered_data = [data]
                buffered_len = length
            last_end = start + length
            if input_start == cur_offset and cur_size <= buffered_len:
                # Simplify the next steps a bit by transforming buffered_data
                # into a single string. We also have the nice property that
                # when there is only one string ''.join([x]) == x, so there is
                # no data copying.
                buffered = ''.join(buffered_data)
                # Clean out buffered data so that we keep memory
                # consumption low
                del buffered_data[:]
                buffered_offset = 0
                # TODO: We *could* also consider the case where cur_offset is
                #       in the buffered range, even though it doesn't *start*
                #       the buffered range. But for packs we pretty much always
                #       read in order, so you won't get any extra data in the
                #       middle.
                while (input_start == cur_offset
                       and (buffered_offset + cur_size) <= buffered_len):
                    # We've buffered enough data to process this request, spit it
                    # out
                    cur_data = buffered[buffered_offset:buffered_offset + cur_size]
                    # move the direct pointer into our buffered data
                    buffered_offset += cur_size
                    # Move the start-of-buffer pointer
                    input_start += cur_size
                    # Yield the requested data
                    yield cur_offset, cur_data
                    cur_offset, cur_size = offset_iter.next()
                # at this point, we've consumed as much of buffered as we can,
                # so break off the portion that we consumed
                if buffered_offset == len(buffered_data):
                    # No tail to leave behind
                    input_start = None
                    buffered = None
                else:
                    buffered = buffered[buffered_offset:]
                    buffered_data = [buffered]
                    buffered_len = len(buffered)
        # now that the data stream is done, close the handle
        fp.close()
        if buffered_len:
            buffered = ''.join(buffered_data)
            del buffered_data[:]
            data_chunks.append((input_start, buffered))
        if data_chunks:
            if 'sftp' in debug.debug_flags:
                mutter('SFTP readv left with %d out-of-order bytes',
                       sum(map(lambda x: len(x[1]), data_chunks)))
            # We've processed all the readv data, at this point, anything we
            # couldn't process is in data_chunks. This doesn't happen often, so
            # this code path isn't optimized
            # We use an interesting process for data_chunks
            # Specifically if we have "bisect_left([(start, len, entries)],
            #                                       (qstart,)])"
            # If start == qstart, then we get the specific node. Otherwise we
            # get the previous node
            while True:
                idx = bisect.bisect_left(data_chunks, (cur_offset,))
                if idx < len(data_chunks) and data_chunks[idx][0] == cur_offset:
                    # The data starts here
                    data = data_chunks[idx][1][:cur_size]
                elif idx > 0:
                    # The data is in a portion of a previous page
                    idx -= 1
                    sub_offset = cur_offset - data_chunks[idx][0]
                    data = data_chunks[idx][1]
                    data = data[sub_offset:sub_offset + cur_size]
                else:
                    # We are missing the page where the data should be found,
                    # which means that the data has not been read.
                    break
                if len(data) != cur_size:
                    raise AssertionError('We must have miscalculated.'
                        ' We expected %d bytes, but only found %d'
                        % (cur_size, len(data)))
                yield cur_offset, data
                cur_offset, cur_size = offset_iter.next()
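        # Illustrative note (hypothetical values): bisect_left with a
        # one-element key tuple compares only the start field, e.g.
        #     bisect.bisect_left([(0, 'abcde'), (10, 'fghij')], (10,)) == 1
        # while querying (5,) also returns 1, i.e. one past the chunk that
        # actually contains offset 5, which is why the elif branch above
        # steps idx back by one.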

class SFTPTransport(ConnectedTransport):
    """Transport implementation for SFTP access."""

    def _sftp_readv(self, fp, offsets, relpath):
        """Use the readv() member of fp to do async readv.

        Then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants.
        """
        helper = _SFTPReadvHelper(offsets, relpath, self._report_activity)
        return helper.request_and_yield_offsets(fp)

    # Older in-line implementation of the same readv logic, superseded by the
    # _SFTPReadvHelper-based version above:
    def _sftp_readv(self, fp, offsets, relpath='<unknown>'):
        """Use the readv() member of fp to do async readv.

        And then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants
        """
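        # Illustrative example (hypothetical values): callers pass readv-style
        # offsets such as [(0, 100), (2000, 50)] and expect the pairs
        # (0, <100 bytes>), (2000, <50 bytes>) back, one by one, in the order
        # originally requested.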
        offsets = list(offsets)
        sorted_offsets = sorted(offsets)

        # The algorithm works as follows:
        # 1) Coalesce nearby reads into a single chunk
        #    This generates a list of combined regions, the total size
        #    and the size of the sub regions. This coalescing step is limited
        #    in the number of nearby chunks to combine, and is allowed to
        #    skip small breaks in the requests. Limiting it makes sure that
        #    we can start yielding some data earlier, and skipping means we
        #    make fewer requests. (Beneficial even when using async)
        # 2) Break up these combined regions into chunks that are smaller
        #    than 64KiB. Technically the limit is 65536, but we are a
        #    little bit conservative. This is because sftp has a maximum
        #    return chunk size of 64KiB (max size of an unsigned short)
        # 3) Issue a readv() to paramiko to create an async request for
        #    all of this data
        # 4) Read in the data as it comes back, until we've read one
        #    continuous section as determined in step 1
        # 5) Break up the full sections into hunks for the original requested
        #    offsets. And put them in a cache
        # 6) Check if the next request is in the cache, and if it is, remove
        #    it from the cache, and yield its data. Continue until no more
        #    entries are in the cache.
        # 7) loop back to step 4 until all data has been read
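        # Illustrative walk-through of the steps above (hypothetical numbers,
        # not from the original code): requests for (0, 100), (120, 50) and
        # (70000, 10) would typically coalesce into two regions, roughly
        # (0, 170) and (70000, 10); the first region is fetched in one go,
        # split back into the (0, 100) and (120, 50) hunks via the cache, and
        # both are yielded before the second region has finished downloading.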

        # TODO: jam 20060725 This could be optimized one step further, by
        #       attempting to yield whatever data we have read, even before
        #       the first coalesced section has been fully processed.

        # When coalescing for use with readv(), we don't really need to
        # use any fudge factor, because the requests are made asynchronously
        coalesced = list(self._coalesce_offsets(sorted_offsets,
                               limit=self._max_readv_combine,
                               fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # We need to break this up into multiple requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size

        mutter('SFTP.readv() %s offsets => %s coalesced => %s requests',
               len(offsets), len(coalesced), len(requests))

        # Queue the current read until we have read the full coalesced section
        cur_data = []
        cur_data_len = 0
        cur_coalesced_stack = iter(coalesced)
        cur_coalesced = cur_coalesced_stack.next()

        # Cache the results, but only until they have been fulfilled
        data_map = {}
        # turn the list of offsets into a stack
        offset_stack = iter(offsets)
        cur_offset_and_size = offset_stack.next()

        for data in fp.readv(requests):
            cur_data.append(data)
            cur_data_len += len(data)

            if cur_data_len < cur_coalesced.length:
                continue
            assert cur_data_len == cur_coalesced.length, \
                "Somehow we read too much: %s != %s" % (cur_data_len,
                                                        cur_coalesced.length)
            all_data = ''.join(cur_data)
            cur_data = []
            cur_data_len = 0

            for suboffset, subsize in cur_coalesced.ranges:
                key = (cur_coalesced.start+suboffset, subsize)
                data_map[key] = all_data[suboffset:suboffset+subsize]

            # Now that we've read some data, see if we can yield anything back
            while cur_offset_and_size in data_map:
                this_data = data_map.pop(cur_offset_and_size)
                yield cur_offset_and_size[0], this_data
                cur_offset_and_size = offset_stack.next()

            # We read a coalesced entry, so mark it as done
            cur_coalesced = None
            # Now that we've read all of the data for this coalesced section
            # on to the next
            cur_coalesced = cur_coalesced_stack.next()

        if cur_coalesced is not None:
            raise errors.ShortReadvError(relpath, cur_coalesced.start,
                                         cur_coalesced.length, len(data))

    def put_file(self, relpath, f, mode=None):


# ------------- server test implementation --------------

from bzrlib.tests.stub_sftp import StubServer, StubSFTPServer

STUB_SERVER_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
-----END RSA PRIVATE KEY-----
"""

class SocketListener(threading.Thread):

    def __init__(self, callback):
        threading.Thread.__init__(self)
        self._callback = callback
        self._socket = socket.socket()
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(('localhost', 0))
        self._socket.listen(1)
        self.port = self._socket.getsockname()[1]
        self._stop_event = threading.Event()

    def stop(self):
        # called from outside this thread
        self._stop_event.set()
        # use a timeout here, because if the test fails, the server thread may
        # never notice the stop_event.
        self.join(5.0)
        self._socket.close()

    def run(self):
        while True:
            readable, writable_unused, exception_unused = \
                select.select([self._socket], [], [], 0.1)
            if self._stop_event.isSet():
                return
            if len(readable) == 0:
                continue
            try:
                s, addr_unused = self._socket.accept()
                # because the loopback socket is inline, and transports are
                # never explicitly closed, best to launch a new thread.
                threading.Thread(target=self._callback, args=(s,)).start()
            except socket.error, x:
                sys.excepthook(*sys.exc_info())
                warning('Socket error during accept() within unit test server'
                        ' thread: %r' % x)
            except Exception, x:
                # probably a failed test; unit test thread will log the
                # failure/error
                sys.excepthook(*sys.exc_info())
                warning('Exception from within unit test server thread: %r' %
                        x)

class SocketDelay(object):
    """A socket decorator to make TCP appear slower.

    This changes recv, send, and sendall to add a fixed latency to each python
    call if a new roundtrip is detected. That is, when a recv is called and the
    flag new_roundtrip is set, latency is charged. Every send and sendall
    set this flag.

    In addition every send, sendall and recv sleeps a bit per character sent to
    simulate bandwidth.

    Not all methods are implemented, this is deliberate as this class is not a
    replacement for the builtin sockets layer. fileno is not implemented to
    prevent the proxy being bypassed.
    """

    simulated_time = 0
    _proxied_arguments = dict.fromkeys([
        "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
        "setblocking", "setsockopt", "settimeout", "shutdown"])

    def __init__(self, sock, latency, bandwidth=1.0,
                 really_sleep=True):
        """
        :param bandwidth: simulated bandwidth (MegaBit)
        :param really_sleep: If set to false, the SocketDelay will just
            increase a counter, instead of calling time.sleep. This is useful
            for unittesting the SocketDelay.
        """
        self.sock = sock
        self.latency = latency
        self.really_sleep = really_sleep
        self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
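        # For example (illustrative arithmetic only): with the default
        # bandwidth of 1.0 MBit, time_per_byte is 1 / (0.125 * 1024 * 1024),
        # roughly 7.6e-06 seconds, so a 32kB read is charged about a quarter
        # of a second of simulated time.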
        self.new_roundtrip = False

    def sleep(self, s):
        if self.really_sleep:
            time.sleep(s)
        else:
            SocketDelay.simulated_time += s

    def __getattr__(self, attr):
        if attr in SocketDelay._proxied_arguments:
            return getattr(self.sock, attr)
        raise AttributeError("'SocketDelay' object has no attribute %r" %
                             attr)

    def dup(self):
        return SocketDelay(self.sock.dup(), self.latency, self.time_per_byte,
                           self.really_sleep)

    def recv(self, *args):
        data = self.sock.recv(*args)
        if data and self.new_roundtrip:
            self.new_roundtrip = False
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return data

    def sendall(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return self.sock.sendall(data, flags)

    def send(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        bytes_sent = self.sock.send(data, flags)
        self.sleep(bytes_sent * self.time_per_byte)
        return bytes_sent
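# Illustrative usage sketch (not part of the original module): wrapping an
# accepted socket to simulate a slow link, e.g.
#     slow_sock = SocketDelay(sock, latency=0.03, bandwidth=0.5,
#                             really_sleep=False)
#     slow_sock.sendall('hello')
# charges the latency once per detected roundtrip plus a per-byte cost,
# accumulated in SocketDelay.simulated_time because really_sleep is False.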

class SFTPServer(Server):
    """Common code for SFTP server facilities."""

    def __init__(self, server_interface=StubServer):
        self._original_vendor = None
        self._homedir = None
        self._server_homedir = None
        self._listener = None
        self._root = None
        self._vendor = ssh.ParamikoVendor()
        self._server_interface = server_interface
        # sftp server logs
        self.logs = []
        self.add_latency = 0

    def _get_sftp_url(self, path):
        """Calculate an sftp url to this server for path."""
        return 'sftp://foo:bar@localhost:%d/%s' % (self._listener.port, path)
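    # For example (illustrative values only): with the listener bound to port
    # 54321, _get_sftp_url('home/user') returns
    #     'sftp://foo:bar@localhost:54321/home/user'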
    def log(self, message):
        """StubServer uses this to log when a new server is created."""
        self.logs.append(message)

    def _run_server_entry(self, sock):
        """Entry point for all implementations of _run_server.

        If self.add_latency is > 0.000001 then sock is given a latency adding
        decorator.
        """
        if self.add_latency > 0.000001:
            sock = SocketDelay(sock, self.add_latency)
        return self._run_server(sock)
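    # Illustrative note (hypothetical usage, not from the original source): a
    # test can set server.add_latency = 0.03 before the listener starts, and
    # every accepted connection is then wrapped in SocketDelay, adding 30ms
    # per detected roundtrip.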
    def _run_server(self, s):
        ssh_server = paramiko.Transport(s)
        key_file = pathjoin(self._homedir, 'test_rsa.key')
        f = open(key_file, 'w')
        f.write(STUB_SERVER_KEY)
        f.close()
        host_key = paramiko.RSAKey.from_private_key_file(key_file)
        ssh_server.add_server_key(host_key)
        server = self._server_interface(self)
        ssh_server.set_subsystem_handler('sftp', paramiko.SFTPServer,
                                         StubSFTPServer, root=self._root,
                                         home=self._server_homedir)
        event = threading.Event()
        ssh_server.start_server(event, server)
        event.wait(5.0)
    def setUp(self, backing_server=None):
        # XXX: TODO: make sftpserver back onto backing_server rather than local
        # disk.
        assert (backing_server is None or
                isinstance(backing_server, local.LocalURLServer)), (
            "backing_server should not be %r, because this can only serve the "
            "local current working directory." % (backing_server,))
        self._original_vendor = ssh._ssh_vendor_manager._cached_ssh_vendor
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._vendor
        if sys.platform == 'win32':
            # Win32 needs to use the UNICODE api
            self._homedir = getcwd()
        else:
            # But Linux SFTP servers should just deal in bytestreams
            self._homedir = os.getcwd()
        if self._server_homedir is None:
            self._server_homedir = self._homedir
        self._root = '/'
        if sys.platform == 'win32':
            self._root = ''
        self._listener = SocketListener(self._run_server_entry)
        self._listener.setDaemon(True)
        self._listener.start()

    def tearDown(self):
        """See bzrlib.transport.Server.tearDown."""
        self._listener.stop()
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._original_vendor

    def get_bogus_url(self):
        """See bzrlib.transport.Server.get_bogus_url."""
        # this is chosen to try to prevent trouble with proxies, weird dns, etc
        # we bind a random socket, so that we get a guaranteed unused port
        # we just never listen on that port
        s = socket.socket()
        s.bind(('localhost', 0))
        return 'sftp://%s:%s/' % s.getsockname()

class SFTPFullAbsoluteServer(SFTPServer):
    """A test server for sftp transports, using absolute urls and ssh."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        homedir = self._homedir
        if sys.platform != 'win32':
            # Remove the initial '/' on all platforms but win32
            homedir = homedir[1:]
        return self._get_sftp_url(urlutils.escape(homedir))

class SFTPServerWithoutSSH(SFTPServer):
    """An SFTP server that uses a simple TCP socket pair rather than SSH."""

    def __init__(self):
        super(SFTPServerWithoutSSH, self).__init__()
        self._vendor = ssh.LoopbackVendor()

    def _run_server(self, sock):
        # Re-import these as locals, so that they're still accessible during
        # interpreter shutdown (when all module globals get set to None, leading
        # to confusing errors like "'NoneType' object has no attribute 'error'".
        class FakeChannel(object):
            def get_transport(self):
                return self
            def get_log_channel(self):
                return 'paramiko'
            def get_hexdump(self):
                return False

        server = paramiko.SFTPServer(FakeChannel(), 'sftp', StubServer(self), StubSFTPServer,
                                     root=self._root, home=self._server_homedir)
        try:
            server.start_subsystem('sftp', None, sock)
        except socket.error, e:
            if (len(e.args) > 0) and (e.args[0] == errno.EPIPE):
                # it's okay for the client to disconnect abruptly
                # (bug in paramiko 1.6: it should absorb this exception)
                pass
            else:
                raise
        except Exception, e:
            # This typically seems to happen during interpreter shutdown, so
            # most of the useful ways to report this error won't work.
            # Writing the exception type, and then the text of the exception,
            # seems to be the best we can do.
            sys.stderr.write('\nEXCEPTION %r: ' % (e.__class__,))
            sys.stderr.write('%s\n\n' % (e,))
        server.finish_subsystem()

class SFTPAbsoluteServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using absolute urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        homedir = self._homedir
        if sys.platform != 'win32':
            # Remove the initial '/' on all platforms but win32
            homedir = homedir[1:]
        return self._get_sftp_url(urlutils.escape(homedir))


class SFTPHomeDirServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using homedir relative urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url("~/")


class SFTPSiblingAbsoluteServer(SFTPAbsoluteServer):
    """A test server for sftp transports where only absolute paths will work.

    It does this by serving from a deeply-nested directory that doesn't exist.
    """

    def setUp(self, backing_server=None):
        self._server_homedir = '/dev/noone/runs/tests/here'
        super(SFTPSiblingAbsoluteServer, self).setUp(backing_server)

def get_test_permutations():
    """Return the permutations to be used in testing."""
    from bzrlib.tests import stub_sftp
    return [(SFTPTransport, stub_sftp.SFTPAbsoluteServer),
            (SFTPTransport, stub_sftp.SFTPHomeDirServer),
            (SFTPTransport, stub_sftp.SFTPSiblingAbsoluteServer),
            ]