# Copyright (C) 2005 Robey Pointer <robey@lag.net>
# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Implementation of Transport over SFTP, using paramiko."""
20
# TODO: Remove the transport-based lock_read and lock_write methods. They'll
21
# then raise TransportNotPossible, which will break remote access to any
22
# formats which rely on OS-level locks. That should be fine as those formats
23
# are pretty old, but these combinations may have to be removed from the test
24
# suite. Those formats all date back to 0.7; so we should be able to remove
25
# these methods when we officially drop support for those formats.
from bzrlib.config import config_dir, ensure_config_dir_exists
from bzrlib.errors import (ConnectionError,
                           FileExists,
                           NoSuchFile, PathNotChild,
                           TransportError,
                           TransportNotPossible,
                           ParamikoNotPresent,
                           )
from bzrlib.osutils import pathjoin, fancy_rename, getcwd
from bzrlib.symbol_versioning import (
from bzrlib.trace import mutter, warning
from bzrlib.transport import (
    register_urlparse_netloc_protocol,
    )
# Disable one particular warning that comes from paramiko in Python2.5; if
# this is emitted at the wrong time it tends to cause spurious test failures
# or at least noise in the test case::
#
#   [1770/7639 in 86s, 1 known failures, 50 skipped, 2 missing features]
#   test_permissions.TestSftpPermissions.test_new_files
#   /var/lib/python-support/python2.5/paramiko/message.py:226: DeprecationWarning: integer argument expected, got float
#     self.packet.write(struct.pack('>I', n))
warnings.filterwarnings('ignore',
        'integer argument expected, got float',
        category=DeprecationWarning,
        module='paramiko.message')
from paramiko.sftp import (SFTP_FLAG_WRITE, SFTP_FLAG_CREATE,
                           SFTP_FLAG_EXCL, SFTP_FLAG_TRUNC,
                           CMD_HANDLE, CMD_OPEN)
from paramiko.sftp_attr import SFTPAttributes
from paramiko.sftp_file import SFTPFile
from paramiko.sftp_client import SFTPClient


register_urlparse_netloc_protocol('sftp')
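# urlparse only splits the user:password@host:port portion into 'netloc' for
# schemes registered as using a network location, which is why 'sftp' is
# registered above. A minimal illustration (assumption: not part of the
# original module, the helper name is invented for the example):
def _demo_sftp_netloc_split():
    import urlparse
    if 'sftp' not in urlparse.uses_netloc:
        urlparse.uses_netloc.append('sftp')
    parts = urlparse.urlparse('sftp://user@example.com:2222/some/path',
                              allow_fragments=False)
    # parts[1] == 'user@example.com:2222', parts[2] == '/some/path'
    return parts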
# don't use prefetch unless paramiko version >= 1.5.2 (there were bugs earlier)
_default_do_prefetch = False
if getattr(paramiko, '__version_info__', (0, 0, 0)) >= (1, 5, 2):
    _default_do_prefetch = True


if sys.platform == 'win32':
    # close_fds not supported on win32
    _close_fds = False
else:
    _close_fds = True
_ssh_vendor = None

def _get_ssh_vendor():
    """Find out what version of SSH is on the system."""
    global _ssh_vendor
    if _ssh_vendor is not None:
        return _ssh_vendor

    _ssh_vendor = 'none'

    if 'BZR_SSH' in os.environ:
        _ssh_vendor = os.environ['BZR_SSH']
        if _ssh_vendor == 'paramiko':
            # 'none' means don't drive an external ssh client
            _ssh_vendor = 'none'
        return _ssh_vendor

    try:
        p = subprocess.Popen(['ssh', '-V'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # the return code is only meaningful after communicate() has run
        stdout, stderr = p.communicate()
        returncode = p.returncode
    except OSError:
        returncode = -1
        stdout = stderr = ''
    if 'OpenSSH' in stderr:
        mutter('ssh implementation is OpenSSH')
        _ssh_vendor = 'openssh'
    elif 'SSH Secure Shell' in stderr:
        mutter('ssh implementation is SSH Corp.')
        _ssh_vendor = 'ssh'

    if _ssh_vendor != 'none':
        return _ssh_vendor

    # XXX: 20051123 jamesh
    # A check for putty's plink or lsh would go here.

    mutter('falling back to paramiko implementation')
    return _ssh_vendor
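# Illustrative note (not original code): the detection above can be forced
# from the environment, e.g. running
#
#   BZR_SSH=openssh bzr log sftp://example.com/srv/repo
#
# selects the external OpenSSH client, while BZR_SSH=paramiko maps to the
# 'none' vendor and keeps everything in-process via paramiko.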
class SFTPSubprocess:
    """A socket-like object that talks to an ssh subprocess via pipes."""
    def __init__(self, hostname, vendor, port=None, user=None):
        assert vendor in ['openssh', 'ssh']
        if vendor == 'openssh':
            args = ['ssh',
                    '-oForwardX11=no', '-oForwardAgent=no',
                    '-oClearAllForwardings=yes', '-oProtocol=2',
                    '-oNoHostAuthenticationForLocalhost=yes']
            if port is not None:
                args.extend(['-p', str(port)])
            if user is not None:
                args.extend(['-l', user])
            args.extend(['-s', hostname, 'sftp'])
        elif vendor == 'ssh':
            args = ['ssh']
            if port is not None:
                args.extend(['-p', str(port)])
            if user is not None:
                args.extend(['-l', user])
            args.extend(['-s', 'sftp', hostname])
        self.proc = subprocess.Popen(args, close_fds=_close_fds,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)

    def send(self, data):
        return os.write(self.proc.stdin.fileno(), data)

    def recv_ready(self):
        # TODO: jam 20051215 this function is necessary to support the
        # pipelined() function. In reality, it probably should use
        # poll() or select() to actually return if there is data
        # available, otherwise we probably don't get any benefit
        return True

    def recv(self, count):
        return os.read(self.proc.stdout.fileno(), count)

    def close(self):
        self.proc.stdin.close()
        self.proc.stdout.close()
class LoopbackSFTP(object):
    """Simple wrapper for a socket that pretends to be a paramiko Channel."""

    def __init__(self, sock):
        self.__socket = sock

    def send(self, data):
        return self.__socket.send(data)

    def recv(self, n):
        return self.__socket.recv(n)

    def recv_ready(self):
        return True

    def close(self):
        self.__socket.close()


# This is a weakref dictionary, so that we can reuse connections
# that are still active. Long term, it might be nice to have some
# sort of expiration policy, such as disconnect if inactive for
# X seconds. But that requires a lot more fanciness.
_connected_hosts = weakref.WeakValueDictionary()
197
def load_host_keys():
199
Load system host keys (probably doesn't work on windows) and any
200
"discovered" keys from previous sessions.
202
global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
204
SYSTEM_HOSTKEYS = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
206
mutter('failed to load system host keys: ' + str(e))
207
bzr_hostkey_path = pathjoin(config_dir(), 'ssh_host_keys')
209
BZR_HOSTKEYS = paramiko.util.load_host_keys(bzr_hostkey_path)
211
mutter('failed to load bzr host keys: ' + str(e))
215
def save_host_keys():
217
Save "discovered" host keys in $(config)/ssh_host_keys/.
219
global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
220
bzr_hostkey_path = pathjoin(config_dir(), 'ssh_host_keys')
221
ensure_config_dir_exists()
224
f = open(bzr_hostkey_path, 'w')
225
f.write('# SSH host keys collected by bzr\n')
226
for hostname, keys in BZR_HOSTKEYS.iteritems():
227
for keytype, key in keys.iteritems():
228
f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
231
mutter('failed to save bzr host keys: ' + str(e))
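# Illustrative sketch (not part of the original module): save_host_keys()
# emits one key per line in the same 'hostname keytype base64' layout that
# paramiko.util.load_host_keys() reads back, e.g.
#
#   example.com ssh-rsa AAAAB3NzaC1yc2EA...
#
def _format_host_key_line(hostname, keytype, key):
    """Render a single cached host key the way save_host_keys() writes it."""
    return '%s %s %s\n' % (hostname, keytype, key.get_base64())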
_paramiko_version = getattr(paramiko, '__version_info__', (0, 0, 0))
# don't use prefetch unless paramiko version >= 1.5.5 (there were bugs earlier)
_default_do_prefetch = (_paramiko_version >= (1, 5, 5))
class SFTPLock(object):
    """This fakes a lock in a remote location.

    A present lock is indicated just by the existence of a file. This
    doesn't work well on all transports and they are only used in
    deprecated storage formats.
    """

    __slots__ = ['path', 'lock_path', 'lock_file', 'transport']

    def __init__(self, path, transport):
        assert isinstance(transport, SFTPTransport)

        self.lock_file = None
        self.lock_path = path + '.write-lock'
        # What specific errors should we catch here?
class SFTPTransport(ConnectedTransport):
    """Transport implementation for SFTP access."""

    _do_prefetch = _default_do_prefetch
    def __init__(self, base, clone_from=None):
        assert base.startswith('sftp://')
        self._parse_url(base)
        base = self._unparse_url()
        super(SFTPTransport, self).__init__(base)
        if clone_from is None:
            self._sftp_connect()
        else:
            # use the same ssh connection, etc
            self._sftp = clone_from._sftp
        # super saves 'self.base'

    def should_cache(self):
        """
        Return True if the data pulled across should be cached locally.
        """
        return True

    def clone(self, offset=None):
        """
        Return a new SFTPTransport with root at self.base + offset.
        We share the same SFTP session between such transports, because it's
        fairly expensive to set them up.
        """
        if offset is None:
            return SFTPTransport(self.base, self)
        else:
            return SFTPTransport(self.abspath(offset), self)

    def abspath(self, relpath):
        """
        Return the full url to the given relative path.

        @param relpath: the relative path or path components
        @type relpath: str or list
        """
        return self._unparse_url(self._remote_path(relpath))
    # TODO: jam 20060717 Conceivably these could be configurable, either
    #       by auto-tuning at run-time, or by a configuration (per host??)
    #       but the performance curve is pretty flat, so just going with
    #       reasonable defaults.
    _max_readv_combine = 200
    # Having to round trip to the server means waiting for a response,
    # so it is better to download extra bytes.
    # 8KiB had good performance for both local and remote network operations
    _bytes_to_read_before_seek = 8192

    # The sftp spec says that implementations SHOULD allow reads
    # to be at least 32K. paramiko.readv() does an async request
    # for the chunks. So we need to keep it within a single request
    # size for paramiko <= 1.6.1. paramiko 1.6.2 will probably chop
    # up the request itself, rather than us having to worry about it
    _max_request_size = 32768

    def __init__(self, base, _from_transport=None):
        super(SFTPTransport, self).__init__(base,
                                            _from_transport=_from_transport)
    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        relpath is a urlencoded string.
        """
        # FIXME: share the common code across transports
        assert isinstance(relpath, basestring)
        relpath = urllib.unquote(relpath).split('/')
        basepath = self._path.split('/')
        if len(basepath) > 0 and basepath[-1] == '':
            basepath = basepath[:-1]

        for p in relpath:
            if p == '..':
                if len(basepath) == 0:
                    # In most filesystems, a request for the parent
                    # of root, just returns root.
                    continue
                basepath.pop()
            elif p == '.':
                continue
            else:
                basepath.append(p)

        path = '/'.join(basepath)
        return path
    def relpath(self, abspath):
        username, password, host, port, path = self._split_url(abspath)
        error = []
        if (username != self._username):
            error.append('username mismatch')
        if (host != self._host):
            error.append('host mismatch')
        if (port != self._port):
            error.append('port mismatch')
        if (not path.startswith(self._path)):
            error.append('path mismatch')
        if error:
            extra = ': ' + ', '.join(error)
            raise PathNotChild(abspath, self.base, extra=extra)
        pl = len(self._path)
        return path[pl:].strip('/')
    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        relative = urlutils.unescape(relpath).encode('utf-8')
        remote_path = self._combine_paths(self._path, relative)
        # the initial slash should be removed from the path, and treated as a
        # homedir relative path (the path begins with a double slash if it is
        # absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as its too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'
        # vila--20070602 and leave absolute paths begin with a single slash.
        if remote_path.startswith('/~/'):
            remote_path = remote_path[3:]
        elif remote_path == '/~':
            remote_path = ''
        return remote_path
    def _create_connection(self, credentials=None):
        """Create a new connection with the provided credentials.

        :param credentials: The credentials needed to establish the connection.

        :return: The created connection and its associated credentials.

        The credentials are only the password as it may have been entered
        interactively by the user and may be different from the one provided
        in base url at transport creation time.
        """
        if credentials is None:
            password = self._password
        else:
            password = credentials

        vendor = ssh._get_ssh_vendor()
        connection = vendor.connect_sftp(self._user, password,
                                         self._host, self._port)
        return connection, password

    def _get_sftp(self):
        """Ensures that a connection is established"""
        connection = self._get_connection()
        if connection is None:
            # First connection ever
            connection, credentials = self._create_connection()
            self._set_connection(connection, credentials)
        return connection
    def has(self, relpath):
        """
        Does the target location exist?
        """
        try:
            self._get_sftp().stat(self._remote_path(relpath))
            return True
        except IOError:
            return False

    def get(self, relpath):
        """Get the file at the given relative path."""
        try:
            path = self._remote_path(relpath)
            f = self._get_sftp().file(path, mode='rb')
            if self._do_prefetch and (getattr(f, 'prefetch', None) is not None):
                f.prefetch()
            return f
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving',
                failure_exc=errors.ReadError)
    def _readv(self, relpath, offsets):
        """See Transport.readv()"""
        # We overload the default readv() because we want to use a file
        # that does not have prefetch enabled.
        # Also, if we have a new paramiko, it implements an async readv()
        try:
            path = self._remote_path(relpath)
            fp = self._get_sftp().file(path, mode='rb')
            readv = getattr(fp, 'readv', None)
            if readv:
                return self._sftp_readv(fp, offsets, relpath)
            mutter('seek and read %s offsets', len(offsets))
            return self._seek_and_read(fp, offsets, relpath)
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving')
    def get_partial(self, relpath, start, length=None):
        """
        Get just part of a file.

        :param relpath: Path to the file, relative to base
        :param start: The starting position to read from
        :param length: The length to read. A length of None indicates
                       read to the end of the file.
        :return: A file-like object containing at least the specified bytes.
                 Some implementations may return objects which can be read
                 past this length, but this is not guaranteed.
        """
        # TODO: implement get_partial_multi to help with knit support
        f = self.get(relpath)
        f.seek(start)
        if self._do_prefetch and hasattr(f, 'prefetch'):
            f.prefetch()
        return f

    def put(self, relpath, f, mode=None):
        """
        Copy the file-like or string object into the location.
        """

    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For SFTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
    def _sftp_readv(self, fp, offsets, relpath='<unknown>'):
        """Use the readv() member of fp to do async readv.

        And then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants
        """
        offsets = list(offsets)
        sorted_offsets = sorted(offsets)

        # The algorithm works as follows:
        # 1) Coalesce nearby reads into a single chunk
        #    This generates a list of combined regions, the total size
        #    and the size of the sub regions. This coalescing step is limited
        #    in the number of nearby chunks to combine, and is allowed to
        #    skip small breaks in the requests. Limiting it makes sure that
        #    we can start yielding some data earlier, and skipping means we
        #    make fewer requests. (Beneficial even when using async)
        # 2) Break up this combined regions into chunks that are smaller
        #    than 64KiB. Technically the limit is 65536, but we are a
        #    little bit conservative. This is because sftp has a maximum
        #    return chunk size of 64KiB (max size of an unsigned short)
        # 3) Issue a readv() to paramiko to create an async request for
        #    all of this data
        # 4) Read in the data as it comes back, until we've read one
        #    continuous section as determined in step 1
        # 5) Break up the full sections into hunks for the original requested
        #    offsets. And put them in a cache
        # 6) Check if the next request is in the cache, and if it is, remove
        #    it from the cache, and yield its data. Continue until no more
        #    entries are in the cache.
        # 7) loop back to step 4 until all data has been read
        #
        # TODO: jam 20060725 This could be optimized one step further, by
        #       attempting to yield whatever data we have read, even before
        #       the first coalesced section has been fully processed.

        # When coalescing for use with readv(), we don't really need to
        # use any fudge factor, because the requests are made asynchronously
        coalesced = list(self._coalesce_offsets(sorted_offsets,
                               limit=self._max_readv_combine,
                               fudge_factor=0,
                               ))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # We need to break this up into multiple requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size

        mutter('SFTP.readv() %s offsets => %s coalesced => %s requests',
                len(offsets), len(coalesced), len(requests))

        # Queue the current read until we have read the full coalesced section
        cur_data = []
        cur_data_len = 0
        cur_coalesced_stack = iter(coalesced)
        cur_coalesced = cur_coalesced_stack.next()

        # Cache the results, but only until they have been fulfilled
        data_map = {}
        # turn the list of offsets into a stack
        offset_stack = iter(offsets)
        cur_offset_and_size = offset_stack.next()

        for data in fp.readv(requests):
            cur_data.append(data)
            cur_data_len += len(data)

            if cur_data_len < cur_coalesced.length:
                continue
            if cur_data_len != cur_coalesced.length:
                raise AssertionError(
                    "Somehow we read too much: %s != %s"
                    % (cur_data_len, cur_coalesced.length))
            all_data = ''.join(cur_data)
            cur_data = []
            cur_data_len = 0

            for suboffset, subsize in cur_coalesced.ranges:
                key = (cur_coalesced.start+suboffset, subsize)
                data_map[key] = all_data[suboffset:suboffset+subsize]

            # Now that we've read some data, see if we can yield anything back
            while cur_offset_and_size in data_map:
                this_data = data_map.pop(cur_offset_and_size)
                yield cur_offset_and_size[0], this_data
                cur_offset_and_size = offset_stack.next()

            # We read a coalesced entry, so mark it as done
            # Now that we've read all of the data for this coalesced section
            # move on to the next one
            try:
                cur_coalesced = cur_coalesced_stack.next()
            except StopIteration:
                cur_coalesced = None

        if cur_coalesced is not None:
            raise errors.ShortReadvError(relpath, cur_coalesced.start,
                                         cur_coalesced.length, len(data))
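    # Worked illustration (not original code) of step 2 above: a coalesced
    # region of start=0, length=70000 with _max_request_size=32768 is chopped
    # into the request list
    #
    #   [(0, 32768), (32768, 32768), (65536, 4464)]
    #
    # which is what fp.readv() receives in step 3.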
    def put_file(self, relpath, f, mode=None):
        """
        Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param f:       File-like object.
        :param mode: The final mode for the file
        """
        final_path = self._remote_path(relpath)
        return self._put(final_path, f, mode=mode)

    def _put(self, abspath, f, mode=None):
        """Helper function so both put() and copy_abspaths can reuse the code"""
        self._get_sftp().remove(tmp_abspath)
        # raise the original with its traceback if we can.
    def _put_non_atomic_helper(self, relpath, writer, mode=None,
                               create_parent_dir=False,
                               dir_mode=None):
        abspath = self._remote_path(relpath)

        # TODO: jam 20060816 paramiko doesn't publicly expose a way to
        #       set the file mode at create time. If it does, use it.
        #       But for now, we just chmod later anyway.

        def _open_and_write_file():
            """Try to open the target file, raise error on failure"""
            fout = None
            try:
                try:
                    fout = self._get_sftp().file(abspath, mode='wb')
                    fout.set_pipelined(True)
                    writer(fout)
                except (paramiko.SSHException, IOError), e:
                    self._translate_io_exception(e, abspath,
                                                 ': unable to open')

                # This is designed to chmod() right before we close.
                # Because we set_pipelined() earlier, theoretically we might
                # avoid the round trip for fout.close()
                if mode is not None:
                    self._get_sftp().chmod(abspath, mode)
            finally:
                if fout is not None:
                    fout.close()

        if not create_parent_dir:
            _open_and_write_file()
            return

        try:
            # Try error handling to create the parent directory if we need to
            _open_and_write_file()
        except NoSuchFile:
            # Try to create the parent directory, and then go back to
            # writing the file
            parent_dir = os.path.dirname(abspath)
            self._mkdir(parent_dir, dir_mode)
            _open_and_write_file()

    def put_file_non_atomic(self, relpath, f, mode=None,
                            create_parent_dir=False,
                            dir_mode=None):
        """Copy the file-like object into the target location.

        This function is not strictly safe to use. It is only meant to
        be used when you already know that the target does not exist.
        It is not safe, because it will open and truncate the remote
        file. So there may be a time when the file has invalid contents.

        :param relpath: The remote location to put the contents.
        :param f:       File-like object.
        :param mode:    Possible access permissions for new file.
                        None means do not set remote permissions.
        :param create_parent_dir: If we cannot create the target file because
                        the parent directory does not exist, go ahead and
                        create it, and then try again.
        """
        def writer(fout):
            self._pump(f, fout)
        self._put_non_atomic_helper(relpath, writer, mode=mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)

    def put_bytes_non_atomic(self, relpath, bytes, mode=None,
                             create_parent_dir=False,
                             dir_mode=None):
        def writer(fout):
            fout.write(bytes)
        self._put_non_atomic_helper(relpath, writer, mode=mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)
    def iter_files_recursive(self):
        """Walk the relative paths of all files in this transport."""
        queue = list(self.list_dir('.'))
        while queue:
            relpath = queue.pop(0)
            st = self.stat(relpath)
            if stat.S_ISDIR(st.st_mode):
                for i, basename in enumerate(self.list_dir(relpath)):
                    queue.insert(i, relpath+'/'+basename)
            else:
                yield relpath

    def _mkdir(self, abspath, mode=None):
        if mode is None:
            local_mode = 0777
        else:
            local_mode = mode
        try:
            # In the paramiko documentation, it says that passing a mode flag
            # will be filtered against the server umask.
            # StubSFTPServer does not do this, which would be nice, because it is
            # what we really want :)
            # However, real servers do use umask, so we really should do it that way
            self._get_sftp().mkdir(abspath, local_mode)
            if mode is not None:
                self._get_sftp().chmod(abspath, mode=mode)
        except (paramiko.SSHException, IOError), e:
            self._translate_io_exception(e, abspath, ': unable to mkdir',
                failure_exc=FileExists)

    def mkdir(self, relpath, mode=None):
        """Create a directory at the given path."""
        self._mkdir(self._remote_path(relpath), mode=mode)
    def open_write_stream(self, relpath, mode=None):
        """See Transport.open_write_stream."""
        # initialise the file to zero-length
        # this is three round trips, but we don't use this
        # api more than once per write_group at the moment so
        # it is a tolerable overhead. Better would be to truncate
        # the file after opening. RBC 20070805
        self.put_bytes_non_atomic(relpath, "", mode)
        abspath = self._remote_path(relpath)
        # TODO: jam 20060816 paramiko doesn't publicly expose a way to
        #       set the file mode at create time. If it does, use it.
        #       But for now, we just chmod later anyway.
        try:
            handle = self._get_sftp().file(abspath, mode='wb')
            handle.set_pipelined(True)
        except (paramiko.SSHException, IOError), e:
            self._translate_io_exception(e, abspath,
                                         ': unable to open')
        _file_streams[self.abspath(relpath)] = handle
        return FileFileStream(self, relpath, handle)
    def _translate_io_exception(self, e, path, more_info='',
                                failure_exc=PathError):
        """Translate a paramiko or IOError into a friendlier exception."""

    def lock_write(self, relpath):
        # A lock is represented by a file that must not already exist; if
        # creating it succeeds, we assume
        # that we have taken the lock.
        return SFTPLock(relpath, self)
    def _unparse_url(self, path=None):
        if path is None:
            path = self._path
        path = urllib.quote(path)
        # handle homedir paths
        if not path.startswith('/'):
            path = "/~/" + path
        netloc = urllib.quote(self._host)
        if self._username is not None:
            netloc = '%s@%s' % (urllib.quote(self._username), netloc)
        if self._port is not None:
            netloc = '%s:%d' % (netloc, self._port)
        return urlparse.urlunparse(('sftp', netloc, path, '', '', ''))

    def _split_url(self, url):
        if isinstance(url, unicode):
            url = url.encode('utf-8')
        (scheme, netloc, path, params,
         query, fragment) = urlparse.urlparse(url, allow_fragments=False)
        assert scheme == 'sftp'
        username = password = host = port = None
        if '@' in netloc:
            username, host = netloc.split('@', 1)
            if ':' in username:
                username, password = username.split(':', 1)
                password = urllib.unquote(password)
            username = urllib.unquote(username)
        else:
            host = netloc

        if ':' in host:
            host, port = host.rsplit(':', 1)
            try:
                port = int(port)
            except ValueError:
                # TODO: Should this be ConnectionError?
                raise TransportError('%s: invalid port number' % port)
        host = urllib.unquote(host)

        path = urllib.unquote(path)

        # the initial slash should be removed from the path, and treated
        # as a homedir relative path (the path begins with a double slash
        # if it is absolute).
        # see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as its too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'.
        # handle homedir paths
        if path.startswith('/~/'):
            path = path[3:]
        elif path == '/~':
            path = ''
        return (username, password, host, port, path)

    def _parse_url(self, url):
        (self._username, self._password,
         self._host, self._port, self._path) = self._split_url(url)
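    # For illustration (values invented): with the rules above,
    #   'sftp://jane:secret@example.com:2222/~/repo'
    # splits into username 'jane', password 'secret', host 'example.com',
    # port 2222 and path 'repo', while 'sftp://example.com/srv/repo'
    # keeps the absolute path '/srv/repo'.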
    def _sftp_connect(self):
        """Connect to the remote sftp server.
        After this, self._sftp should have a valid connection (or
        we raise a TransportError 'could not connect').

        TODO: Raise a more reasonable ConnectionFailed exception
        """
        global _connected_hosts

        idx = (self._host, self._port, self._username)
        try:
            self._sftp = _connected_hosts[idx]
            return
        except KeyError:
            pass

        vendor = _get_ssh_vendor()
        if vendor == 'loopback':
            sock = socket.socket()
            sock.connect((self._host, self._port))
            self._sftp = SFTPClient(LoopbackSFTP(sock))
        elif vendor != 'none':
            sock = SFTPSubprocess(self._host, vendor, self._port,
                                  self._username)
            self._sftp = SFTPClient(sock)
        else:
            self._paramiko_connect()

        _connected_hosts[idx] = self._sftp
    def _paramiko_connect(self):
        global SYSTEM_HOSTKEYS, BZR_HOSTKEYS

        load_host_keys()

        try:
            t = paramiko.Transport((self._host, self._port or 22))
            t.set_log_channel('bzr.paramiko')
            t.start_client()
        except paramiko.SSHException, e:
            raise ConnectionError('Unable to reach SSH host %s:%d' %
                                  (self._host, self._port), e)

        server_key = t.get_remote_server_key()
        server_key_hex = paramiko.util.hexify(server_key.get_fingerprint())
        keytype = server_key.get_name()
        if SYSTEM_HOSTKEYS.has_key(self._host) and SYSTEM_HOSTKEYS[self._host].has_key(keytype):
            our_server_key = SYSTEM_HOSTKEYS[self._host][keytype]
            our_server_key_hex = paramiko.util.hexify(our_server_key.get_fingerprint())
        elif BZR_HOSTKEYS.has_key(self._host) and BZR_HOSTKEYS[self._host].has_key(keytype):
            our_server_key = BZR_HOSTKEYS[self._host][keytype]
            our_server_key_hex = paramiko.util.hexify(our_server_key.get_fingerprint())
        else:
            warning('Adding %s host key for %s: %s' % (keytype, self._host, server_key_hex))
            if not BZR_HOSTKEYS.has_key(self._host):
                BZR_HOSTKEYS[self._host] = {}
            BZR_HOSTKEYS[self._host][keytype] = server_key
            our_server_key = server_key
            our_server_key_hex = paramiko.util.hexify(our_server_key.get_fingerprint())
            save_host_keys()

        if server_key != our_server_key:
            filename1 = os.path.expanduser('~/.ssh/known_hosts')
            filename2 = pathjoin(config_dir(), 'ssh_host_keys')
            raise TransportError('Host keys for %s do not match! %s != %s' % \
                (self._host, our_server_key_hex, server_key_hex),
                ['Try editing %s or %s' % (filename1, filename2)])

        self._sftp_auth(t)

        try:
            self._sftp = t.open_sftp_client()
        except paramiko.SSHException, e:
            raise ConnectionError('Unable to start sftp client %s:%d' %
                                  (self._host, self._port), e)
    def _sftp_auth(self, transport):
        # paramiko requires a username, but it might be none if nothing was supplied
        # use the local username, just in case.
        # We don't override self._username, because if we aren't using paramiko,
        # the username might be specified in ~/.ssh/config and we don't want to
        # force it to something else
        # Also, it would mess up the self.relpath() functionality
        username = self._username or getpass.getuser()

        # Paramiko tries to open a socket.AF_UNIX in order to connect
        # to ssh-agent. That attribute doesn't exist on win32 (it does in cygwin)
        # so we get an AttributeError exception. For now, just don't try to
        # connect to an agent if we are on win32
        if sys.platform != 'win32':
            agent = paramiko.Agent()
            for key in agent.get_keys():
                mutter('Trying SSH agent key %s' % paramiko.util.hexify(key.get_fingerprint()))
                try:
                    transport.auth_publickey(username, key)
                    return
                except paramiko.SSHException, e:
                    pass

        # okay, try finding id_rsa or id_dss? (posix only)
        if self._try_pkey_auth(transport, paramiko.RSAKey, username, 'id_rsa'):
            return
        if self._try_pkey_auth(transport, paramiko.DSSKey, username, 'id_dsa'):
            return

        if self._password:
            try:
                transport.auth_password(username, self._password)
                return
            except paramiko.SSHException, e:
                pass

            # FIXME: Don't keep a password held in memory if you can help it
            #self._password = None

        # give up and ask for a password
        password = bzrlib.ui.ui_factory.get_password(
                prompt='SSH %(user)s@%(host)s password',
                user=username, host=self._host)
        try:
            transport.auth_password(username, password)
        except paramiko.SSHException, e:
            raise ConnectionError('Unable to authenticate to SSH host as %s@%s' %
                                  (username, self._host), e)

    def _try_pkey_auth(self, transport, pkey_class, username, filename):
        filename = os.path.expanduser('~/.ssh/' + filename)
        try:
            key = pkey_class.from_private_key_file(filename)
            transport.auth_publickey(username, key)
            return True
        except paramiko.PasswordRequiredException:
            password = bzrlib.ui.ui_factory.get_password(
                    prompt='SSH %(filename)s password',
                    filename=filename)
            try:
                key = pkey_class.from_private_key_file(filename, password)
                transport.auth_publickey(username, key)
                return True
            except paramiko.SSHException:
                mutter('SSH authentication via %s key failed.' % (os.path.basename(filename),))
        except paramiko.SSHException:
            mutter('SSH authentication via %s key failed.' % (os.path.basename(filename),))
        return False
    def _sftp_open_exclusive(self, abspath, mode=None):
        """Open a remote path exclusively.

        :param abspath: The remote absolute path where the file should be opened
        :param mode: The mode permissions bits for the new file
        """
        # TODO: jam 20060816 Paramiko >= 1.6.2 (probably earlier) supports
        #       using the 'x' flag to indicate SFTP_FLAG_EXCL.
        #       However, there is no way to set the permission mode at open
        #       time using the sftp_client.file() functionality.
        path = self._get_sftp()._adjust_cwd(abspath)
        # mutter('sftp abspath %s => %s', abspath, path)
        attr = SFTPAttributes()
        if mode is not None:
            attr.st_mode = mode
        omode = (SFTP_FLAG_WRITE | SFTP_FLAG_CREATE
                 | SFTP_FLAG_TRUNC | SFTP_FLAG_EXCL)
        try:
            t, msg = self._get_sftp()._request(CMD_OPEN, path, omode, attr)
            if t != CMD_HANDLE:
                raise TransportError('Expected an SFTP handle')
            handle = msg.get_string()
            return SFTPFile(self._get_sftp(), handle, 'wb', -1)
        except (paramiko.SSHException, IOError), e:
            self._translate_io_exception(e, abspath, ': unable to open',
                failure_exc=FileExists)
    def _can_roundtrip_unix_modebits(self):
        if sys.platform == 'win32':
            return False
        else:
            return True


# ------------- server test implementation --------------

from bzrlib.tests.stub_sftp import StubServer, StubSFTPServer
class SingleListener(threading.Thread):

    def __init__(self, callback):
        threading.Thread.__init__(self)
        self._callback = callback
        self._socket = socket.socket()
        self._socket.bind(('localhost', 0))
        self._socket.listen(1)
        self.port = self._socket.getsockname()[1]
        self.stop_event = threading.Event()

    def run(self):
        s, _ = self._socket.accept()
        # now close the listen socket
        self._socket.close()
        try:
            self._callback(s, self.stop_event)
        except socket.error:
            pass #Ignore socket errors
        except Exception, x:
            # probably a failed test
            warning('Exception from within unit test server thread: %r' % x)

    def stop(self):
        self.stop_event.set()


class SocketListener(threading.Thread):

    def __init__(self, callback):
        threading.Thread.__init__(self)
        self._callback = callback
        self._socket = socket.socket()
        self._socket.bind(('localhost', 0))
        self._socket.listen(1)
        self.port = self._socket.getsockname()[1]
        self._stop_event = threading.Event()

    def stop(self):
        # called from outside this thread
        self._stop_event.set()
        # use a timeout here, because if the test fails, the server thread may
        # never notice the stop_event.
        self.join(5.0)

    def run(self):
        while True:
            readable, writable_unused, exception_unused = \
                select.select([self._socket], [], [], 0.1)
            if self._stop_event.isSet():
                return
            if len(readable) == 0:
                continue
            try:
                s, addr_unused = self._socket.accept()
                # because the loopback socket is inline, and transports are
                # never explicitly closed, best to launch a new thread.
                threading.Thread(target=self._callback, args=(s,)).start()
            except socket.error, x:
                sys.excepthook(*sys.exc_info())
                warning('Socket error during accept() within unit test server'
                        ' thread: %r' % x)
            except Exception, x:
                # probably a failed test; unit test thread will log the
                # failure/error
                sys.excepthook(*sys.exc_info())
                warning('Exception from within unit test server thread: %r' %
                        x)
class SocketDelay(object):
    """A socket decorator to make TCP appear slower.

    This changes recv, send, and sendall to add a fixed latency to each python
    call if a new roundtrip is detected. That is, when a recv is called and the
    flag new_roundtrip is set, latency is charged. Every send and send_all
    sets this flag.

    In addition every send, sendall and recv sleeps a bit per character sent to
    simulate bandwidth.

    Not all methods are implemented, this is deliberate as this class is not a
    replacement for the builtin sockets layer. fileno is not implemented to
    prevent the proxy being bypassed.
    """

    simulated_time = 0
    _proxied_arguments = dict.fromkeys([
        "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
        "setblocking", "setsockopt", "settimeout", "shutdown"])

    def __init__(self, sock, latency, bandwidth=1.0,
                 really_sleep=True):
        """
        :param bandwidth: simulated bandwidth (MegaBit)
        :param really_sleep: If set to false, the SocketDelay will just
        increase a counter, instead of calling time.sleep. This is useful for
        unittesting the SocketDelay.
        """
        self.sock = sock
        self.latency = latency
        self.really_sleep = really_sleep
        self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
        self.new_roundtrip = False

    def sleep(self, s):
        if self.really_sleep:
            time.sleep(s)
        else:
            SocketDelay.simulated_time += s

    def __getattr__(self, attr):
        if attr in SocketDelay._proxied_arguments:
            return getattr(self.sock, attr)
        raise AttributeError("'SocketDelay' object has no attribute %r" %
                             attr)

    def dup(self):
        return SocketDelay(self.sock.dup(), self.latency, self.time_per_byte,
                           self.really_sleep)

    def recv(self, *args):
        data = self.sock.recv(*args)
        if data and self.new_roundtrip:
            self.new_roundtrip = False
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return data

    def sendall(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return self.sock.sendall(data, flags)

    def send(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        bytes_sent = self.sock.send(data, flags)
        self.sleep(bytes_sent * self.time_per_byte)
        return bytes_sent
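# Hedged usage sketch (not part of the original module): wrap one end of a
# connected socket to simulate a slow link in tests. With really_sleep=False
# the delay is only accumulated in SocketDelay.simulated_time instead of
# actually sleeping.
def _example_delayed_socket(sock):
    """Return sock wrapped to add ~30ms latency on a simulated 1 MBit link."""
    return SocketDelay(sock, latency=0.03, bandwidth=1.0, really_sleep=False)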
class SFTPServer(Server):
    """Common code for SFTP server facilities."""

    def __init__(self, server_interface=StubServer):
        self._original_vendor = None
        self._homedir = None
        self._server_homedir = None
        self._listener = None
        self._root = None
        self._vendor = ssh.ParamikoVendor()
        self._server_interface = server_interface
        self.add_latency = 0
        # sftp server logs
        self.logs = []

    def _get_sftp_url(self, path):
        """Calculate an sftp url to this server for path."""

    def log(self, message):
        """StubServer uses this to log when a new server is created."""
        self.logs.append(message)

    def _run_server_entry(self, sock):
        """Entry point for all implementations of _run_server.

        If self.add_latency is > 0.000001 then sock is given a latency adding
        decorator.
        """
        if self.add_latency > 0.000001:
            sock = SocketDelay(sock, self.add_latency)
        return self._run_server(sock)

    def _run_server(self, s):
        ssh_server = paramiko.Transport(s)
        key_file = pathjoin(self._homedir, 'test_rsa.key')
        f = open(key_file, 'w')
        f.write(STUB_SERVER_KEY)
        f.close()
        host_key = paramiko.RSAKey.from_private_key_file(key_file)
        ssh_server.add_server_key(host_key)
        server = self._server_interface(self)
        ssh_server.set_subsystem_handler('sftp', paramiko.SFTPServer,
                                         StubSFTPServer, root=self._root,
                                         home=self._server_homedir)
        event = threading.Event()
        ssh_server.start_server(event, server)
    def setUp(self, backing_server=None):
        # XXX: TODO: make sftpserver back onto backing_server rather than local
        # disk.
        if not (backing_server is None or
                isinstance(backing_server, local.LocalURLServer)):
            raise AssertionError(
                "backing_server should not be %r, because this can only serve the "
                "local current working directory." % (backing_server,))
        self._original_vendor = ssh._ssh_vendor_manager._cached_ssh_vendor
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._vendor
        if sys.platform == 'win32':
            # Win32 needs to use the UNICODE api
            self._homedir = getcwd()
        else:
            # But Linux SFTP servers should just deal in bytestreams
            self._homedir = os.getcwd()
        if self._server_homedir is None:
            self._server_homedir = self._homedir
        self._root = '/'
        # FIXME WINDOWS: _root should be _server_homedir[0]:/
        if sys.platform == 'win32':
            self._root = ''
        self._listener = SocketListener(self._run_server_entry)
        self._listener.setDaemon(True)
        self._listener.start()

    def tearDown(self):
        """See bzrlib.transport.Server.tearDown."""
        self._listener.stop()
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._original_vendor

    def get_bogus_url(self):
        """See bzrlib.transport.Server.get_bogus_url."""
        # this is chosen to try to prevent trouble with proxies, wierd dns, etc
        # we bind a random socket, so that we get a guaranteed unused port
        # we just never listen on that port
        s = socket.socket()
        s.bind(('localhost', 0))
        return 'sftp://%s:%s/' % s.getsockname()


class SFTPFullAbsoluteServer(SFTPServer):