1
# Copyright (C) 2005 Robey Pointer <robey@lag.net>
2
# Copyright (C) 2005, 2006, 2007 Canonical Ltd
1
# Copyright (C) 2005 Robey Pointer <robey@lag.net>, Canonical Ltd
4
3
# This program is free software; you can redistribute it and/or modify
5
4
# it under the terms of the GNU General Public License as published by
6
5
# the Free Software Foundation; either version 2 of the License, or
7
6
# (at your option) any later version.
9
8
# This program is distributed in the hope that it will be useful,
10
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
11
# GNU General Public License for more details.
14
13
# You should have received a copy of the GNU General Public License
15
14
# along with this program; if not, write to the Free Software
16
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
17
"""Implementation of Transport over SFTP, using paramiko."""
20
# TODO: Remove the transport-based lock_read and lock_write methods. They'll
21
# then raise TransportNotPossible, which will break remote access to any
22
# formats which rely on OS-level locks. That should be fine as those formats
23
# are pretty old, but these combinations may have to be removed from the test
24
# suite. Those formats all date back to 0.7; so we should be able to remove
25
# these methods when we officially drop support for those formats.
42
from bzrlib.errors import (FileExists,
43
NoSuchFile, PathNotChild,
32
from bzrlib.config import config_dir, ensure_config_dir_exists
33
from bzrlib.errors import (ConnectionError,
35
TransportNotPossible, NoSuchFile, PathNotChild,
37
LockError, ParamikoNotPresent
49
from bzrlib.osutils import pathjoin, fancy_rename, getcwd
50
from bzrlib.symbol_versioning import (
54
from bzrlib.trace import mutter, warning
55
from bzrlib.transport import (
57
register_urlparse_netloc_protocol,
39
from bzrlib.osutils import pathjoin, fancy_rename
40
from bzrlib.trace import mutter, warning, error
41
from bzrlib.transport import Transport, Server, urlescape
70
51
CMD_HANDLE, CMD_OPEN)
71
52
from paramiko.sftp_attr import SFTPAttributes
72
53
from paramiko.sftp_file import SFTPFile
75
register_urlparse_netloc_protocol('sftp')
78
_paramiko_version = getattr(paramiko, '__version_info__', (0, 0, 0))
79
# don't use prefetch unless paramiko version >= 1.5.5 (there were bugs earlier)
80
_default_do_prefetch = (_paramiko_version >= (1, 5, 5))
83
@deprecated_function(zero_nineteen)
84
def clear_connection_cache():
85
"""Remove all hosts from the SFTP connection cache.
87
Primarily useful for test cases wanting to force garbage collection.
88
We don't have a global connection cache anymore.
54
from paramiko.sftp_client import SFTPClient
56
if 'sftp' not in urlparse.uses_netloc:
57
urlparse.uses_netloc.append('sftp')
59
# don't use prefetch unless paramiko version >= 1.5.2 (there were bugs earlier)
60
_default_do_prefetch = False
61
if getattr(paramiko, '__version_info__', (0, 0, 0)) >= (1, 5, 2):
62
_default_do_prefetch = True
66
if sys.platform == 'win32':
67
# close_fds not supported on win32
72
def _get_ssh_vendor():
73
"""Find out what version of SSH is on the system."""
75
if _ssh_vendor is not None:
80
if 'BZR_SSH' in os.environ:
81
_ssh_vendor = os.environ['BZR_SSH']
82
if _ssh_vendor == 'paramiko':
87
p = subprocess.Popen(['ssh', '-V'],
89
stdin=subprocess.PIPE,
90
stdout=subprocess.PIPE,
91
stderr=subprocess.PIPE)
92
returncode = p.returncode
93
stdout, stderr = p.communicate()
97
if 'OpenSSH' in stderr:
98
mutter('ssh implementation is OpenSSH')
99
_ssh_vendor = 'openssh'
100
elif 'SSH Secure Shell' in stderr:
101
mutter('ssh implementation is SSH Corp.')
104
if _ssh_vendor != 'none':
107
# XXX: 20051123 jamesh
108
# A check for putty's plink or lsh would go here.
110
mutter('falling back to paramiko implementation')
114
class SFTPSubprocess:
115
"""A socket-like object that talks to an ssh subprocess via pipes."""
116
def __init__(self, hostname, vendor, port=None, user=None):
117
assert vendor in ['openssh', 'ssh']
118
if vendor == 'openssh':
120
'-oForwardX11=no', '-oForwardAgent=no',
121
'-oClearAllForwardings=yes', '-oProtocol=2',
122
'-oNoHostAuthenticationForLocalhost=yes']
124
args.extend(['-p', str(port)])
126
args.extend(['-l', user])
127
args.extend(['-s', hostname, 'sftp'])
128
elif vendor == 'ssh':
131
args.extend(['-p', str(port)])
133
args.extend(['-l', user])
134
args.extend(['-s', 'sftp', hostname])
136
self.proc = subprocess.Popen(args, close_fds=_close_fds,
137
stdin=subprocess.PIPE,
138
stdout=subprocess.PIPE)
140
def send(self, data):
141
return os.write(self.proc.stdin.fileno(), data)
143
def recv_ready(self):
144
# TODO: jam 20051215 this function is necessary to support the
145
# pipelined() function. In reality, it probably should use
146
# poll() or select() to actually return if there is data
147
# available, otherwise we probably don't get any benefit
150
def recv(self, count):
151
return os.read(self.proc.stdout.fileno(), count)
154
self.proc.stdin.close()
155
self.proc.stdout.close()
159
class LoopbackSFTP(object):
160
"""Simple wrapper for a socket that pretends to be a paramiko Channel."""
162
def __init__(self, sock):
165
def send(self, data):
166
return self.__socket.send(data)
169
return self.__socket.recv(n)
171
def recv_ready(self):
175
self.__socket.close()
181
# This is a weakref dictionary, so that we can reuse connections
182
# that are still active. Long term, it might be nice to have some
183
# sort of expiration policy, such as disconnect if inactive for
184
# X seconds. But that requires a lot more fanciness.
185
_connected_hosts = weakref.WeakValueDictionary()
188
def load_host_keys():
190
Load system host keys (probably doesn't work on windows) and any
191
"discovered" keys from previous sessions.
193
global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
195
SYSTEM_HOSTKEYS = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
197
mutter('failed to load system host keys: ' + str(e))
198
bzr_hostkey_path = pathjoin(config_dir(), 'ssh_host_keys')
200
BZR_HOSTKEYS = paramiko.util.load_host_keys(bzr_hostkey_path)
202
mutter('failed to load bzr host keys: ' + str(e))
206
def save_host_keys():
208
Save "discovered" host keys in $(config)/ssh_host_keys/.
210
global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
211
bzr_hostkey_path = pathjoin(config_dir(), 'ssh_host_keys')
212
ensure_config_dir_exists()
215
f = open(bzr_hostkey_path, 'w')
216
f.write('# SSH host keys collected by bzr\n')
217
for hostname, keys in BZR_HOSTKEYS.iteritems():
218
for keytype, key in keys.iteritems():
219
f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
222
mutter('failed to save bzr host keys: ' + str(e))
91
225
class SFTPLock(object):
92
"""This fakes a lock in a remote location.
94
A present lock is indicated just by the existence of a file. This
95
doesn't work well on all transports and they are only used in
96
deprecated storage formats.
226
"""This fakes a lock in a remote location."""
99
227
__slots__ = ['path', 'lock_path', 'lock_file', 'transport']
101
228
def __init__(self, path, transport):
102
229
assert isinstance(transport, SFTPTransport)
129
256
# What specific errors should we catch here?
133
class SFTPTransport(ConnectedTransport):
134
"""Transport implementation for SFTP access."""
259
class SFTPTransport (Transport):
261
Transport implementation for SFTP access.
136
263
_do_prefetch = _default_do_prefetch
137
# TODO: jam 20060717 Conceivably these could be configurable, either
138
# by auto-tuning at run-time, or by a configuration (per host??)
139
# but the performance curve is pretty flat, so just going with
140
# reasonable defaults.
141
_max_readv_combine = 200
142
# Having to round trip to the server means waiting for a response,
143
# so it is better to download extra bytes.
144
# 8KiB had good performance for both local and remote network operations
145
_bytes_to_read_before_seek = 8192
147
# The sftp spec says that implementations SHOULD allow reads
148
# to be at least 32K. paramiko.readv() does an async request
149
# for the chunks. So we need to keep it within a single request
150
# size for paramiko <= 1.6.1. paramiko 1.6.2 will probably chop
151
# up the request itself, rather than us having to worry about it
152
_max_request_size = 32768
154
def __init__(self, base, from_transport=None):
265
def __init__(self, base, clone_from=None):
155
266
assert base.startswith('sftp://')
156
super(SFTPTransport, self).__init__(base, from_transport)
267
self._parse_url(base)
268
base = self._unparse_url()
271
super(SFTPTransport, self).__init__(base)
272
if clone_from is None:
275
# use the same ssh connection, etc
276
self._sftp = clone_from._sftp
277
# super saves 'self.base'
279
def should_cache(self):
281
Return True if the data pulled across should be cached locally.
285
def clone(self, offset=None):
287
Return a new SFTPTransport with root at self.base + offset.
288
We share the same SFTP session between such transports, because it's
289
fairly expensive to set them up.
292
return SFTPTransport(self.base, self)
294
return SFTPTransport(self.abspath(offset), self)
296
def abspath(self, relpath):
298
Return the full url to the given relative path.
300
@param relpath: the relative path or path components
301
@type relpath: str or list
303
return self._unparse_url(self._remote_path(relpath))
158
305
def _remote_path(self, relpath):
159
306
"""Return the path to be passed along the sftp protocol for relpath.
161
:param relpath: is a urlencoded string.
163
relative = urlutils.unescape(relpath).encode('utf-8')
164
remote_path = self._combine_paths(self._path, relative)
165
# the initial slash should be removed from the path, and treated as a
166
# homedir relative path (the path begins with a double slash if it is
167
# absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
168
# RBC 20060118 we are not using this as its too user hostile. instead
169
# we are following lftp and using /~/foo to mean '~/foo'
170
# vila--20070602 and leave absolute paths begin with a single slash.
171
if remote_path.startswith('/~/'):
172
remote_path = remote_path[3:]
173
elif remote_path == '/~':
177
def _create_connection(self, credentials=None):
178
"""Create a new connection with the provided credentials.
180
:param credentials: The credentials needed to establish the connection.
182
:return: The created connection and its associated credentials.
184
The credentials are only the password as it may have been entered
185
interactively by the user and may be different from the one provided
186
in base url at transport creation time.
188
if credentials is None:
189
password = self._password
191
password = credentials
193
vendor = ssh._get_ssh_vendor()
194
connection = vendor.connect_sftp(self._user, password,
195
self._host, self._port)
196
return connection, password
199
"""Ensures that a connection is established"""
200
connection = self._get_connection()
201
if connection is None:
202
# First connection ever
203
connection, credentials = self._create_connection()
204
self._set_connection(connection, credentials)
208
def should_cache(self):
210
Return True if the data pulled across should be cached locally.
308
relpath is a urlencoded string.
310
# FIXME: share the common code across transports
311
assert isinstance(relpath, basestring)
312
relpath = urllib.unquote(relpath).split('/')
313
basepath = self._path.split('/')
314
if len(basepath) > 0 and basepath[-1] == '':
315
basepath = basepath[:-1]
319
if len(basepath) == 0:
320
# In most filesystems, a request for the parent
321
# of root, just returns root.
329
path = '/'.join(basepath)
332
def relpath(self, abspath):
333
username, password, host, port, path = self._split_url(abspath)
335
if (username != self._username):
336
error.append('username mismatch')
337
if (host != self._host):
338
error.append('host mismatch')
339
if (port != self._port):
340
error.append('port mismatch')
341
if (not path.startswith(self._path)):
342
error.append('path mismatch')
344
extra = ': ' + ', '.join(error)
345
raise PathNotChild(abspath, self.base, extra=extra)
347
return path[pl:].strip('/')
214
349
def has(self, relpath):
216
351
Does the target location exist?
219
self._get_sftp().stat(self._remote_path(relpath))
354
self._sftp.stat(self._remote_path(relpath))
231
366
path = self._remote_path(relpath)
232
f = self._get_sftp().file(path, mode='rb')
367
f = self._sftp.file(path, mode='rb')
233
368
if self._do_prefetch and (getattr(f, 'prefetch', None) is not None):
236
371
except (IOError, paramiko.SSHException), e:
237
self._translate_io_exception(e, path, ': error retrieving',
238
failure_exc=errors.ReadError)
240
def readv(self, relpath, offsets):
241
"""See Transport.readv()"""
242
# We overload the default readv() because we want to use a file
243
# that does not have prefetch enabled.
244
# Also, if we have a new paramiko, it implements an async readv()
249
path = self._remote_path(relpath)
250
fp = self._get_sftp().file(path, mode='rb')
251
readv = getattr(fp, 'readv', None)
253
return self._sftp_readv(fp, offsets, relpath)
254
mutter('seek and read %s offsets', len(offsets))
255
return self._seek_and_read(fp, offsets, relpath)
256
except (IOError, paramiko.SSHException), e:
257
372
self._translate_io_exception(e, path, ': error retrieving')
259
def _sftp_readv(self, fp, offsets, relpath='<unknown>'):
260
"""Use the readv() member of fp to do async readv.
262
And then read them using paramiko.readv(). paramiko.readv()
263
does not support ranges > 64K, so it caps the request size, and
264
just reads until it gets all the stuff it wants
266
offsets = list(offsets)
267
sorted_offsets = sorted(offsets)
269
# The algorithm works as follows:
270
# 1) Coalesce nearby reads into a single chunk
271
# This generates a list of combined regions, the total size
272
# and the size of the sub regions. This coalescing step is limited
273
# in the number of nearby chunks to combine, and is allowed to
274
# skip small breaks in the requests. Limiting it makes sure that
275
# we can start yielding some data earlier, and skipping means we
276
# make fewer requests. (Beneficial even when using async)
277
# 2) Break up this combined regions into chunks that are smaller
278
# than 64KiB. Technically the limit is 65536, but we are a
279
# little bit conservative. This is because sftp has a maximum
280
# return chunk size of 64KiB (max size of an unsigned short)
281
# 3) Issue a readv() to paramiko to create an async request for
283
# 4) Read in the data as it comes back, until we've read one
284
# continuous section as determined in step 1
285
# 5) Break up the full sections into hunks for the original requested
286
# offsets. And put them in a cache
287
# 6) Check if the next request is in the cache, and if it is, remove
288
# it from the cache, and yield its data. Continue until no more
289
# entries are in the cache.
290
# 7) loop back to step 4 until all data has been read
292
# TODO: jam 20060725 This could be optimized one step further, by
293
# attempting to yield whatever data we have read, even before
294
# the first coallesced section has been fully processed.
296
# When coalescing for use with readv(), we don't really need to
297
# use any fudge factor, because the requests are made asynchronously
298
coalesced = list(self._coalesce_offsets(sorted_offsets,
299
limit=self._max_readv_combine,
303
for c_offset in coalesced:
304
start = c_offset.start
305
size = c_offset.length
307
# We need to break this up into multiple requests
309
next_size = min(size, self._max_request_size)
310
requests.append((start, next_size))
314
mutter('SFTP.readv() %s offsets => %s coalesced => %s requests',
315
len(offsets), len(coalesced), len(requests))
317
# Queue the current read until we have read the full coalesced section
320
cur_coalesced_stack = iter(coalesced)
321
cur_coalesced = cur_coalesced_stack.next()
323
# Cache the results, but only until they have been fulfilled
325
# turn the list of offsets into a stack
326
offset_stack = iter(offsets)
327
cur_offset_and_size = offset_stack.next()
329
for data in fp.readv(requests):
331
cur_data_len += len(data)
333
if cur_data_len < cur_coalesced.length:
335
assert cur_data_len == cur_coalesced.length, \
336
"Somehow we read too much: %s != %s" % (cur_data_len,
337
cur_coalesced.length)
338
all_data = ''.join(cur_data)
342
for suboffset, subsize in cur_coalesced.ranges:
343
key = (cur_coalesced.start+suboffset, subsize)
344
data_map[key] = all_data[suboffset:suboffset+subsize]
346
# Now that we've read some data, see if we can yield anything back
347
while cur_offset_and_size in data_map:
348
this_data = data_map.pop(cur_offset_and_size)
349
yield cur_offset_and_size[0], this_data
350
cur_offset_and_size = offset_stack.next()
352
# We read a coalesced entry, so mark it as done
354
# Now that we've read all of the data for this coalesced section
356
cur_coalesced = cur_coalesced_stack.next()
358
if cur_coalesced is not None:
359
raise errors.ShortReadvError(relpath, cur_coalesced.start,
360
cur_coalesced.length, len(data))
362
def put_file(self, relpath, f, mode=None):
364
Copy the file-like object into the location.
374
def get_partial(self, relpath, start, length=None):
376
Get just part of a file.
378
:param relpath: Path to the file, relative to base
379
:param start: The starting position to read from
380
:param length: The length to read. A length of None indicates
381
read to the end of the file.
382
:return: A file-like object containing at least the specified bytes.
383
Some implementations may return objects which can be read
384
past this length, but this is not guaranteed.
386
# TODO: implement get_partial_multi to help with knit support
387
f = self.get(relpath)
389
if self._do_prefetch and hasattr(f, 'prefetch'):
393
def put(self, relpath, f, mode=None):
395
Copy the file-like or string object into the location.
366
397
:param relpath: Location to put the contents, relative to base.
367
:param f: File-like object.
398
:param f: File-like or string object.
368
399
:param mode: The final mode for the file
370
401
final_path = self._remote_path(relpath)
414
self._get_sftp().remove(tmp_abspath)
432
self._sftp.remove(tmp_abspath)
416
434
# raise the saved except
418
436
# raise the original with its traceback if we can.
421
def _put_non_atomic_helper(self, relpath, writer, mode=None,
422
create_parent_dir=False,
424
abspath = self._remote_path(relpath)
426
# TODO: jam 20060816 paramiko doesn't publicly expose a way to
427
# set the file mode at create time. If it does, use it.
428
# But for now, we just chmod later anyway.
430
def _open_and_write_file():
431
"""Try to open the target file, raise error on failure"""
435
fout = self._get_sftp().file(abspath, mode='wb')
436
fout.set_pipelined(True)
438
except (paramiko.SSHException, IOError), e:
439
self._translate_io_exception(e, abspath,
442
# This is designed to chmod() right before we close.
443
# Because we set_pipelined() earlier, theoretically we might
444
# avoid the round trip for fout.close()
446
self._get_sftp().chmod(abspath, mode)
451
if not create_parent_dir:
452
_open_and_write_file()
455
# Try error handling to create the parent directory if we need to
457
_open_and_write_file()
459
# Try to create the parent directory, and then go back to
461
parent_dir = os.path.dirname(abspath)
462
self._mkdir(parent_dir, dir_mode)
463
_open_and_write_file()
465
def put_file_non_atomic(self, relpath, f, mode=None,
466
create_parent_dir=False,
468
"""Copy the file-like object into the target location.
470
This function is not strictly safe to use. It is only meant to
471
be used when you already know that the target does not exist.
472
It is not safe, because it will open and truncate the remote
473
file. So there may be a time when the file has invalid contents.
475
:param relpath: The remote location to put the contents.
476
:param f: File-like object.
477
:param mode: Possible access permissions for new file.
478
None means do not set remote permissions.
479
:param create_parent_dir: If we cannot create the target file because
480
the parent directory does not exist, go ahead and
481
create it, and then try again.
485
self._put_non_atomic_helper(relpath, writer, mode=mode,
486
create_parent_dir=create_parent_dir,
489
def put_bytes_non_atomic(self, relpath, bytes, mode=None,
490
create_parent_dir=False,
494
self._put_non_atomic_helper(relpath, writer, mode=mode,
495
create_parent_dir=create_parent_dir,
498
439
def iter_files_recursive(self):
499
440
"""Walk the relative paths of all files in this transport."""
500
441
queue = list(self.list_dir('.'))
502
relpath = queue.pop(0)
443
relpath = urllib.quote(queue.pop(0))
503
444
st = self.stat(relpath)
504
445
if stat.S_ISDIR(st.st_mode):
505
446
for i, basename in enumerate(self.list_dir(relpath)):
510
def _mkdir(self, abspath, mode=None):
516
self._get_sftp().mkdir(abspath, local_mode)
518
self._get_sftp().chmod(abspath, mode=mode)
519
except (paramiko.SSHException, IOError), e:
520
self._translate_io_exception(e, abspath, ': unable to mkdir',
521
failure_exc=FileExists)
523
451
def mkdir(self, relpath, mode=None):
524
452
"""Create a directory at the given path."""
525
self._mkdir(self._remote_path(relpath), mode=mode)
454
path = self._remote_path(relpath)
455
# In the paramiko documentation, it says that passing a mode flag
456
# will filtered against the server umask.
457
# StubSFTPServer does not do this, which would be nice, because it is
458
# what we really want :)
459
# However, real servers do use umask, so we really should do it that way
460
self._sftp.mkdir(path)
462
self._sftp.chmod(path, mode=mode)
463
except (paramiko.SSHException, IOError), e:
464
self._translate_io_exception(e, path, ': unable to mkdir',
465
failure_exc=FileExists)
527
def _translate_io_exception(self, e, path, more_info='',
528
failure_exc=PathError):
467
def _translate_io_exception(self, e, path, more_info='', failure_exc=NoSuchFile):
529
468
"""Translate a paramiko or IOError into a friendlier exception.
531
470
:param e: The original exception
668
599
# that we have taken the lock.
669
600
return SFTPLock(relpath, self)
602
def _unparse_url(self, path=None):
605
path = urllib.quote(path)
606
# handle homedir paths
607
if not path.startswith('/'):
609
netloc = urllib.quote(self._host)
610
if self._username is not None:
611
netloc = '%s@%s' % (urllib.quote(self._username), netloc)
612
if self._port is not None:
613
netloc = '%s:%d' % (netloc, self._port)
615
return urlparse.urlunparse(('sftp', netloc, path, '', '', ''))
617
def _split_url(self, url):
618
if isinstance(url, unicode):
619
url = url.encode('utf-8')
620
(scheme, netloc, path, params,
621
query, fragment) = urlparse.urlparse(url, allow_fragments=False)
622
assert scheme == 'sftp'
623
username = password = host = port = None
625
username, host = netloc.split('@', 1)
627
username, password = username.split(':', 1)
628
password = urllib.unquote(password)
629
username = urllib.unquote(username)
634
host, port = host.rsplit(':', 1)
638
# TODO: Should this be ConnectionError?
639
raise TransportError('%s: invalid port number' % port)
640
host = urllib.unquote(host)
642
path = urllib.unquote(path)
644
# the initial slash should be removed from the path, and treated
645
# as a homedir relative path (the path begins with a double slash
646
# if it is absolute).
647
# see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
648
# RBC 20060118 we are not using this as its too user hostile. instead
649
# we are following lftp and using /~/foo to mean '~/foo'.
650
# handle homedir paths
651
if path.startswith('/~/'):
655
return (username, password, host, port, path)
657
def _parse_url(self, url):
658
(self._username, self._password,
659
self._host, self._port, self._path) = self._split_url(url)
661
def _sftp_connect(self):
662
"""Connect to the remote sftp server.
663
After this, self._sftp should have a valid connection (or
664
we raise an TransportError 'could not connect').
666
TODO: Raise a more reasonable ConnectionFailed exception
668
global _connected_hosts
670
idx = (self._host, self._port, self._username)
672
self._sftp = _connected_hosts[idx]
677
vendor = _get_ssh_vendor()
678
if vendor == 'loopback':
679
sock = socket.socket()
680
sock.connect((self._host, self._port))
681
self._sftp = SFTPClient(LoopbackSFTP(sock))
682
elif vendor != 'none':
683
sock = SFTPSubprocess(self._host, vendor, self._port,
685
self._sftp = SFTPClient(sock)
687
self._paramiko_connect()
689
_connected_hosts[idx] = self._sftp
691
def _paramiko_connect(self):
692
global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
697
t = paramiko.Transport((self._host, self._port or 22))
698
t.set_log_channel('bzr.paramiko')
700
except paramiko.SSHException, e:
701
raise ConnectionError('Unable to reach SSH host %s:%d' %
702
(self._host, self._port), e)
704
server_key = t.get_remote_server_key()
705
server_key_hex = paramiko.util.hexify(server_key.get_fingerprint())
706
keytype = server_key.get_name()
707
if SYSTEM_HOSTKEYS.has_key(self._host) and SYSTEM_HOSTKEYS[self._host].has_key(keytype):
708
our_server_key = SYSTEM_HOSTKEYS[self._host][keytype]
709
our_server_key_hex = paramiko.util.hexify(our_server_key.get_fingerprint())
710
elif BZR_HOSTKEYS.has_key(self._host) and BZR_HOSTKEYS[self._host].has_key(keytype):
711
our_server_key = BZR_HOSTKEYS[self._host][keytype]
712
our_server_key_hex = paramiko.util.hexify(our_server_key.get_fingerprint())
714
warning('Adding %s host key for %s: %s' % (keytype, self._host, server_key_hex))
715
if not BZR_HOSTKEYS.has_key(self._host):
716
BZR_HOSTKEYS[self._host] = {}
717
BZR_HOSTKEYS[self._host][keytype] = server_key
718
our_server_key = server_key
719
our_server_key_hex = paramiko.util.hexify(our_server_key.get_fingerprint())
721
if server_key != our_server_key:
722
filename1 = os.path.expanduser('~/.ssh/known_hosts')
723
filename2 = pathjoin(config_dir(), 'ssh_host_keys')
724
raise TransportError('Host keys for %s do not match! %s != %s' % \
725
(self._host, our_server_key_hex, server_key_hex),
726
['Try editing %s or %s' % (filename1, filename2)])
731
self._sftp = t.open_sftp_client()
732
except paramiko.SSHException, e:
733
raise ConnectionError('Unable to start sftp client %s:%d' %
734
(self._host, self._port), e)
736
def _sftp_auth(self, transport):
737
# paramiko requires a username, but it might be none if nothing was supplied
738
# use the local username, just in case.
739
# We don't override self._username, because if we aren't using paramiko,
740
# the username might be specified in ~/.ssh/config and we don't want to
741
# force it to something else
742
# Also, it would mess up the self.relpath() functionality
743
username = self._username or getpass.getuser()
745
# Paramiko tries to open a socket.AF_UNIX in order to connect
746
# to ssh-agent. That attribute doesn't exist on win32 (it does in cygwin)
747
# so we get an AttributeError exception. For now, just don't try to
748
# connect to an agent if we are on win32
749
if sys.platform != 'win32':
750
agent = paramiko.Agent()
751
for key in agent.get_keys():
752
mutter('Trying SSH agent key %s' % paramiko.util.hexify(key.get_fingerprint()))
754
transport.auth_publickey(username, key)
756
except paramiko.SSHException, e:
759
# okay, try finding id_rsa or id_dss? (posix only)
760
if self._try_pkey_auth(transport, paramiko.RSAKey, username, 'id_rsa'):
762
if self._try_pkey_auth(transport, paramiko.DSSKey, username, 'id_dsa'):
767
transport.auth_password(username, self._password)
769
except paramiko.SSHException, e:
772
# FIXME: Don't keep a password held in memory if you can help it
773
#self._password = None
775
# give up and ask for a password
776
password = bzrlib.ui.ui_factory.get_password(
777
prompt='SSH %(user)s@%(host)s password',
778
user=username, host=self._host)
780
transport.auth_password(username, password)
781
except paramiko.SSHException, e:
782
raise ConnectionError('Unable to authenticate to SSH host as %s@%s' %
783
(username, self._host), e)
785
def _try_pkey_auth(self, transport, pkey_class, username, filename):
786
filename = os.path.expanduser('~/.ssh/' + filename)
788
key = pkey_class.from_private_key_file(filename)
789
transport.auth_publickey(username, key)
791
except paramiko.PasswordRequiredException:
792
password = bzrlib.ui.ui_factory.get_password(
793
prompt='SSH %(filename)s password',
796
key = pkey_class.from_private_key_file(filename, password)
797
transport.auth_publickey(username, key)
799
except paramiko.SSHException:
800
mutter('SSH authentication via %s key failed.' % (os.path.basename(filename),))
801
except paramiko.SSHException:
802
mutter('SSH authentication via %s key failed.' % (os.path.basename(filename),))
671
807
def _sftp_open_exclusive(self, abspath, mode=None):
672
808
"""Open a remote path exclusively.
682
818
:param abspath: The remote absolute path where the file should be opened
683
819
:param mode: The mode permissions bits for the new file
685
# TODO: jam 20060816 Paramiko >= 1.6.2 (probably earlier) supports
686
# using the 'x' flag to indicate SFTP_FLAG_EXCL.
687
# However, there is no way to set the permission mode at open
688
# time using the sftp_client.file() functionality.
689
path = self._get_sftp()._adjust_cwd(abspath)
690
# mutter('sftp abspath %s => %s', abspath, path)
821
path = self._sftp._adjust_cwd(abspath)
691
822
attr = SFTPAttributes()
692
823
if mode is not None:
693
824
attr.st_mode = mode
694
825
omode = (SFTP_FLAG_WRITE | SFTP_FLAG_CREATE
695
826
| SFTP_FLAG_TRUNC | SFTP_FLAG_EXCL)
697
t, msg = self._get_sftp()._request(CMD_OPEN, path, omode, attr)
828
t, msg = self._sftp._request(CMD_OPEN, path, omode, attr)
698
829
if t != CMD_HANDLE:
699
830
raise TransportError('Expected an SFTP handle')
700
831
handle = msg.get_string()
701
return SFTPFile(self._get_sftp(), handle, 'wb', -1)
832
return SFTPFile(self._sftp, handle, 'wb', -1)
702
833
except (paramiko.SSHException, IOError), e:
703
834
self._translate_io_exception(e, abspath, ': unable to open',
704
835
failure_exc=FileExists)
706
def _can_roundtrip_unix_modebits(self):
707
if sys.platform == 'win32':
713
838
# ------------- server test implementation --------------
716
842
from bzrlib.tests.stub_sftp import StubServer, StubSFTPServer
744
870
self._socket.bind(('localhost', 0))
745
871
self._socket.listen(1)
746
872
self.port = self._socket.getsockname()[1]
747
self._stop_event = threading.Event()
873
self.stop_event = threading.Event()
876
s, _ = self._socket.accept()
877
# now close the listen socket
880
self._callback(s, self.stop_event)
882
pass #Ignore socket errors
884
# probably a failed test
885
warning('Exception from within unit test server thread: %r' % x)
750
# called from outside this thread
751
self._stop_event.set()
888
self.stop_event.set()
752
889
# use a timeout here, because if the test fails, the server thread may
753
890
# never notice the stop_event.
759
readable, writable_unused, exception_unused = \
760
select.select([self._socket], [], [], 0.1)
761
if self._stop_event.isSet():
763
if len(readable) == 0:
766
s, addr_unused = self._socket.accept()
767
# because the loopback socket is inline, and transports are
768
# never explicitly closed, best to launch a new thread.
769
threading.Thread(target=self._callback, args=(s,)).start()
770
except socket.error, x:
771
sys.excepthook(*sys.exc_info())
772
warning('Socket error during accept() within unit test server'
775
# probably a failed test; unit test thread will log the
777
sys.excepthook(*sys.exc_info())
778
warning('Exception from within unit test server thread: %r' %
782
class SocketDelay(object):
    """A socket decorator to make TCP appear slower.

    This changes recv, send, and sendall to add a fixed latency to each python
    call if a new roundtrip is detected. That is, when a recv is called and the
    flag new_roundtrip is set, latency is charged. Every send and send_all
    reset this flag.

    In addition every send, sendall and recv sleeps a bit per character send to
    simulate bandwidth.

    Not all methods are implemented, this is deliberate as this class is not a
    replacement for the builtin sockets layer. fileno is not implemented to
    prevent the proxy being bypassed.
    """

    # Accumulated sleep time when really_sleep is False (used by unit tests).
    simulated_time = 0
    # Socket methods forwarded untouched to the wrapped socket.
    _proxied_arguments = dict.fromkeys([
        "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
        "setblocking", "setsockopt", "settimeout", "shutdown"])

    def __init__(self, sock, latency, bandwidth=1.0,
                 really_sleep=True):
        """
        :param sock: the real socket being decorated
        :param latency: fixed delay (seconds) charged per detected roundtrip
        :param bandwith: simulated bandwith (MegaBit)
        :param really_sleep: If set to false, the SocketDelay will just
        increase a counter, instead of calling time.sleep. This is useful for
        unittesting the SocketDelay.
        """
        self.sock = sock
        self.latency = latency
        self.really_sleep = really_sleep
        # seconds of simulated transfer time per byte moved
        self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
        self.new_roundtrip = False

    def sleep(self, s):
        # Either really sleep, or just account the time for later inspection.
        if self.really_sleep:
            time.sleep(s)
        else:
            SocketDelay.simulated_time += s

    def __getattr__(self, attr):
        if attr in SocketDelay._proxied_arguments:
            return getattr(self.sock, attr)
        raise AttributeError("'SocketDelay' object has no attribute %r" %
                             attr)

    def dup(self):
        """Duplicate the socket, preserving the simulation parameters.

        BUG FIX: the old implementation passed self.time_per_byte into the
        constructor's bandwidth parameter (corrupting the simulated rate)
        and referenced a nonexistent self._sleep attribute, so calling
        dup() raised AttributeError.  Copy the derived per-byte rate across
        directly instead.
        """
        new = SocketDelay(self.sock.dup(), self.latency,
                          really_sleep=self.really_sleep)
        new.time_per_byte = self.time_per_byte
        return new

    def recv(self, *args):
        data = self.sock.recv(*args)
        # A recv with data while new_roundtrip is set ends the roundtrip:
        # charge the fixed latency once, then the per-byte cost.
        if data and self.new_roundtrip:
            self.new_roundtrip = False
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return data

    def sendall(self, data, flags=0):
        # First send after a recv starts a new roundtrip: charge latency.
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return self.sock.sendall(data, flags)

    def send(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        bytes_sent = self.sock.send(data, flags)
        # Charge bandwidth for what was actually sent, not what was asked.
        self.sleep(bytes_sent * self.time_per_byte)
        return bytes_sent
857
894
class SFTPServer(Server):
    """Common code for SFTP server facilities."""

    def __init__(self, server_interface=StubServer):
        self._original_vendor = None
        self._homedir = None
        self._server_homedir = None
        self._listener = None
        self._root = None
        # The test server always talks paramiko-to-paramiko.
        self._vendor = ssh.ParamikoVendor()
        self._server_interface = server_interface
        # sftp server logs
        self.logs = []
        # seconds of artificial latency added per roundtrip; see
        # _run_server_entry / SocketDelay.
        self.add_latency = 0

    def _get_sftp_url(self, path):
        """Calculate an sftp url to this server for path."""
        return 'sftp://foo:bar@localhost:%d/%s' % (self._listener.port, path)

    def log(self, message):
        """StubServer uses this to log when a new server is created."""
        self.logs.append(message)

    def _run_server_entry(self, sock):
        """Entry point for all implementations of _run_server.

        If self.add_latency is > 0.000001 then sock is given a latency adding
        decorator.
        """
        if self.add_latency > 0.000001:
            sock = SocketDelay(sock, self.add_latency)
        return self._run_server(sock)

    def _run_server(self, s):
        """Serve one SFTP session over the accepted socket s."""
        ssh_server = paramiko.Transport(s)
        key_file = pathjoin(self._homedir, 'test_rsa.key')
        f = open(key_file, 'w')
        f.write(STUB_SERVER_KEY)
        f.close()
        host_key = paramiko.RSAKey.from_private_key_file(key_file)
        ssh_server.add_server_key(host_key)
        server = self._server_interface(self)
        ssh_server.set_subsystem_handler('sftp', paramiko.SFTPServer,
                                         StubSFTPServer, root=self._root,
                                         home=self._server_homedir)
        event = threading.Event()
        ssh_server.start_server(event, server)
        event.wait(5.0)

    def setUp(self, backing_server=None):
        # XXX: TODO: make sftpserver back onto backing_server rather than local
        # disk.
        assert (backing_server is None or
                isinstance(backing_server, local.LocalURLServer)), (
            "backing_server should not be %r, because this can only serve the "
            "local current working directory." % (backing_server,))
        # Install our vendor so client connections go through paramiko;
        # the previous vendor is restored in tearDown.
        self._original_vendor = ssh._ssh_vendor_manager._cached_ssh_vendor
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._vendor
        if sys.platform == 'win32':
            # Win32 needs to use the UNICODE api
            self._homedir = getcwd()
        else:
            # But Linux SFTP servers should just deal in bytestreams
            self._homedir = os.getcwd()
        if self._server_homedir is None:
            self._server_homedir = self._homedir
        self._root = '/'
        if sys.platform == 'win32':
            self._root = ''
        # FIXME WINDOWS: _root should be _server_homedir[0]:/
        self._listener = SocketListener(self._run_server_entry)
        self._listener.setDaemon(True)
        self._listener.start()

    def tearDown(self):
        """See bzrlib.transport.Server.tearDown."""
        self._listener.stop()
        ssh._ssh_vendor_manager._cached_ssh_vendor = self._original_vendor

    def get_bogus_url(self):
        """See bzrlib.transport.Server.get_bogus_url."""
        # this is chosen to try to prevent trouble with proxies, wierd dns, etc
        # we bind a random socket, so that we get a guaranteed unused port
        # we just never listen on that port
        s = socket.socket()
        s.bind(('localhost', 0))
        return 'sftp://%s:%s/' % s.getsockname()
945
950
class SFTPFullAbsoluteServer(SFTPServer):