# Copyright (C) 2005-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Base implementation of Transport over http.

There are separate implementation modules for each http client implementation.
"""

from cStringIO import StringIO
import os
import re
import sys
import urllib
import urlparse
import weakref

from bzrlib import (
    debug,
    errors,
    transport,
    ui,
    urlutils,
    )
from bzrlib.smart import medium
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import mutter
from bzrlib.transport import (
    ConnectedTransport,
    )


# TODO: This is not used anymore by HttpTransport_urllib
# (extracting the auth info and prompting the user for a password
# have been split), only the tests still use it. It should be
# deleted and the tests rewritten ASAP to stay in sync.
def extract_auth(url, password_manager):
    """Extract auth parameters from an HTTP/HTTPS url and add them to the given
    password manager.  Return the url, minus those auth parameters (which
    confuse urllib2).
    """
    if not re.match(r'^(https?)(\+\w+)?://', url):
        raise ValueError(
            'invalid absolute url %r' % (url,))
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

    if '@' in netloc:
        auth, netloc = netloc.split('@', 1)
        if ':' in auth:
            username, password = auth.split(':', 1)
        else:
            username, password = auth, None
        if ':' in netloc:
            host = netloc.split(':', 1)[0]
        else:
            host = netloc
        username = urllib.unquote(username)
        if password is not None:
            password = urllib.unquote(password)
        else:
            password = ui.ui_factory.get_password(
                prompt=u'HTTP %(user)s@%(host)s password',
                user=username, host=host)
        password_manager.add_password(None, host, username, password)
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

    return url
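

# Editor's example (a sketch, not part of the original module): with a
# urllib2-style password manager, extract_auth registers the credentials
# and returns the url with them stripped:
#
#   import urllib2
#   pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
#   extract_auth('http://joe:secret@example.com/path', pm)
#   # => 'http://example.com/path'; pm now holds ('joe', 'secret')
#   #    for host 'example.com'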


class HttpTransportBase(ConnectedTransport):
    """Base class for http implementations.

    Does URL parsing, etc, but not any network IO.

    The protocol can be given as e.g. http+urllib://host/ to use a particular
    urllib implementation.
    """

    # _unqualified_scheme: "http" or "https"
    # _scheme: may have "+pycurl", etc

    def __init__(self, base, _impl_name, _from_transport=None):
"""Set the base path where files will be stored."""
98
proto_match = re.match(r'^(https?)(\+\w+)?://', base)
100
raise AssertionError("not a http url: %r" % base)
101
self._unqualified_scheme = proto_match.group(1)
102
self._impl_name = _impl_name
103
super(HttpTransportBase, self).__init__(base,
104
_from_transport=_from_transport)
106
# range hint is handled dynamically throughout the life
107
# of the transport object. We start by trying multi-range
108
# requests and if the server returns bogus results, we
109
# retry with single range requests and, finally, we
110
# forget about range if the server really can't
111
# understand. Once acquired, this piece of info is
112
# propagated to clones.
113
if _from_transport is not None:
114
self._range_hint = _from_transport._range_hint
        else:
            self._range_hint = 'multi'

    def has(self, relpath):
        raise NotImplementedError("has() is abstract on %r" % self)

    def get(self, relpath):
"""Get the file at the given relative path.
124
136
:param relpath: The relative path to the file
126
code, response_file = self._get(relpath, None)
127
# FIXME: some callers want an iterable... One step forward, three steps
128
# backwards :-/ And not only an iterable, but an iterable that can be
129
# seeked backwards, so we will never be able to do that. One such
130
# known client is bzrlib.bundle.serializer.v4.get_bundle_reader. At the
131
# time of this writing it's even the only known client -- vila20071203
132
return StringIO(response_file.read())

    def _get(self, relpath, ranges, tail_amount=0):
        """Get a file, or part of a file.

        :param relpath: Path relative to transport base URL
        :param ranges: None to get the whole file;
            or a list of _CoalescedOffset to fetch parts of a file.
        :param tail_amount: The amount to get from the end of the file.

        :returns: (http_code, result_file)
        """
        raise NotImplementedError(self._get)

    def _remote_path(self, relpath):
        """See ConnectedTransport._remote_path.

        user and passwords are not embedded in the path provided to the server.
        """
        url = self._parsed_url.clone(relpath)
        url.user = url.quoted_user = None
        url.password = url.quoted_password = None
        url.scheme = self._unqualified_scheme
        return str(url)

    def _create_auth(self):
        """Returns a dict containing the credentials provided at build time."""
        auth = dict(host=self._parsed_url.host, port=self._parsed_url.port,
                    user=self._parsed_url.user,
                    password=self._parsed_url.password,
                    protocol=self._unqualified_scheme,
                    path=self._parsed_url.path)
        return auth

    def get_smart_medium(self):
        """See Transport.get_smart_medium."""
        if self._medium is None:
            # Since medium holds some state (smart server probing at least), we
            # need to keep it around. Note that this is needed because medium
            # has the same 'base' attribute as the transport so it can't be
            # shared between transports having different bases.
            self._medium = SmartClientHTTPMedium(self)
        return self._medium

    def _degrade_range_hint(self, relpath, ranges, exc_info):
        if self._range_hint == 'multi':
            self._range_hint = 'single'
            mutter('Retry "%s" with single range request' % relpath)
        elif self._range_hint == 'single':
            self._range_hint = None
            mutter('Retry "%s" without ranges' % relpath)
        else:
            # We tried all the tricks, but nothing worked. We re-raise the
            # original exception; the 'mutter' calls above will indicate that
            # further tries were unsuccessful
            raise exc_info[0], exc_info[1], exc_info[2]
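
    # Editor's sketch (not in the original source): the hint degrades one
    # step per call, e.g.:
    #
    #   t._range_hint                                    # 'multi'
    #   t._degrade_range_hint('f', [], sys.exc_info())   # now 'single'
    #   t._degrade_range_hint('f', [], sys.exc_info())   # now None
    #   t._degrade_range_hint('f', [], sys.exc_info())   # re-raises exc_info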

    # _coalesce_offsets is a helper for readv, it tries to combine ranges
    # without degrading readv performance. _bytes_to_read_before_seek is the
    # value used for the limit parameter and has been tuned for other
    # transports. For HTTP, the name is inappropriate but the parameter is
    # still useful and helps reduce the number of chunks in the response. The
    # overhead for a chunk (headers, length, footer) around the data itself
    # is variable but around 50 bytes. We use 128 to reduce the range
    # specifiers that appear in the header; some servers (notably Apache)
    # enforce a maximum length for a header and issue a '400: Bad request'
    # error when too many ranges are specified.
    _bytes_to_read_before_seek = 128
    # No limit on the number of offsets that get combined into one, we are
    # trying to avoid downloading the whole file.
    _max_readv_combine = 0
    # By default Apache has a limit of ~400 ranges before replying with a 400
    # Bad Request. So we go underneath that amount to be safe.
    _max_get_ranges = 200
    # We impose no limit on the range size. But see _pycurl.py for a different
    # use.
    _get_max_size = 0

    def _readv(self, relpath, offsets):
        """Get parts of the file at the given relative path.

        :param offsets: A list of (offset, size) tuples.
        :param return: A list or generator of (offset, data) tuples
        """
        # offsets may be a generator, we will iterate it several times, so
        # build a list
        offsets = list(offsets)

        try_again = True
        retried_offset = None
        while try_again:
            try_again = False

            # Coalesce the offsets to minimize the GET requests issued
            sorted_offsets = sorted(offsets)
            coalesced = self._coalesce_offsets(
                sorted_offsets, limit=self._max_readv_combine,
                fudge_factor=self._bytes_to_read_before_seek,
                max_size=self._get_max_size)

            # Turn it into a list, we will iterate it several times
            coalesced = list(coalesced)
            if 'http' in debug.debug_flags:
                mutter('http readv of %s offsets => %s collapsed %s',
                       relpath, len(offsets), len(coalesced))

            # Cache the data read, but only until it's been used
            data_map = {}
            # We will iterate on the data received from the GET requests and
            # serve the corresponding offsets respecting the initial order. We
            # need an offset iterator for that.
            iter_offsets = iter(offsets)
            cur_offset_and_size = iter_offsets.next()

            try:
                for cur_coal, rfile in self._coalesce_readv(relpath, coalesced):
                    # Split the received chunk
                    for offset, size in cur_coal.ranges:
                        start = cur_coal.start + offset
                        rfile.seek(start, os.SEEK_SET)
                        data = rfile.read(size)
                        data_len = len(data)
                        if data_len != size:
                            raise errors.ShortReadvError(relpath, start, size,
                                                         actual=data_len)
                        if (start, size) == cur_offset_and_size:
                            # The offsets requested are sorted as the coalesced
                            # ones, no need to cache. Win !
                            yield cur_offset_and_size[0], data
                            cur_offset_and_size = iter_offsets.next()
                        else:
                            # Different sorting. We need to cache.
                            data_map[(start, size)] = data

                    # Yield everything we can
                    while cur_offset_and_size in data_map:
                        # Clean the cached data since we use it
                        # XXX: will break if offsets contains duplicates --
                        # vila20071129
                        this_data = data_map.pop(cur_offset_and_size)
                        yield cur_offset_and_size[0], this_data
                        cur_offset_and_size = iter_offsets.next()

            except (errors.ShortReadvError, errors.InvalidRange,
                    errors.InvalidHttpRange, errors.HttpBoundaryMissing), e:
                mutter('Exception %r: %s during http._readv', e, e)
                if (not isinstance(e, errors.ShortReadvError)
                    or retried_offset == cur_offset_and_size):
                    # We don't degrade the range hint for ShortReadvError since
                    # they do not indicate a problem with the server ability to
                    # handle ranges. Except when we fail to get back a required
                    # offset twice in a row. In that case, falling back to
                    # single range or whole file should help or end up in a
                    # fatal exception.
                    self._degrade_range_hint(relpath, coalesced, sys.exc_info())
                # Some offsets may have been already processed, so we retry
                # only the unsuccessful ones.
                offsets = [cur_offset_and_size] + [o for o in iter_offsets]
                retried_offset = cur_offset_and_size
                try_again = True
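
    # Editor's sketch (not in the original source): offsets are served in
    # the order requested even though the GETs are issued over sorted,
    # coalesced ranges, e.g.:
    #
    #   list(t._readv('file', [(10, 2), (0, 2)]))
    #   # => [(10, <2 bytes at 10>), (0, <2 bytes at 0>)]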

    def _coalesce_readv(self, relpath, coalesced):
        """Issue several GET requests to satisfy the coalesced offsets"""

        def get_and_yield(relpath, coalesced):
            if coalesced:
                # Note that the _get below may raise
                # errors.InvalidHttpRange. It's the caller's responsibility to
                # decide how to retry since it may provide different coalesced
                # offsets.
                code, rfile = self._get(relpath, coalesced)
                for coal in coalesced:
                    yield coal, rfile

        if self._range_hint is None:
            # Download whole file
            for c, rfile in get_and_yield(relpath, coalesced):
                yield c, rfile
        else:
            total = len(coalesced)
            if self._range_hint == 'multi':
                max_ranges = self._max_get_ranges
            elif self._range_hint == 'single':
                max_ranges = total
            else:
                raise AssertionError("Unknown _range_hint %r"
                                     % (self._range_hint,))
            # TODO: Some web servers may ignore the range requests and return
            # the whole file, we may want to detect that and avoid further
            # requests.
            # Hint: test_readv_multiple_get_requests will fail once we do that
            cumul = 0
            ranges = []
            for coal in coalesced:
                if ((self._get_max_size > 0
                     and cumul + coal.length > self._get_max_size)
                    or len(ranges) >= max_ranges):
                    # Get that much and yield
                    for c, rfile in get_and_yield(relpath, ranges):
                        yield c, rfile
                    # Restart with the current offset
                    ranges = [coal]
                    cumul = coal.length
                else:
                    ranges.append(coal)
                    cumul += coal.length
            # Get the rest and yield
            for c, rfile in get_and_yield(relpath, ranges):
                yield c, rfile
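
    # Editor's sketch (not in the original source): with _max_get_ranges at
    # 200, a readv that coalesces into 450 ranges in 'multi' mode would be
    # satisfied by three GET requests (200 + 200 + 50 ranges), each range
    # yielded with the response file its data should be read from.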

    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For HTTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
        return 64 * 1024

    def _post(self, body_bytes):
        """POST body_bytes to .bzr/smart on this transport.

        :returns: (response code, response body file-like object).
        """
        # TODO: Requiring all the body_bytes to be available at the beginning
        # of the POST may require large client buffers. It would be nice to
        # have an interface that allows streaming via POST when possible (and
        # degrades to a local buffer when not).
        raise NotImplementedError(self._post)

    def put_file(self, relpath, f, mode=None):
        """Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param f: File-like object.
        """
        raise errors.TransportNotPossible('http PUT not supported')

    def mkdir(self, relpath, mode=None):
        """Create a directory at the given path."""
        raise errors.TransportNotPossible('http does not support mkdir()')

    def rmdir(self, relpath):
        """See Transport.rmdir."""
        raise errors.TransportNotPossible('http does not support rmdir()')

    def append_file(self, relpath, f, mode=None):
        """Append the text in the file-like object into the final
        location.
        """
        raise errors.TransportNotPossible('http does not support append()')

    def copy(self, rel_from, rel_to):
        """Copy the item at rel_from to the location at rel_to"""
        raise errors.TransportNotPossible('http does not support copy()')

    def copy_to(self, relpaths, other, mode=None, pb=None):
        """Copy a set of entries from self into another Transport.

        :param relpaths: A list/generator of entries to be copied.
        """
        # At this point HttpTransport might be able to check and see if
        # the remote location is the same, and rather than download, and
        # then upload, it could just issue a remote copy_this command.
        if isinstance(other, HttpTransportBase):
            raise errors.TransportNotPossible(
                'http cannot be the target of copy_to()')
        else:
            return super(HttpTransportBase, self).copy_to(
                relpaths, other, mode=mode, pb=pb)

    def lock_write(self, relpath):
        """Lock the given file for exclusive write access.

        :return: A lock object, which should be passed to Transport.unlock()
        """
        raise errors.TransportNotPossible('http does not support lock_write()')

    def _attempted_range_header(self, offsets, tail_amount):
        """Prepare a HTTP Range header at a level the server should accept.

        :return: the range header representing offsets/tail_amount or None if
            no header can be built.
        """
        if self._range_hint == 'multi':
            # Generate the header describing all offsets
            return self._range_header(offsets, tail_amount)
        elif self._range_hint == 'single':
            # Combine all the requested ranges into a single
            # encompassing one
            if len(offsets) > 0:
                if tail_amount not in (0, None):
                    # Nothing we can do here to combine ranges with tail_amount
                    # in a single range, just returns None. The whole file
                    # should be downloaded.
                    return None
                else:
                    start = offsets[0].start
                    last = offsets[-1]
                    end = last.start + last.length - 1
                    whole = self._coalesce_offsets([(start, end - start + 1)],
                                                   limit=0, fudge_factor=0)
                    return self._range_header(list(whole), 0)
            else:
                # Only tail_amount requested, let range_header
                # do its work
                return self._range_header(offsets, tail_amount)
        else:
            return None

    @staticmethod
    def _range_header(ranges, tail_amount):
        """Turn a list of bytes ranges into a HTTP Range header value.

        :param ranges: A list of _CoalescedOffset
        :param tail_amount: The amount to get from the end of the file.

        :return: HTTP range header string.

        At least a non-empty ranges *or* a tail_amount must be
        provided.
        """
        strings = []
        for offset in ranges:
            strings.append('%d-%d' % (offset.start,
                                      offset.start + offset.length - 1))

        if tail_amount:
            strings.append('-%d' % tail_amount)

        return ','.join(strings)
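
    # Editor's example (not in the original source): two coalesced offsets
    # covering bytes 0-24 and 100-149 plus a 50-byte tail produce the
    # header value '0-24,100-149,-50'.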

    def _redirected_to(self, source, target):
        """Returns a transport suitable to re-issue a redirected request.

        :param source: The source url as returned by the server.
        :param target: The target url as returned by the server.

        The redirection can be handled only if the relpath involved is not
        renamed by the redirection.

        :returns: A transport or None.
        """
        def relpath(abspath):
            """Returns the path relative to our base.

            The constraints are weaker than the real relpath method because the
            abspath is coming from the server and may slightly differ from our
            base. We don't check the scheme, host, port, user, password parts,
            relying on the caller to give us a proper url (i.e. one returned by
            the server mirroring the one we sent).
            """
            parsed_url = self._split_url(abspath)
            pl = len(self._parsed_url.path)
            return parsed_url.path[pl:].strip('/')

        relpath = relpath(source)
        if not target.endswith(relpath):
            # The final part of the url has been renamed, we can't handle the
            # redirection.
            return None

        parsed_url = self._split_url(target)
        # Recalculate base path. This is needed to ensure that when the
        # redirected transport will be used to re-try whatever request was
        # redirected, we end up with the same url
        base_path = parsed_url.path[:-len(relpath)]
        if parsed_url.scheme in ('http', 'https'):
            # Same protocol family (i.e. http[s]), we will preserve the same
            # http client implementation when a redirection occurs from one to
            # the other (otherwise users may be surprised that bzr switches
            # from one implementation to the other, and devs may suffer
            # debugging it).
            if (parsed_url.scheme == self._unqualified_scheme
                and parsed_url.host == self._parsed_url.host
                and parsed_url.port == self._parsed_url.port
                and (parsed_url.user is None or
                     parsed_url.user == self._parsed_url.user)):
                # If a user is specified, it should match, we don't care about
                # passwords, wrong passwords will be rejected anyway.
                new_transport = self.clone(base_path)
            else:
                # Rebuild the url preserving the scheme qualification and the
                # credentials (if they don't apply, the redirected to server
                # will tell us, but if they do apply, we avoid prompting the
                # user)
                redir_scheme = parsed_url.scheme + '+' + self._impl_name
                new_url = self._unsplit_url(redir_scheme,
                                            self._parsed_url.user,
                                            self._parsed_url.password,
                                            parsed_url.host, parsed_url.port,
                                            base_path)
                new_transport = transport.get_transport(new_url)
        else:
            # Redirected to a different protocol
            new_url = self._unsplit_url(parsed_url.scheme,
                                        parsed_url.user, parsed_url.password,
                                        parsed_url.host, parsed_url.port,
                                        base_path)
            new_transport = transport.get_transport(new_url)
        return new_transport


# TODO: May be better located in smart/medium.py with the other
# SmartMedium classes
class SmartClientHTTPMedium(medium.SmartClientMedium):

    def __init__(self, http_transport):
        super(SmartClientHTTPMedium, self).__init__(http_transport.base)
        # We don't want to create a circular reference between the http
        # transport and its associated medium. Since the transport will live
        # longer than the medium, the medium keeps only a weak reference to its
        # transport.
        self._http_transport_ref = weakref.ref(http_transport)

    def get_request(self):
        return SmartClientHTTPMediumRequest(self)

    def should_probe(self):
        return True

    def remote_path_from_transport(self, transport):
        # Strip the optional 'bzr+' prefix from transport so it will have the
        # same scheme as self.
        transport_base = transport.base
        if transport_base.startswith('bzr+'):
            transport_base = transport_base[4:]
        rel_url = urlutils.relative_url(self.base, transport_base)
        return urllib.unquote(rel_url)
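
    # Editor's example (not in the original source): with self.base
    # 'http://host/repo/' and a transport rooted at
    # 'bzr+http://host/repo/branch/', this returns 'branch/'.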

    def send_http_smart_request(self, bytes):
        try:
            # Get back the http_transport held by the weak reference
            t = self._http_transport_ref()
            code, body_filelike = t._post(bytes)
            if code != 200:
                raise errors.InvalidHttpResponse(
                    t._remote_path('.bzr/smart'),
                    'Expected 200 response code, got %r' % (code,))
        except (errors.InvalidHttpResponse, errors.ConnectionReset), e:
            raise errors.SmartProtocolError(str(e))
        return body_filelike

    def _report_activity(self, bytes, direction):
        """See SmartMedium._report_activity.

        Does nothing; the underlying plain HTTP transport will report the
        activity that this medium would report.
        """
        pass

    def disconnect(self):
        """See SmartClientMedium.disconnect()."""
        t = self._http_transport_ref()
        t.disconnect()


# TODO: May be better located in smart/medium.py with the other
# SmartMediumRequest classes
class SmartClientHTTPMediumRequest(medium.SmartClientMediumRequest):
    """A SmartClientMediumRequest that works with an HTTP medium."""

    def __init__(self, client_medium):
        medium.SmartClientMediumRequest.__init__(self, client_medium)
        self._buffer = ''

    def _accept_bytes(self, bytes):
        self._buffer += bytes

    def _finished_writing(self):
        data = self._medium.send_http_smart_request(self._buffer)
        self._response_body = data

    def _read_bytes(self, count):
        """See SmartClientMediumRequest._read_bytes."""
        return self._response_body.read(count)

    def _read_line(self):
        line, excess = medium._get_line(self._response_body.read)
        if excess != '':
            raise AssertionError(
                '_get_line returned excess bytes, but this mediumrequest '
                'cannot handle excess. (%r)' % (excess,))
        return line

    def _finished_reading(self):
        """See SmartClientMediumRequest._finished_reading."""
        pass


def unhtml_roughly(maybe_html, length_limit=1000):
    """Very approximate html->text translation, for presenting error bodies.

    :param length_limit: Truncate the result to this many characters.

    >>> unhtml_roughly("<b>bad</b> things happened\\n")
    ' bad  things happened '
    """
    return re.subn(r"(<[^>]*>|\n|&nbsp;)", " ", maybe_html)[0][:length_limit]