# Copyright (C) 2005-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Base implementation of Transport over http.

There are separate implementation modules for each http client implementation.
"""

from __future__ import absolute_import

import os
import re
import sys
import weakref

from bzrlib import (
    debug,
    errors,
    transport,
    urlutils,
    )
from bzrlib.smart import medium
from bzrlib.trace import mutter
from bzrlib.transport import (
    ConnectedTransport,
    )


class HttpTransportBase(ConnectedTransport):
    """Base class for http implementations.

    Does URL parsing, etc, but not any network IO.

    The protocol can be given as e.g. http+urllib://host/ to use a particular
    implementation.
    """

    # _unqualified_scheme: "http" or "https"
    # _scheme: may have "+pycurl", etc

    def __init__(self, base, _impl_name, _from_transport=None):
        """Set the base path where files will be stored."""
        proto_match = re.match(r'^(https?)(\+\w+)?://', base)
        if not proto_match:
            raise AssertionError("not a http url: %r" % base)
        self._unqualified_scheme = proto_match.group(1)
        self._impl_name = _impl_name
        super(HttpTransportBase, self).__init__(base,
                                                _from_transport=_from_transport)
        self._medium = None
        # The range hint is handled dynamically throughout the life
        # of the transport object. We start by trying multi-range
        # requests and if the server returns bogus results, we
        # retry with single range requests and, finally, we
        # forget about ranges if the server really can't
        # understand them. Once acquired, this piece of info is
        # propagated to clones.
        if _from_transport is not None:
            self._range_hint = _from_transport._range_hint
        else:
            self._range_hint = 'multi'
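
    # For illustration (a sketch of the URL forms accepted above, not an
    # exhaustive list):
    #   'http://host/path'         -> _unqualified_scheme 'http'
    #   'https+pycurl://host/path' -> _unqualified_scheme 'https', with the
    #                                 '+pycurl' qualifier selecting the
    #                                 implementation module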

    def has(self, relpath):
        raise NotImplementedError("has() is abstract on %r" % self)

    def get(self, relpath):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        """
        code, response_file = self._get(relpath, None)
        return response_file

    def _get(self, relpath, ranges, tail_amount=0):
        """Get a file, or part of a file.

        :param relpath: Path relative to transport base URL
        :param ranges: None to get the whole file;
            or a list of _CoalescedOffset to fetch parts of a file.
        :param tail_amount: The amount to get from the end of the file.

        :returns: (http_code, result_file)
        """
        raise NotImplementedError(self._get)

    def _remote_path(self, relpath):
        """See ConnectedTransport._remote_path.

        user and passwords are not embedded in the path provided to the
        server.
        """
        url = self._parsed_url.clone(relpath)
        url.user = url.quoted_user = None
        url.password = url.quoted_password = None
        url.scheme = self._unqualified_scheme
        return str(url)

    def _create_auth(self):
        """Returns a dict containing the credentials provided at build time."""
        auth = dict(host=self._parsed_url.host, port=self._parsed_url.port,
                    user=self._parsed_url.user,
                    password=self._parsed_url.password,
                    protocol=self._unqualified_scheme,
                    path=self._parsed_url.path)
        return auth

    def get_smart_medium(self):
        """See Transport.get_smart_medium."""
        if self._medium is None:
            # Since medium holds some state (smart server probing at least), we
            # need to keep it around. Note that this is needed because medium
            # has the same 'base' attribute as the transport so it can't be
            # shared between transports having different bases.
            self._medium = SmartClientHTTPMedium(self)
        return self._medium

    def _degrade_range_hint(self, relpath, ranges, exc_info):
        if self._range_hint == 'multi':
            self._range_hint = 'single'
            mutter('Retry "%s" with single range request' % relpath)
        elif self._range_hint == 'single':
            self._range_hint = None
            mutter('Retry "%s" without ranges' % relpath)
        else:
            # We tried all the tricks, but nothing worked. We re-raise the
            # original exception; the 'mutter' calls above will indicate that
            # further tries were unsuccessful
            raise exc_info[0], exc_info[1], exc_info[2]
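
    # The hint thus degrades monotonically, multi -> single -> None: a
    # server that rejects a multi-range GET (e.g. with '400: Bad Request')
    # triggers a retry with a single range, and a second failure makes the
    # transport fall back to downloading whole files.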

    # _coalesce_offsets is a helper for readv, it tries to combine ranges
    # without degrading readv performance. _bytes_to_read_before_seek is the
    # value used for the limit parameter and has been tuned for other
    # transports. For HTTP, the name is inappropriate but the parameter is
    # still useful and helps reduce the number of chunks in the response. The
    # overhead for a chunk (headers, length, footer around the data itself)
    # is variable but around 50 bytes. We use 128 to reduce the range
    # specifiers that appear in the header; some servers (notably Apache)
    # enforce a maximum length for a header and issue a '400: Bad Request'
    # error when too many ranges are requested.
    _bytes_to_read_before_seek = 128
    # No limit on the number of offsets that get combined into one, we are
    # trying to avoid downloading the whole file.
    _max_readv_combine = 0
    # By default Apache has a limit of ~400 ranges before replying with a
    # '400: Bad Request'. So we go under that amount to be safe.
    _max_get_ranges = 200
    # We impose no limit on the range size. But see _pycurl.py for a
    # different use.
    _get_max_size = 0
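
    # For illustration (a sketch of the coalescing effect, not exact
    # library output): requested offsets [(0, 10), (20, 10), (4096, 10)]
    # with fudge_factor=128 typically coalesce into two ranges, 0-29 and
    # 4096-4105, so the range specifiers become '0-29,4096-4105' instead
    # of three separate entries.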

    def _readv(self, relpath, offsets):
        """Get parts of the file at the given relative path.

        :param offsets: A list of (offset, size) tuples.
        :return: A list or generator of (offset, data) tuples
        """
        # offsets may be a generator, we will iterate it several times, so
        # build a list
        offsets = list(offsets)

        try_again = True
        retried_offset = None
        while try_again:
            try_again = False

            # Coalesce the offsets to minimize the GET requests issued
            sorted_offsets = sorted(offsets)
            coalesced = self._coalesce_offsets(
                sorted_offsets, limit=self._max_readv_combine,
                fudge_factor=self._bytes_to_read_before_seek,
                max_size=self._get_max_size)

            # Turn it into a list, we will iterate it several times
            coalesced = list(coalesced)
            if 'http' in debug.debug_flags:
                mutter('http readv of %s offsets => %s collapsed %s',
                       relpath, len(offsets), len(coalesced))

            # Cache the data read, but only until it's been used
            data_map = {}
            # We will iterate on the data received from the GET requests and
            # serve the corresponding offsets respecting the initial order. We
            # need an offset iterator for that.
            iter_offsets = iter(offsets)
            cur_offset_and_size = iter_offsets.next()

            try:
                for cur_coal, rfile in self._coalesce_readv(relpath, coalesced):
                    # Split the received chunk
                    for offset, size in cur_coal.ranges:
                        start = cur_coal.start + offset
                        rfile.seek(start, os.SEEK_SET)
                        data = rfile.read(size)
                        if len(data) != size:
                            raise errors.ShortReadvError(relpath, start, size,
                                                         actual=len(data))
                        if (start, size) == cur_offset_and_size:
                            # The offsets requested are sorted as the coalesced
                            # ones, no need to cache. Win !
                            yield cur_offset_and_size[0], data
                            cur_offset_and_size = iter_offsets.next()
                        else:
                            # Different sorting. We need to cache.
                            data_map[(start, size)] = data

                    # Yield everything we can
                    while cur_offset_and_size in data_map:
                        # Clean the cached data since we use it
                        # XXX: will break if offsets contains duplicates
                        this_data = data_map.pop(cur_offset_and_size)
                        yield cur_offset_and_size[0], this_data
                        cur_offset_and_size = iter_offsets.next()

            except (errors.ShortReadvError, errors.InvalidRange,
                    errors.InvalidHttpRange, errors.HttpBoundaryMissing), e:
                mutter('Exception %r: %s during http._readv', e, e)
                if (not isinstance(e, errors.ShortReadvError)
                        or retried_offset == cur_offset_and_size):
                    # We don't degrade the range hint for ShortReadvError since
                    # they do not indicate a problem with the server's ability
                    # to handle ranges. Except when we fail to get back a
                    # required offset twice in a row. In that case, falling
                    # back to single range or whole file should help or end up
                    # in a fatal exception.
                    self._degrade_range_hint(relpath, coalesced, sys.exc_info())
                # Some offsets may have been already processed, so we retry
                # only the unsuccessful ones.
                offsets = [cur_offset_and_size] + [o for o in iter_offsets]
                retried_offset = cur_offset_and_size
                try_again = True
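
    # Readv semantics, for illustration (a sketch, 'inventory' being an
    # arbitrary relpath): for a transport t,
    #   list(t.readv('inventory', [(10, 20), (0, 5)]))
    # yields [(10, <20 bytes>), (0, <5 bytes>)]; results respect the
    # requested order even though the GETs are issued over the sorted,
    # coalesced ranges.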

    def _coalesce_readv(self, relpath, coalesced):
        """Issue several GET requests to satisfy the coalesced offsets"""

        def get_and_yield(relpath, coalesced):
            if coalesced:
                # Note that the _get below may raise
                # errors.InvalidHttpRange. It's the caller's responsibility to
                # decide how to retry since it may provide different coalesced
                # offsets.
                code, rfile = self._get(relpath, coalesced)
                for coal in coalesced:
                    yield coal, rfile

        if self._range_hint is None:
            # Download whole file
            for c, rfile in get_and_yield(relpath, coalesced):
                yield c, rfile
        else:
            total = len(coalesced)
            if self._range_hint == 'multi':
                max_ranges = self._max_get_ranges
            elif self._range_hint == 'single':
                max_ranges = total
            else:
                raise AssertionError("Unknown _range_hint %r"
                                     % (self._range_hint,))
            # TODO: Some web servers may ignore the range requests and return
            # the whole file, we may want to detect that and avoid further
            # requests.
            # Hint: test_readv_multiple_get_requests will fail once we do that
            cumul = 0
            ranges = []
            for coal in coalesced:
                if ((self._get_max_size > 0
                     and cumul + coal.length > self._get_max_size)
                        or len(ranges) >= max_ranges):
                    # Get that much and yield
                    for c, rfile in get_and_yield(relpath, ranges):
                        yield c, rfile
                    # Restart with the current offset
                    cumul = coal.length
                    ranges = [coal]
                else:
                    cumul += coal.length
                    ranges.append(coal)
            # Get the rest and yield
            for c, rfile in get_and_yield(relpath, ranges):
                yield c, rfile

    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For HTTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
        return 64 * 1024
    def _post(self, body_bytes):
        """POST body_bytes to .bzr/smart on this transport.

        :returns: (response code, response body file-like object).
        """
        # TODO: Requiring all the body_bytes to be available at the beginning
        # of the POST may require large client buffers. It would be nice to
        # have an interface that allows streaming via POST when possible (and
        # degrades to a local buffer when not).
        raise NotImplementedError(self._post)

    def put_file(self, relpath, f, mode=None):
        """Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param f: File-like object.
        """
        raise errors.TransportNotPossible('http PUT not supported')

    def mkdir(self, relpath, mode=None):
        """Create a directory at the given path."""
        raise errors.TransportNotPossible('http does not support mkdir()')

    def rmdir(self, relpath):
        """See Transport.rmdir."""
        raise errors.TransportNotPossible('http does not support rmdir()')

    def append_file(self, relpath, f, mode=None):
        """Append the text in the file-like object to the final location."""
        raise errors.TransportNotPossible('http does not support append()')

    def copy(self, rel_from, rel_to):
        """Copy the item at rel_from to the location at rel_to"""
        raise errors.TransportNotPossible('http does not support copy()')

    def copy_to(self, relpaths, other, mode=None, pb=None):
        """Copy a set of entries from self into another Transport.

        :param relpaths: A list/generator of entries to be copied.
        """
        # An http transport cannot be the target of a copy (PUT is not
        # supported), but copying *from* http works by downloading and
        # delegating the uploads to the target transport.
        if isinstance(other, HttpTransportBase):
            raise errors.TransportNotPossible(
                'http cannot be the target of copy_to()')
        return super(HttpTransportBase, self).copy_to(relpaths, other,
                                                      mode=mode, pb=pb)

    def lock_write(self, relpath):
        """Lock the given file for exclusive write access.

        :return: A lock object, which should be passed to Transport.unlock()
        """
        raise errors.TransportNotPossible('http does not support lock_write()')

    def _attempted_range_header(self, offsets, tail_amount):
        """Prepare a HTTP Range header at a level the server should accept.

        :return: the range header representing offsets/tail_amount or None if
            no header can be built.
        """
        if self._range_hint == 'multi':
            # Generate the header describing all offsets
            return self._range_header(offsets, tail_amount)
        elif self._range_hint == 'single':
            # Combine all the requested ranges into a single
            # encompassing one
            if len(offsets) > 0:
                if tail_amount not in (0, None):
                    # Nothing we can do here to combine ranges with tail_amount
                    # into a single range, just returns None. The whole file
                    # should be downloaded.
                    return None
                else:
                    start = offsets[0].start
                    last = offsets[-1]
                    end = last.start + last.length - 1
                    whole = self._coalesce_offsets([(start, end - start + 1)],
                                                   limit=0, fudge_factor=0)
                    return self._range_header(list(whole), 0)
            else:
                # Only tail_amount was requested, leave _range_header
                # to do its work
                return self._range_header(offsets, tail_amount)
        else:
            return None
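
    # For instance (a sketch): with _range_hint == 'single', offsets
    # covering bytes 0-9 and 100-109 are combined into the single specifier
    # '0-109'; the bytes in between are downloaded and discarded.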

    @staticmethod
    def _range_header(ranges, tail_amount):
        """Turn a list of bytes ranges into a HTTP Range header value.

        :param ranges: A list of _CoalescedOffset
        :param tail_amount: The amount to get from the end of the file.

        :return: HTTP range header string.

        At least a non-empty ranges *or* a tail_amount must be
        provided.
        """
        strings = []
        for offset in ranges:
            strings.append('%d-%d' % (offset.start,
                                      offset.start + offset.length - 1))

        if tail_amount:
            strings.append('-%d' % tail_amount)

        return ','.join(strings)
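
    # For instance (a sketch): offsets covering bytes 0-9 and 25-34 plus
    # tail_amount=50 produce the value '0-9,25-34,-50'; the implementations
    # are expected to prepend 'bytes=' when emitting the Range header.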

    def _redirected_to(self, source, target):
        """Returns a transport suitable to re-issue a redirected request.

        :param source: The source url as returned by the server.
        :param target: The target url as returned by the server.

        The redirection can be handled only if the relpath involved is not
        renamed by the redirection.

        :returns: A transport or None.
        """
        parsed_source = self._split_url(source)
        parsed_target = self._split_url(target)
        pl = len(self._parsed_url.path)
        # determine the excess tail - the relative path that was in
        # the original request but not part of this transport's URL.
        excess_tail = parsed_source.path[pl:].strip("/")
        if not target.endswith(excess_tail):
            # The final part of the url has been renamed, we can't handle the
            # redirection.
            return None

        target_path = parsed_target.path
        if excess_tail:
            # Drop the tail that was in the redirect but not part of
            # the path of this transport.
            target_path = target_path[:-len(excess_tail)]

        if parsed_target.scheme in ('http', 'https'):
            # Same protocol family (i.e. http[s]), we will preserve the same
            # http client implementation when a redirection occurs from one to
            # the other (otherwise users may be surprised that bzr switches
            # from one implementation to the other, and devs may suffer
            # debugging it).
            if (parsed_target.scheme == self._unqualified_scheme
                and parsed_target.host == self._parsed_url.host
                and parsed_target.port == self._parsed_url.port
                and (parsed_target.user is None or
                     parsed_target.user == self._parsed_url.user)):
                # If a user is specified, it should match, we don't care about
                # passwords, wrong passwords will be rejected anyway.
                return self.clone(target_path)
            else:
                # Rebuild the url preserving the scheme qualification and the
                # credentials (if they don't apply, the redirected to server
                # will tell us, but if they do apply, we avoid prompting the
                # user)
                redir_scheme = parsed_target.scheme + '+' + self._impl_name
                new_url = self._unsplit_url(redir_scheme,
                                            self._parsed_url.user,
                                            self._parsed_url.password,
                                            parsed_target.host,
                                            parsed_target.port,
                                            target_path)
                return transport.get_transport_from_url(new_url)
        else:
            # Redirected to a different protocol
            new_url = self._unsplit_url(parsed_target.scheme,
                                        parsed_target.user,
                                        parsed_target.password,
                                        parsed_target.host,
                                        parsed_target.port,
                                        target_path)
            return transport.get_transport_from_url(new_url)
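
    # For illustration (a sketch): a 'http+urllib://host/a' transport whose
    # request for 'http://host/a/b/.bzr/smart' is redirected to
    # 'https://host/a/b/.bzr/smart' returns a transport for
    # 'https+urllib://host/a', preserving the client implementation across
    # the scheme change.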


# TODO: May be better located in smart/medium.py with the other
# SmartMedium classes
class SmartClientHTTPMedium(medium.SmartClientMedium):

    def __init__(self, http_transport):
        super(SmartClientHTTPMedium, self).__init__(http_transport.base)
        # We don't want to create a circular reference between the http
        # transport and its associated medium. Since the transport will live
        # longer than the medium, the medium keeps only a weak reference to
        # its transport.
        self._http_transport_ref = weakref.ref(http_transport)

    def get_request(self):
        return SmartClientHTTPMediumRequest(self)

    def should_probe(self):
        return True

    def remote_path_from_transport(self, transport):
        # Strip the optional 'bzr+' prefix from transport so it will have the
        # same scheme as self.
        transport_base = transport.base
        if transport_base.startswith('bzr+'):
            transport_base = transport_base[4:]
        rel_url = urlutils.relative_url(self.base, transport_base)
        return urlutils.unquote(rel_url)
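
    # For instance (a sketch): with self.base 'http://host/a/', a transport
    # based at 'bzr+http://host/a/b/' maps to the remote path 'b/'.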

    def send_http_smart_request(self, bytes):
        try:
            # Get back the http_transport held by the weak reference
            t = self._http_transport_ref()
            code, body_filelike = t._post(bytes)
            if code != 200:
                raise errors.InvalidHttpResponse(
                    t._remote_path('.bzr/smart'),
                    'Expected 200 response code, got %r' % (code,))
        except (errors.InvalidHttpResponse, errors.ConnectionReset), e:
            raise errors.SmartProtocolError(str(e))
        return body_filelike

    def _report_activity(self, bytes, direction):
        """See SmartMedium._report_activity.

        Does nothing; the underlying plain HTTP transport will report the
        activity that this medium would report.
        """
        pass

    def disconnect(self):
        """See SmartClientMedium.disconnect()."""
        t = self._http_transport_ref()
        t.disconnect()


# TODO: May be better located in smart/medium.py with the other
# SmartMediumRequest classes
class SmartClientHTTPMediumRequest(medium.SmartClientMediumRequest):
    """A SmartClientMediumRequest that works with an HTTP medium."""

    def __init__(self, client_medium):
        medium.SmartClientMediumRequest.__init__(self, client_medium)
        self._buffer = ''

    def _accept_bytes(self, bytes):
        self._buffer += bytes

    def _finished_writing(self):
        data = self._medium.send_http_smart_request(self._buffer)
        self._response_body = data

    def _read_bytes(self, count):
        """See SmartClientMediumRequest._read_bytes."""
        return self._response_body.read(count)

    def _read_line(self):
        line, excess = medium._get_line(self._response_body.read)
        if excess != '':
            raise AssertionError(
                '_get_line returned excess bytes, but this mediumrequest '
                'cannot handle excess. (%r)' % (excess,))
        return line

    def _finished_reading(self):
        """See SmartClientMediumRequest._finished_reading."""
        pass


def unhtml_roughly(maybe_html, length_limit=1000):
    """Very approximate html->text translation, for presenting error bodies.

    :param length_limit: Truncate the result to this many characters.

    >>> unhtml_roughly("<b>bad</b> things happened\\n")
    ' bad  things happened '
    """
    return re.subn(r"(<[^>]*>|\n|&nbsp;)", " ", maybe_html)[0][:length_limit]
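

# A minimal usage sketch (assuming a reachable server; get_transport is the
# standard entry point and the URL scheme selects the implementation):
#
#   from bzrlib.transport import get_transport
#   t = get_transport('http+urllib://example.com/branch/')
#   content = t.get('README').read()
#   pairs = list(t.readv('README', [(0, 10)]))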