# Copyright (C) 2005, 2006 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Base implementation of Transport over http.

There are separate implementation modules for each http client implementation.
"""

from cStringIO import StringIO
import mimetools
import re
import sys
import urllib
import urlparse

from bzrlib import (
    errors,
    ui,
    urlutils,
    )
from bzrlib.smart import medium
from bzrlib.symbol_versioning import (
    deprecated_method,
    zero_seventeen,
    )
from bzrlib.trace import mutter
from bzrlib.transport import (
    ConnectedTransport,
    )

# FIXME: There are two known cases where we open a new connection during the
# lifetime of the transport:
# - when an error is received from the server,
# - when the keep-alive header reaches zero.
# This should be taken into account for connection sharing by either failing
# without reconnecting or informing the clones that a new connection has been
# established.

# TODO: This is no longer used by HttpTransport_urllib (extracting the auth
# info and prompting the user for a password have been split); only the tests
# still use it. It should be deleted and the tests rewritten ASAP to stay in
# sync.
def extract_auth(url, password_manager):
    """Extract auth parameters from an HTTP/HTTPS url and add them to the given
    password manager.  Return the url, minus those auth parameters (which
    confuse urllib2).
    """
    assert re.match(r'^(https?)(\+\w+)?://', url), \
        'invalid absolute url %r' % url
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

    if '@' in netloc:
        auth, netloc = netloc.split('@', 1)
        if ':' in auth:
            username, password = auth.split(':', 1)
        else:
            username, password = auth, None
        host = netloc.split(':', 1)[0]
        username = urllib.unquote(username)
        if password is not None:
            password = urllib.unquote(password)
        else:
            password = ui.ui_factory.get_password(
                prompt='HTTP %(user)s@%(host)s password',
                user=username, host=host)
        password_manager.add_password(None, host, username, password)
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url
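

def _example_extract_auth():
    # Usage sketch (illustrative only; the URL and credentials are made up):
    # the url comes back stripped of auth info, while the password manager
    # learns the credentials for later use by urllib2.
    import urllib2
    pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
    clean_url = extract_auth('http://joe:secret@example.com/repo', pm)
    assert clean_url == 'http://example.com/repo'
    assert pm.find_user_password(None, 'example.com') == ('joe', 'secret')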


def _extract_headers(header_text, url):
    """Extract the mapping for an rfc2822 header.

    This is a helper function for the test suite and for _pycurl.
    (urllib already parses the headers for us.)

    In the case that there are multiple headers inside the file,
    the last one is returned.

    :param header_text: A string of header information.
        This expects that the first line of a header will always be HTTP ...
    :param url: The url we are parsing, so we can raise nice errors
    :return: mimetools.Message object, which basically acts like a case
        insensitive dictionary.
    """
    first_header = True
    remaining = header_text

    if not remaining:
        raise errors.InvalidHttpResponse(url, 'Empty headers')

    while remaining:
        header_file = StringIO(remaining)
        first_line = header_file.readline()
        if not first_line.startswith('HTTP'):
            if first_header: # The first header *must* start with HTTP
                raise errors.InvalidHttpResponse(url,
                    'Opening header line did not start with HTTP: %s'
                    % (first_line,))
            else:
                break # We are done parsing
        first_header = False
        m = mimetools.Message(header_file)

        # mimetools.Message parses the first header up to a blank line.
        # So while there is remaining data, it probably means there is
        # another header to be parsed.
        # Get rid of any preceding whitespace, which (if it is all
        # whitespace) will get rid of everything.
        remaining = header_file.read().lstrip()
    return m
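

def _example_extract_headers():
    # Usage sketch (illustrative; the header text is made up): after a
    # redirect, the raw header text holds two responses back to back and the
    # mapping from the *last* one is returned, with case-insensitive lookup.
    text = ('HTTP/1.1 302 Found\r\n'
            'Location: http://example.com/new\r\n'
            '\r\n'
            'HTTP/1.1 200 OK\r\n'
            'Content-Type: text/plain\r\n'
            '\r\n')
    msg = _extract_headers(text, 'http://example.com/old')
    assert msg['content-type'] == 'text/plain'
    assert 'location' not in msg  # only the last header block survives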


class HttpTransportBase(ConnectedTransport, medium.SmartClientMedium):
    """Base class for http implementations.

    Does URL parsing, etc, but not any network IO.

    The protocol can be given as e.g. http+urllib://host/ to use a particular
    implementation.
    """

    # _unqualified_scheme: "http" or "https"
    # _scheme: may have "+pycurl", etc

    def __init__(self, base, from_transport=None):
        """Set the base path where files will be stored."""
        proto_match = re.match(r'^(https?)(\+\w+)?://', base)
        if not proto_match:
            raise AssertionError("not a http url: %r" % base)
        self._unqualified_scheme = proto_match.group(1)
        impl_name = proto_match.group(2)
        if impl_name:
            impl_name = impl_name[1:]
        self._impl_name = impl_name
        super(HttpTransportBase, self).__init__(base, from_transport)
        # The range hint is handled dynamically throughout the life
        # of the transport object. We start by trying multi-range
        # requests and if the server returns bogus results, we
        # retry with single range requests and, finally, we
        # forget about ranges if the server really can't
        # understand. Once acquired, this piece of info is
        # propagated to clones.
        if from_transport is not None:
            self._range_hint = from_transport._range_hint
        else:
            self._range_hint = 'multi'

    def _remote_path(self, relpath):
        """Produce absolute path, adjusting protocol."""
        relative = urlutils.unescape(relpath).encode('utf-8')
        path = self._combine_paths(self._path, relative)
        return self._unsplit_url(self._unqualified_scheme,
                                 self._user, self._password,
                                 self._host, self._port,
                                 path)

    def has(self, relpath):
        raise NotImplementedError("has() is abstract on %r" % self)

    def get(self, relpath):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        """
        code, response_file = self._get(relpath, None)
        return response_file

    def _get(self, relpath, ranges, tail_amount=0):
        """Get a file, or part of a file.

        :param relpath: Path relative to transport base URL
        :param ranges: None to get the whole file;
            or a list of _CoalescedOffset to fetch parts of a file.
        :param tail_amount: The amount to get from the end of the file.

        :returns: (http_code, result_file)
        """
        raise NotImplementedError(self._get)

    def get_request(self):
        return SmartClientHTTPMediumRequest(self)

    def get_smart_medium(self):
        """See Transport.get_smart_medium.

        HttpTransportBase directly implements the minimal interface of
        SmartClientMedium, so this returns self.
        """
        return self

    def _degrade_range_hint(self, relpath, ranges, exc_info):
        if self._range_hint == 'multi':
            self._range_hint = 'single'
            mutter('Retry "%s" with single range request' % relpath)
        elif self._range_hint == 'single':
            self._range_hint = None
            mutter('Retry "%s" without ranges' % relpath)
        else:
            # We tried all the tricks, but nothing worked. We re-raise the
            # original exception; the 'mutter' calls above will indicate that
            # further tries were unsuccessful.
            raise exc_info[0], exc_info[1], exc_info[2]
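
    # Each call to _degrade_range_hint walks the hint down one step:
    #   'multi' -> 'single' -> None -> re-raise the original error.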

    def _get_ranges_hinted(self, relpath, ranges):
        """Issue a ranged GET request taking server capabilities into account.

        Depending on the errors returned by the server, we try several GET
        requests, trying to minimize the data transferred.

        :param relpath: Path relative to transport base URL
        :param ranges: None to get the whole file;
            or a list of _CoalescedOffset to fetch parts of a file.
        :returns: A file handle containing at least the requested ranges.
        """
        try_again = True
        while try_again:
            try_again = False
            try:
                code, f = self._get(relpath, ranges)
            except errors.InvalidRange, e:
                exc_info = sys.exc_info()
                self._degrade_range_hint(relpath, ranges, exc_info)
                try_again = True
        return f

    # _coalesce_offsets is a helper for readv, it tries to combine ranges
    # without degrading readv performance. _bytes_to_read_before_seek is the
    # value used for the fudge_factor parameter and has been tuned for other
    # transports. For HTTP, the name is inappropriate but the parameter is
    # still useful and helps reduce the number of chunks in the response. The
    # overhead for a chunk (headers, length, footer) around the data itself
    # is variable but around 50 bytes. We use 128 to reduce the range
    # specifiers that appear in the header; some servers (notably Apache)
    # enforce a maximum length for a header and issue a '400: Bad request'
    # error when too many ranges are specified.
    _bytes_to_read_before_seek = 128
    # No limit on the number of offsets that get combined into one, as we are
    # trying to avoid downloading the whole file.
    _max_readv_combine = 0
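
    # Illustration of the coalescing effect (simplified; the real logic lives
    # in Transport._coalesce_offsets): with fudge_factor=128, sorted ranges
    # are merged when the gap between them is small enough, e.g.:
    #   [(0, 100), (150, 50)]  =>  one range covering bytes 0..199
    #   [(0, 100), (300, 50)]  =>  two separate ranges (gap of 200 > 128)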

    def readv(self, relpath, offsets):
        """Get parts of the file at the given relative path.

        :param offsets: A list of (offset, size) tuples.
        :return: A list or generator of (offset, data) tuples
        """
        sorted_offsets = sorted(list(offsets))
        fudge = self._bytes_to_read_before_seek
        coalesced = self._coalesce_offsets(sorted_offsets,
                                           limit=self._max_readv_combine,
                                           fudge_factor=fudge)
        coalesced = list(coalesced)
        mutter('http readv of %s offsets => %s collapsed %s',
               relpath, len(offsets), len(coalesced))

        f = self._get_ranges_hinted(relpath, coalesced)
        for start, size in offsets:
            try_again = True
            while try_again:
                try_again = False
                f.seek(start, ((start < 0) and 2) or 0)
                start = f.tell()
                try:
                    data = f.read(size)
                    if len(data) != size:
                        raise errors.ShortReadvError(relpath, start, size,
                                                     actual=len(data))
                except errors.ShortReadvError, e:
                    self._degrade_range_hint(relpath, coalesced, sys.exc_info())

                    # Since the offsets and the ranges may not be in the same
                    # order, we don't try to calculate a restricted single
                    # range encompassing unprocessed offsets.

                    # Note: we replace 'f' here, it may need cleaning one day
                    # before being thrown away.
                    f = self._get_ranges_hinted(relpath, coalesced)
                    try_again = True

            # After one or more tries, we get the data.
            yield start, data
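
    # Typical use (illustrative; the file name and offsets are made up):
    #
    #   for start, data in t.readv('some-file', [(512, 128), (0, 64)]):
    #       ...  # data is yielded in the order requested, not sorted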

    @staticmethod
    @deprecated_method(zero_seventeen)
    def offsets_to_ranges(offsets):
        """Turn a list of offsets and sizes into a list of byte ranges.

        :param offsets: A list of tuples of (start, size).  An empty list
            is allowed.
        :return: a list of inclusive byte ranges (start, end).
            Adjacent ranges will be combined.
        """
        # Make sure we process sorted offsets
        offsets = sorted(offsets)

        prev_end = None
        combined = []

        for start, size in offsets:
            end = start + size - 1
            if prev_end is None:
                combined.append([start, end])
            elif start <= prev_end + 1:
                combined[-1][1] = end
            else:
                combined.append([start, end])
            prev_end = end

        return combined
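
    # For example (illustrative): adjacent offsets merge, distant ones don't:
    #   offsets_to_ranges([(10, 4), (14, 4), (20, 4)]) => [[10, 17], [20, 23]]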

    def _post(self, body_bytes):
        """POST body_bytes to .bzr/smart on this transport.

        :returns: (response code, response body file-like object).
        """
        # TODO: Requiring all the body_bytes to be available at the beginning
        # of the POST may require large client buffers. It would be nice to
        # have an interface that allows streaming via POST when possible (and
        # degrades to a local buffer when not).
        raise NotImplementedError(self._post)

    def put_file(self, relpath, f, mode=None):
        """Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param f: File-like object.
        """
        raise errors.TransportNotPossible('http PUT not supported')

    def mkdir(self, relpath, mode=None):
        """Create a directory at the given path."""
        raise errors.TransportNotPossible('http does not support mkdir()')

    def rmdir(self, relpath):
        """See Transport.rmdir."""
        raise errors.TransportNotPossible('http does not support rmdir()')

    def append_file(self, relpath, f, mode=None):
        """Append the text in the file-like object into the final
        location.
        """
        raise errors.TransportNotPossible('http does not support append()')

    def copy(self, rel_from, rel_to):
        """Copy the item at rel_from to the location at rel_to."""
        raise errors.TransportNotPossible('http does not support copy()')

    def copy_to(self, relpaths, other, mode=None, pb=None):
        """Copy a set of entries from self into another Transport.

        :param relpaths: A list/generator of entries to be copied.