1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
|
#!/usr/bin/env python
"""\
An implementation of the Transport object for http access.
"""
import os, errno
import gzip
import urllib2
from cStringIO import StringIO

from bzrlib.transport import Transport, register_transport, \
     TransportNotPossible, NoSuchFile, NonRelativePath, \
     TransportError
from errors import BzrError, BzrCheckError
from branch import Branch, BZR_BRANCH_FORMAT
from trace import mutter
# velocitynet.com.au transparently proxies connections and thereby
# breaks keep-alive -- sucks!
ENABLE_URLGRABBER = True
if ENABLE_URLGRABBER:
import urlgrabber
import urlgrabber.keepalive
import urlgrabber.grabber
urlgrabber.keepalive.DEBUG = 0
def get_url(path, compressed=False):
try:
url = path
if compressed:
url += '.gz'
mutter("grab url %s" % url)
url_f = urlgrabber.urlopen(url, keepalive=1, close_connection=0)
if not compressed:
return url_f
else:
return gzip.GzipFile(fileobj=StringIO(url_f.read()))
except urllib2.URLError, e:
raise BzrError("remote fetch failed: %r: %s" % (url, e))
except urlgrabber.grabber.URLGrabError, e:
raise BzrError("remote fetch failed: %r: %s" % (url, e))
else:
def get_url(url, compressed=False):
import urllib2
if compressed:
url += '.gz'
mutter("get_url %s" % url)
url_f = urllib2.urlopen(url)
if compressed:
return gzip.GzipFile(fileobj=StringIO(url_f.read()))
else:
return url_f
def _find_remote_root(url):
    """Return the prefix URL that corresponds to the branch root.

    Walks upward from url, probing each prefix for a readable
    .bzr/branch-format file.

    :raises BzrError: If no prefix of url contains a branch, or if a
        branch is found but its format is unsupported.
    """
    orig_url = url
    while True:
        try:
            ff = get_url(url + '/.bzr/branch-format')
        except (BzrError, urllib2.URLError):
            # get_url raises BzrError when urlgrabber is enabled and
            # URLError otherwise; either way this prefix has no branch,
            # so try the parent directory.
            pass
        else:
            try:
                fmt = ff.read()
            finally:
                ff.close()
            fmt = fmt.rstrip('\r\n')
            # The format check is outside the fetch try-block so that
            # this BzrError propagates instead of being mistaken for a
            # failed fetch.
            if fmt != BZR_BRANCH_FORMAT.rstrip('\r\n'):
                raise BzrError("sorry, branch format %r not supported at url %s"
                               % (fmt, url))
            return url

        try:
            idx = url.rindex('/')
        except ValueError:
            raise BzrError('no branch root found for URL %s' % orig_url)
        url = url[:idx]
class HttpTransportError(TransportError):
    """Raised when an HTTP transport operation fails unexpectedly."""
    pass
class HttpTransport(Transport):
"""This is the transport agent for http:// access.
TODO: Implement pipelined versions of all of the *_multi() functions.
"""
def __init__(self, base):
"""Set the base path where files will be stored."""
assert base.startswith('http://') or base.startswith('https://')
super(HttpTransport, self).__init__(base)
# In the future we might actually connect to the remote host
# rather than using get_url
# self._connection = None
def should_cache(self):
"""Return True if the data pulled across should be cached locally.
"""
return True
def clone(self, offset=None):
"""Return a new HttpTransport with root at self.base + offset
For now HttpTransport does not actually connect, so just return
a new HttpTransport object.
"""
if offset is None:
return HttpTransport(self.base)
else:
return HttpTransport(self.abspath(offset))
def abspath(self, relpath):
"""Return the full url to the given relative path.
This can be supplied with a string or a list
"""
if isinstance(relpath, basestring):
relpath = [relpath]
baseurl = self.base.rstrip('/')
return '/'.join([baseurl] + relpath)
def relpath(self, abspath):
if not abspath.startswith(self.base):
raise NonRelativePath('path %r is not under base URL %r'
% (abspath, self.base))
pl = len(self.base)
return abspath[pl:].lstrip('/')
def has(self, relpath):
"""Does the target location exist?
TODO: HttpTransport.has() should use a HEAD request,
not a full GET request.
"""
try:
f = get_url(self.abspath(relpath))
return True
except BzrError:
return False
except urllib2.URLError:
return False
except IOError, e:
if e.errno == errno.ENOENT:
return False
raise HttpTransportError(orig_error=e)
def get(self, relpath, decode=False):
"""Get the file at the given relative path.
:param relpath: The relative path to the file
"""
try:
return get_url(self.abspath(relpath))
except BzrError, e:
raise NoSuchFile(orig_error=e)
except urllib2.URLError, e:
raise NoSuchFile(orig_error=e)
except IOError, e:
raise NoSuchFile(orig_error=e)
except Exception,e:
raise HttpTransportError(orig_error=e)
def put(self, relpath, f):
"""Copy the file-like or string object into the location.
:param relpath: Location to put the contents, relative to base.
:param f: File-like or string object.
"""
raise TransportNotPossible('http PUT not supported')
def mkdir(self, relpath):
"""Create a directory at the given path."""
raise TransportNotPossible('http does not support mkdir()')
def append(self, relpath, f):
"""Append the text in the file-like object into the final
location.
"""
raise TransportNotPossible('http does not support append()')
def copy(self, rel_from, rel_to):
"""Copy the item at rel_from to the location at rel_to"""
raise TransportNotPossible('http does not support copy()')
def copy_to(self, relpaths, other, pb=None):
"""Copy a set of entries from self into another Transport.
:param relpaths: A list/generator of entries to be copied.
TODO: if other is LocalTransport, is it possible to
do better than put(get())?
"""
# At this point HttpTransport might be able to check and see if
# the remote location is the same, and rather than download, and
# then upload, it could just issue a remote copy_this command.
if isinstance(other, HttpTransport):
raise TransportNotPossible('http cannot be the target of copy_to()')
else:
return super(HttpTransport, self).copy_to(relpaths, other, pb=pb)
def move(self, rel_from, rel_to):
"""Move the item at rel_from to the location at rel_to"""
raise TransportNotPossible('http does not support move()')
def delete(self, relpath):
"""Delete the item at relpath"""
raise TransportNotPossible('http does not support delete()')
def async_get(self, relpath):
"""Make a request for an file at the given location, but
don't worry about actually getting it yet.
:rtype: AsyncFile
"""
raise NotImplementedError
def list_dir(self, relpath):
"""Return a list of all files at the given location.
WARNING: many transports do not support this, so trying avoid using
it if at all possible.
"""
raise TransportNotPossible('http does not support list_dir()')
def stat(self, relpath):
"""Return the stat information for a file.
"""
raise TransportNotPossible('http does not support stat()')
def lock_read(self, relpath):
"""Lock the given file for shared (read) access.
:return: A lock object, which should be passed to Transport.unlock()
"""
# The old RemoteBranch ignore lock for reading, so we will
# continue that tradition and return a bogus lock object.
class BogusLock(object):
def __init__(self, path):
self.path = path
def unlock(self):
pass
return BogusLock(relpath)
def lock_write(self, relpath):
"""Lock the given file for exclusive (write) access.
WARNING: many transports do not support this, so trying avoid using it
:return: A lock object, which should be passed to Transport.unlock()
"""
raise TransportNotPossible('http does not support lock_write()')
# Make this transport available for both plain and SSL http URLs.
register_transport('http://', HttpTransport)
register_transport('https://', HttpTransport)
|