13
13
# You should have received a copy of the GNU General Public License
14
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
17
"""Container format for Bazaar data.
19
"Containers" and "records" are described in doc/developers/container-format.txt.
19
"Containers" and "records" are described in
20
doc/developers/container-format.txt.
23
from cStringIO import StringIO
24
26
from bzrlib import errors
57
59
raise errors.InvalidRecordError(str(e))
62
class ContainerSerialiser(object):
63
"""A helper class for serialising containers.
65
It simply returns bytes from method calls to 'begin', 'end' and
66
'bytes_record'. You may find ContainerWriter to be a more convenient
71
"""Return the bytes to begin a container."""
72
return FORMAT_ONE + "\n"
75
"""Return the bytes to finish a container."""
78
def bytes_record(self, bytes, names):
79
"""Return the bytes for a Bytes record with the given name and
85
byte_sections.append(str(len(bytes)) + "\n")
87
for name_tuple in names:
88
# Make sure we're writing valid names. Note that we will leave a
89
# half-written record if a name is bad!
90
for name in name_tuple:
92
byte_sections.append('\x00'.join(name_tuple) + "\n")
94
byte_sections.append("\n")
95
# Finally, the contents.
96
byte_sections.append(bytes)
97
# XXX: This causes a memory copy of bytes in size, but is usually
98
# faster than two write calls (12 vs 13 seconds to output a gig of
99
# 1k records.) - results may differ on significantly larger records
100
# like .iso's but as they should be rare in any case and thus not
101
# likely to be the common case. The biggest issue is causing extreme
102
# memory pressure in that case. One possibly improvement here is to
103
# check the size of the content before deciding to join here vs call
105
return ''.join(byte_sections)
60
108
class ContainerWriter(object):
61
"""A class for writing containers."""
109
"""A class for writing containers to a file.
111
:attribute records_written: The number of user records added to the
112
container. This does not count the prelude or suffix of the container
113
introduced by the begin() and end() methods.
63
116
def __init__(self, write_func):
66
119
:param write_func: a callable that will be called when this
67
120
ContainerWriter needs to write some bytes.
69
self.write_func = write_func
122
self._write_func = write_func
123
self.current_offset = 0
124
self.records_written = 0
125
self._serialiser = ContainerSerialiser()
72
128
"""Begin writing a container."""
73
self.write_func(FORMAT_ONE + "\n")
129
self.write_func(self._serialiser.begin())
131
    def write_func(self, bytes):
        # Forward to the real write callable while tracking the running
        # offset, so add_bytes_record can return (offset, length) memos
        # suitable for later random access.
        self._write_func(bytes)
        self.current_offset += len(bytes)
76
136
"""Finish writing a container."""
137
self.write_func(self._serialiser.end())
79
139
def add_bytes_record(self, bytes, names):
80
"""Add a Bytes record with the given names."""
84
self.write_func(str(len(bytes)) + "\n")
87
# Make sure we're writing valid names. Note that we will leave a
88
# half-written record if a name is bad!
90
self.write_func(name + "\n")
93
# Finally, the contents.
94
self.write_func(bytes)
140
"""Add a Bytes record with the given names.
142
:param bytes: The bytes to insert.
143
:param names: The names to give the inserted bytes. Each name is
144
a tuple of bytestrings. The bytestrings may not contain
146
:return: An offset, length tuple. The offset is the offset
147
of the record within the container, and the length is the
148
length of data that will need to be read to reconstitute the
149
record. These offset and length can only be used with the pack
150
interface - they might be offset by headers or other such details
151
and thus are only suitable for use by a ContainerReader.
153
current_offset = self.current_offset
154
serialised_record = self._serialiser.bytes_record(bytes, names)
155
self.write_func(serialised_record)
156
self.records_written += 1
157
# return a memo of where we wrote data to allow random access.
158
return current_offset, self.current_offset - current_offset
161
class ReadVFile(object):
162
"""Adapt a readv result iterator to a file like protocol.
164
The readv result must support the iterator protocol returning (offset,
168
# XXX: This could be a generic transport class, as other code may want to
169
# gradually consume the readv result.
171
def __init__(self, readv_result):
172
"""Construct a new ReadVFile wrapper.
174
:seealso: make_readv_reader
176
:param readv_result: the most recent readv result - list or generator
178
# readv can return a sequence or an iterator, but we require an
179
# iterator to know how much has been consumed.
180
readv_result = iter(readv_result)
181
self.readv_result = readv_result
185
if (self._string is None or
186
self._string.tell() == self._string_length):
187
offset, data = self.readv_result.next()
188
self._string_length = len(data)
189
self._string = StringIO(data)
191
def read(self, length):
193
result = self._string.read(length)
194
if len(result) < length:
195
raise errors.BzrError('wanted %d bytes but next '
196
'hunk only contains %d: %r...' %
197
(length, len(result), result[:20]))
201
"""Note that readline will not cross readv segments."""
203
result = self._string.readline()
204
if self._string.tell() == self._string_length and result[-1] != '\n':
205
raise errors.BzrError('short readline in the readvfile hunk: %r'
210
def make_readv_reader(transport, filename, requested_records):
    """Create a ContainerReader that will read selected records only.

    :param transport: The transport the pack file is located on.
    :param filename: The filename of the pack file.
    :param requested_records: The record offset, length tuples as returned
        by add_bytes_record for the desired records.
    """
    # Always fetch the format line (plus its newline) so the reader can
    # validate the container format before the requested records.
    readv_blocks = [(0, len(FORMAT_ONE)+1)]
    readv_blocks.extend(requested_records)
    result = ContainerReader(ReadVFile(
        transport.readv(filename, readv_blocks)))
    return result
97
225
class BaseReader(object):
124
252
is a ``list`` and bytes is a function that takes one argument,
127
You **must not** call the callable after advancing the interator to the
255
You **must not** call the callable after advancing the iterator to the
128
256
next record. That is, this code is invalid::
130
258
record_iter = container.iter_records()
131
259
names1, callable1 = record_iter.next()
132
260
names2, callable2 = record_iter.next()
133
261
bytes1 = callable1(None)
135
263
As it will give incorrect results and invalidate the state of the
138
:raises ContainerError: if any sort of containter corruption is
266
:raises ContainerError: if any sort of container corruption is
139
267
detected, e.g. UnknownContainerFormatError is the format of the
140
268
container is unrecognised.
141
269
:seealso: ContainerReader.read
143
271
self._read_format()
144
272
return self._iter_records()
146
274
def iter_record_objects(self):
147
275
"""Iterate over the container, yielding each record as it is read.
197
325
all_names = set()
198
326
for record_names, read_bytes in self.iter_records():
200
for name in record_names:
201
_check_name_encoding(name)
328
for name_tuple in record_names:
329
for name in name_tuple:
330
_check_name_encoding(name)
202
331
# Check that the name is unique. Note that Python will refuse
203
332
# to decode non-shortest forms of UTF-8 encoding, so there is no
204
333
# risk that the same unicode string has been encoded two
205
334
# different ways.
206
if name in all_names:
207
raise errors.DuplicateRecordNameError(name)
335
if name_tuple in all_names:
336
raise errors.DuplicateRecordNameError(name_tuple)
337
all_names.add(name_tuple)
209
338
excess_bytes = self.reader_func(1)
210
339
if excess_bytes != '':
211
340
raise errors.ContainerHasExcessDataError(excess_bytes)
263
394
:raises ContainerError: if this record is invalid.
265
396
names, read_bytes = self.read()
267
_check_name_encoding(name)
397
for name_tuple in names:
398
for name in name_tuple:
399
_check_name_encoding(name)
403
class ContainerPushParser(object):
404
"""A "push" parser for container format 1.
406
It accepts bytes via the ``accept_bytes`` method, and parses them into
407
records which can be retrieved via the ``read_pending_records`` method.
412
self._state_handler = self._state_expecting_format_line
413
self._parsed_records = []
414
self._reset_current_record()
415
self.finished = False
417
    def _reset_current_record(self):
        # Clear the per-record parse state (pending body length and
        # accumulated name tuples) ready for the next record.
        self._current_record_length = None
        self._current_record_names = []
421
    def accept_bytes(self, bytes):
        """Feed bytes to the parser and pump the state machine.

        :param bytes: a byte string to append to the internal buffer.
        """
        self._buffer += bytes
        # Keep iterating the state machine until it stops consuming bytes from
        # the buffer: run until a full pass neither shrinks the buffer nor
        # changes the current state.
        last_buffer_length = None
        cur_buffer_length = len(self._buffer)
        # Track the handler too: a transition that consumes no bytes (e.g.
        # switching states on an empty line) must still trigger another pass.
        last_state_handler = None
        while (cur_buffer_length != last_buffer_length
               or last_state_handler != self._state_handler):
            last_buffer_length = cur_buffer_length
            last_state_handler = self._state_handler
            self._state_handler()
            cur_buffer_length = len(self._buffer)
435
def read_pending_records(self, max=None):
437
records = self._parsed_records[:max]
438
del self._parsed_records[:max]
441
records = self._parsed_records
442
self._parsed_records = []
445
def _consume_line(self):
446
"""Take a line out of the buffer, and return the line.
448
If a newline byte is not found in the buffer, the buffer is
449
unchanged and this returns None instead.
451
newline_pos = self._buffer.find('\n')
452
if newline_pos != -1:
453
line = self._buffer[:newline_pos]
454
self._buffer = self._buffer[newline_pos+1:]
459
def _state_expecting_format_line(self):
460
line = self._consume_line()
462
if line != FORMAT_ONE:
463
raise errors.UnknownContainerFormatError(line)
464
self._state_handler = self._state_expecting_record_type
466
def _state_expecting_record_type(self):
467
if len(self._buffer) >= 1:
468
record_type = self._buffer[0]
469
self._buffer = self._buffer[1:]
470
if record_type == 'B':
471
self._state_handler = self._state_expecting_length
472
elif record_type == 'E':
474
self._state_handler = self._state_expecting_nothing
476
raise errors.UnknownRecordTypeError(record_type)
478
def _state_expecting_length(self):
479
line = self._consume_line()
482
self._current_record_length = int(line)
484
raise errors.InvalidRecordError(
485
"%r is not a valid length." % (line,))
486
self._state_handler = self._state_expecting_name
488
    def _state_expecting_name(self):
        """Parse one name line of a Bytes record header.

        An empty line ends the name list and moves to the body state; if no
        complete line is buffered yet, _consume_line yields None and the
        parser stays in this state.
        """
        encoded_name_parts = self._consume_line()
        if encoded_name_parts == '':
            # Blank line: end of the names section, the body follows.
            self._state_handler = self._state_expecting_body
        elif encoded_name_parts:
            # Parts of a name tuple are NUL-separated on a single line.
            name_parts = tuple(encoded_name_parts.split('\x00'))
            for name_part in name_parts:
                _check_name(name_part)
            self._current_record_names.append(name_parts)
498
    def _state_expecting_body(self):
        """Accumulate the record body; emit the record once fully buffered."""
        if len(self._buffer) >= self._current_record_length:
            # Split the body off the front of the buffer.
            body_bytes = self._buffer[:self._current_record_length]
            self._buffer = self._buffer[self._current_record_length:]
            record = (self._current_record_names, body_bytes)
            self._parsed_records.append(record)
            # Reset per-record state and go back to expecting a record type.
            self._reset_current_record()
            self._state_handler = self._state_expecting_record_type
507
def _state_expecting_nothing(self):
510
def read_size_hint(self):
512
if self._state_handler == self._state_expecting_body:
513
remaining = self._current_record_length - len(self._buffer)
516
return max(hint, remaining)
520
def iter_records_from_file(source_file):
    """Iterate over the records of a container read from source_file.

    :param source_file: a file-like object supporting read(size).
    :return: an iterator of (name_tuples, bytes) tuples.
    """
    parser = ContainerPushParser()
    while True:
        # Read in parser-suggested chunks and push them through the state
        # machine, yielding records as they complete.
        bytes = source_file.read(parser.read_size_hint())
        parser.accept_bytes(bytes)
        for record in parser.read_pending_records():
            yield record
        # Stop once the end marker has been parsed.
        if parser.finished:
            break