    def recreate_search(self, repository, search_bytes, discard_excess=False):
        """Recreate a search from its serialised form.

        :param discard_excess: If True, and the search refers to data we don't
            have, just silently accept that fact - the verb calling
            recreate_search trusts that clients will look for missing things
            they expected and get it from elsewhere.
        """
        lines = search_bytes.split('\n')
        if lines[0] == 'ancestry-of':
            heads = lines[1:]
            search_result = graph.PendingAncestryResult(heads, repository)
            return search_result, None
        elif lines[0] == 'search':
            return self.recreate_search_from_recipe(repository, lines[1:],
                discard_excess=discard_excess)
        else:
            return (None, FailedSmartServerResponse(('BadSearch',)))
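
    # Illustrative sketch (not part of the original file): the two serialised
    # search forms accepted above are newline-separated, e.g.:
    #
    #   "ancestry-of\nhead-revid-1\nhead-revid-2"
    #   "search\n<start keys>\n<exclude keys>\n<revision count>"
    #
    # where the 'search' recipe lines are space-separated revision ids plus a
    # count, as parsed by recreate_search_from_recipe below.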

    def recreate_search_from_recipe(self, repository, lines,
        discard_excess=False):
        """Recreate a specific revision search (vs a from-tip search).

        :param discard_excess: If True, and the search refers to data we don't
            have, just silently accept that fact - the verb calling
            recreate_search trusts that clients will look for missing things
            they expected and get it from elsewhere.
        """
        start_keys = set(lines[0].split(' '))
        exclude_keys = set(lines[1].split(' '))
        revision_count = int(lines[2])
            queried_revs.update(next_revs)
            parent_map = repo_graph.get_parent_map(next_revs)
            current_revs = next_revs
            next_revs = set()
            for revision_id in current_revs:
                missing_rev = False
                parents = parent_map.get(revision_id)
                if parents is not None:
                    # adjust for the wire
                    if parents == (_mod_revision.NULL_REVISION,):
                        parents = ()
                    # prepare the next query
                    next_revs.update(parents)
                    encoded_id = revision_id
                else:
                    missing_rev = True
                    encoded_id = "missing:" + revision_id
                    parents = []
                if (revision_id not in client_seen_revs and
                    (not missing_rev or include_missing)):
                    # Client does not have this revision, give it to it.
                    # add parents to the result
                    result[encoded_id] = parents
                    # Approximate the serialized cost of this revision_id.
                    size_so_far += 2 + len(encoded_id) + sum(map(len, parents))
            # get all the directly asked for parents, and then flesh out to
            # 64K (compressed) or so. We do one level of depth at a time to
            # stay in sync with the client. The 250000 magic number is
            # estimated compression ratio taken from bzr.dev itself.
            if self.no_extra_results or (
                first_loop_done and size_so_far > 250000):
                next_revs = set()
            first_loop_done = True
            # don't query things we've already queried
            next_revs = next_revs.difference(queried_revs)
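
        # Illustrative example (not in the original): given a root revision
        # 'rev-1' (sole parent is NULL_REVISION) and an absent id 'rev-x',
        # the entries sent to the client would look like (ids hypothetical):
        #   result['rev-1'] = ()
        #   result['missing:rev-x'] = []   # only when include_missing is set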

        return SuccessfulSmartServerResponse(('ok', token))


class SmartServerRepositoryGetStream(SmartServerRepositoryRequest):

    def do_repository_request(self, repository, to_network_name):
        """Get a stream for inserting into a to_format repository.

        :param repository: The repository to stream from.
        :param to_network_name: The network name of the format of the target
            repository.
        """
        self._to_format = network_format_registry.get(to_network_name)
        if self._should_fake_unknown():
            return FailedSmartServerResponse(
                ('UnknownMethod', 'Repository.get_stream'))
        return None # Signal that we want a body.

    def _should_fake_unknown(self):
        """Return True if we should return UnknownMethod to the client.

        This is a workaround for bugs in pre-1.19 clients that claim to
        support receiving streams of CHK repositories. The pre-1.19 client
        expects inventory records to be serialized in the format defined by
        to_network_name, but in pre-1.19 (at least) that format definition
        tries to use the xml5 serializer, which does not correctly handle
        rich-roots. After 1.19 the client can also accept inventory-deltas
        (which avoids this issue), and those clients will use the
        Repository.get_stream_1.19 verb instead of this one.
        So: if this repository is CHK, and the to_format doesn't match,
        we should just fake an UnknownSmartMethod error so that the client
        will fall back to VFS, rather than sending it a stream we know it
        cannot insert.
        """
        from_format = self._repository._format
        to_format = self._to_format
        if not from_format.supports_chks:
            # Source not CHK: that's ok
            return False
        if (to_format.supports_chks and
            from_format.repository_class is to_format.repository_class and
            from_format._serializer == to_format._serializer):
            # Source is CHK, but target matches: that's ok
            # (e.g. 2a->2a, or CHK2->2a)
            return False
        # Source is CHK, and target is not CHK or incompatible CHK. We can't
        # generate a compatible stream.
        return True
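
    # Illustrative examples (not in the original): with bzr's stock formats,
    # 2a -> 2a matches and streams normally; 2a -> pack-0.92 (CHK source,
    # non-CHK target) fakes UnknownMethod so pre-1.19 clients fall back to
    # VFS; a non-CHK source such as pack-0.92 never fakes it.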

    def do_body(self, body_bytes):
        repository = self._repository
        repository.lock_read()
        try:
            search_result, error = self.recreate_search(repository, body_bytes,
                discard_excess=True)
            if error is not None:
                repository.unlock()
                return error
            source = repository._get_source(self._to_format)
            stream = source.get_stream(search_result)
        except Exception:
            exc_info = sys.exc_info()
            try:
                # On non-error, unlocking is done by the body stream handler.
                repository.unlock()
            finally:
                raise exc_info[0], exc_info[1], exc_info[2]
        return SuccessfulSmartServerResponse(('ok',),
            body_stream=self.body_stream(stream, repository))

    def body_stream(self, stream, repository):
        byte_stream = _stream_to_byte_stream(stream, repository._format)
        try:
            for bytes in byte_stream:
                yield bytes
        except errors.RevisionNotPresent, e:
            # This shouldn't be able to happen, but as we don't buffer
            # everything it can in theory happen.
            repository.unlock()
            yield FailedSmartServerResponse(('NoSuchRevision', e.revision_id))
        else:
            repository.unlock()


class SmartServerRepositoryGetStream_1_19(SmartServerRepositoryGetStream):

    def _should_fake_unknown(self):
        """Returns False; we don't need to work around bugs in 1.19+ clients."""
        return False


def _stream_to_byte_stream(stream, src_format):
    """Convert a record stream to a self-delimited byte stream."""
    pack_writer = pack.ContainerSerialiser()
    yield pack_writer.begin()
    yield pack_writer.bytes_record(src_format.network_name(), '')
    for substream_type, substream in stream:
        for record in substream:
            if record.storage_kind in ('chunked', 'fulltext'):
                serialised = record_to_fulltext_bytes(record)
            elif record.storage_kind == 'inventory-delta':
                serialised = record_to_inventory_delta_bytes(record)
            elif record.storage_kind == 'absent':
                raise ValueError("Absent factory for %s" % (record.key,))
            else:
                serialised = record.get_bytes_as(record.storage_kind)
            if serialised:
                # Some streams embed the whole stream into the wire
                # representation of the first record, which means that
                # later records have no wire representation: we skip them.
                yield pack_writer.bytes_record(serialised, [(substream_type,)])
    yield pack_writer.end()
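
# Illustrative sketch (not part of the original file): the byte stream built
# above is a self-delimited pack container, roughly:
#
#   <container begin>
#   <bytes record: src_format.network_name(), unnamed>
#   <bytes record: serialised record, named (substream_type,)>  (repeated)
#   <container end>
#
# _byte_stream_to_stream below parses this layout back into a record stream.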


class _ByteStreamDecoder(object):
    """Helper for _byte_stream_to_stream.

    The expected usage of this class is via the function _byte_stream_to_stream
    which creates a _ByteStreamDecoder, pops off the stream format and then
    yields the output of record_stream(), the main entry point to
    _ByteStreamDecoder.

    Broadly this class has to unwrap two layers of iterators:
    (type, substream)
    (substream details)

    This is complicated by wishing to return type, iterator_for_type, but
    getting the data for iterator_for_type when we find out type: we can't
    simply pass a generator down to the NetworkRecordStream parser, instead
    we have a little local state to seed each NetworkRecordStream instance,
    and gather the type that we'll be yielding.

    :ivar byte_stream: The byte stream being decoded.
    :ivar stream_decoder: A pack parser used to decode the bytestream.
    :ivar current_type: The current type, used to join adjacent records of the
        same type into a single stream.
    :ivar first_bytes: The first bytes to give the next NetworkRecordStream.
    """
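
    # Illustrative sketch (not in the original): record_stream() below yields
    # (substream_type, records) pairs shaped like a repository insert stream,
    # e.g. ('texts', ...), ('inventories', ...), ('revisions', ...).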

    def __init__(self, byte_stream):
        """Create a _ByteStreamDecoder."""
        self.stream_decoder = pack.ContainerPushParser()
        self.current_type = None
        self.first_bytes = None
        self.byte_stream = byte_stream

    def iter_stream_decoder(self):
        """Iterate the contents of the pack from stream_decoder."""
        # dequeue pending items
        for record in self.stream_decoder.read_pending_records():
            yield record
        # Pull bytes off the wire, decode them to records, yield those records.
        for bytes in self.byte_stream:
            self.stream_decoder.accept_bytes(bytes)
            for record in self.stream_decoder.read_pending_records():
                yield record

    def iter_substream_bytes(self):
        if self.first_bytes is not None:
            yield self.first_bytes
            # If we run out of pack records, signal the outer layer to stop.
            self.first_bytes = None
        for record in self.iter_pack_records:
            record_names, record_bytes = record
            record_name, = record_names
            substream_type = record_name[0]
            if substream_type != self.current_type:
                # end of a substream, seed the next substream.
                self.current_type = substream_type
                self.first_bytes = record_bytes
                return
            yield record_bytes

    def record_stream(self):
        """Yield substream_type, substream from the byte stream."""
        self.seed_state()
        # Make and consume sub generators, one per substream type:
        while self.first_bytes is not None:
            substream = NetworkRecordStream(self.iter_substream_bytes())
            # after substream is fully consumed, self.current_type is set to
            # the next type, and self.first_bytes is set to the matching bytes.
            yield self.current_type, substream.read()

    def seed_state(self):
        """Prepare the _ByteStreamDecoder to decode from the pack stream."""
        # Set a single generator we can use to get data from the pack stream.
        self.iter_pack_records = self.iter_stream_decoder()
        # Seed the very first subiterator with content; after this each one
        # seeds the next.
        list(self.iter_substream_bytes())


def _byte_stream_to_stream(byte_stream):
    """Convert a byte stream into a format and a stream.

    :param byte_stream: A bytes iterator, as output by _stream_to_byte_stream.
    :return: (RepositoryFormat, stream_generator)
    """
    decoder = _ByteStreamDecoder(byte_stream)
    for bytes in byte_stream:
        decoder.stream_decoder.accept_bytes(bytes)
        for record in decoder.stream_decoder.read_pending_records(max=1):
            record_names, src_format_name = record
            src_format = network_format_registry.get(src_format_name)
            return src_format, decoder.record_stream()
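
# Illustrative usage sketch (not part of the original file): the two helpers
# above are inverses, so a record stream can be round-tripped over the wire:
#
#   byte_stream = _stream_to_byte_stream(stream, repository._format)
#   src_format, stream = _byte_stream_to_stream(byte_stream)
#
# ('stream' and 'repository' are hypothetical local names.)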


class SmartServerRepositoryUnlock(SmartServerRepositoryRequest):

    def do_repository_request(self, repository, token):


class SmartServerRepositoryInsertStreamLocked(SmartServerRepositoryRequest):
    """Insert a record stream from a RemoteSink into a repository.

    This gets bytes pushed to it by the network infrastructure and turns that
    into a bytes iterator using a thread. That is then processed by
    _byte_stream_to_stream.

    New in 1.14.
    """

    def do_repository_request(self, repository, resume_tokens, lock_token):
        """StreamSink.insert_stream for a remote repository."""
        repository.lock_write(token=lock_token)
        self.do_insert_stream_request(repository, resume_tokens)

    def do_insert_stream_request(self, repository, resume_tokens):
        tokens = [token for token in resume_tokens.split(' ') if token]
        self.tokens = tokens
        self.repository = repository
        self.queue = Queue.Queue()
        self.insert_thread = threading.Thread(target=self._inserter_thread)
        self.insert_thread.start()

    def do_chunk(self, body_stream_chunk):
        self.queue.put(body_stream_chunk)

    def _inserter_thread(self):
        try:
            src_format, stream = _byte_stream_to_stream(
                self.blocking_byte_stream())
            self.insert_result = self.repository._get_sink().insert_stream(
                stream, src_format, self.tokens)
            self.insert_ok = True
        except:
            self.insert_exception = sys.exc_info()
            self.insert_ok = False

    def blocking_byte_stream(self):
        while True:
            bytes = self.queue.get()
            if bytes is StopIteration:
                return
            else:
                yield bytes
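
    # Illustrative note (not in the original): do_chunk() feeds the queue from
    # the network side while blocking_byte_stream() drains it on the inserter
    # thread; the StopIteration class object serves as an in-band sentinel, so
    # do_end()'s self.queue.put(StopIteration) terminates the generator.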

    def do_end(self):
        self.queue.put(StopIteration)
        if self.insert_thread is not None:
            self.insert_thread.join()
        if not self.insert_ok:
            exc_info = self.insert_exception
            raise exc_info[0], exc_info[1], exc_info[2]
        write_group_tokens, missing_keys = self.insert_result
        if write_group_tokens or missing_keys:
            # bzip needed? missing keys should typically be a small set.
            # Should this be a streaming body response?
            missing_keys = sorted(missing_keys)
            bytes = bencode.bencode((write_group_tokens, missing_keys))
            self.repository.unlock()
            return SuccessfulSmartServerResponse(('missing-basis', bytes))
        else:
            self.repository.unlock()
            return SuccessfulSmartServerResponse(('ok', ))
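
    # Illustrative example (not in the original): the bencoded 'missing-basis'
    # payload is a (write_group_tokens, missing_keys) pair, e.g. (values
    # hypothetical):
    #   bencode.bencode((['token-1'], [('inventories', 'rev-1')]))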


class SmartServerRepositoryStreamKnitDataForRevisions(SmartServerRepositoryRequest):
    """Bzr <= 1.1 streaming pull, buffers all data on server."""

    def do_repository_request(self, repository, *revision_ids):
        repository.lock_read()
        return self._do_repository_request(repository, revision_ids)

    def _do_repository_request(self, repository, revision_ids):
        stream = repository.get_data_stream_for_search(
            repository.revision_ids_to_search_result(set(revision_ids)))
        buffer = StringIO()
        pack = ContainerSerialiser()
        buffer.write(pack.begin())
        try:
            for name_tuple, bytes in stream:
                buffer.write(pack.bytes_record(bytes, [name_tuple]))
            # Undo the lock_read that happens once the iterator from
            # get_data_stream is started.
            repository.unlock()
        except errors.RevisionNotPresent, e:
            return FailedSmartServerResponse(('NoSuchRevision', e.revision_id))
        buffer.write(pack.end())
        return SuccessfulSmartServerResponse(('ok',), buffer.getvalue())


class SmartServerRepositoryStreamRevisionsChunked(SmartServerRepositoryRequest):
    """Bzr 1.1+ streaming pull."""

    def do_body(self, body_bytes):
        repository = self._repository
        repository.lock_read()
        try:
            search, error = self.recreate_search(repository, body_bytes)
            if error is not None:
                repository.unlock()
                return error
            stream = repository.get_data_stream_for_search(search.get_result())
        except:
            # On non-error, unlocking is done by the body stream handler.
            repository.unlock()
            raise
        return SuccessfulSmartServerResponse(('ok',),
            body_stream=self.body_stream(stream, repository))

    def body_stream(self, stream, repository):
        pack = ContainerSerialiser()
        yield pack.begin()
        try:
            for name_tuple, bytes in stream:
                yield pack.bytes_record(bytes, [name_tuple])
            # Undo the lock_read that happens once the iterator from
            # get_data_stream is started.
            repository.unlock()
        except errors.RevisionNotPresent, e:
            # This shouldn't be able to happen, but as we don't buffer
            # everything it can in theory happen.
            yield FailedSmartServerResponse(('NoSuchRevision', e.revision_id))


class SmartServerRepositoryInsertStream_1_19(SmartServerRepositoryInsertStreamLocked):
    """Insert a record stream from a RemoteSink into a repository.

    Same as SmartServerRepositoryInsertStreamLocked, except:
     - the lock token argument is optional
     - servers that implement this verb accept 'inventory-delta' records in the
       stream.

    New in 1.19.
    """

    def do_repository_request(self, repository, resume_tokens, lock_token=None):
        """StreamSink.insert_stream for a remote repository."""
        SmartServerRepositoryInsertStreamLocked.do_repository_request(
            self, repository, resume_tokens, lock_token)


class SmartServerRepositoryInsertStream(SmartServerRepositoryInsertStreamLocked):
    """Insert a record stream from a RemoteSink into an unlocked repository.

    This is the same as SmartServerRepositoryInsertStreamLocked, except it
    takes no lock_tokens; i.e. it works with an unlocked (or lock-free, e.g.
    pack format) repository.

    New in 1.13.
    """

    def do_repository_request(self, repository, resume_tokens):
        """StreamSink.insert_stream for a remote repository."""
        repository.lock_write()
        self.do_insert_stream_request(repository, resume_tokens)
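

# Illustrative sketch (not part of this file): these handler classes are
# looked up by smart verb name; registration lives in bzrlib.smart.request
# and looks roughly like:
#
#   request_handlers.register_lazy(
#       'Repository.insert_stream_1.19', 'bzrlib.smart.repository',
#       'SmartServerRepositoryInsertStream_1_19')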