    def recreate_search(self, repository, search_bytes, discard_excess=False):
        """Recreate a search from its serialised form.

        :param discard_excess: If True, and the search refers to data we don't
            have, just silently accept that fact - the verb calling
            recreate_search trusts that clients will look for missing things
            they expected and get it from elsewhere.
        """
        lines = search_bytes.split('\n')
        if lines[0] == 'ancestry-of':
            heads = lines[1:]
            search_result = graph.PendingAncestryResult(heads, repository)
            return search_result, None
        elif lines[0] == 'search':
            return self.recreate_search_from_recipe(repository, lines[1:],
                discard_excess=discard_excess)
        else:
            return (None, FailedSmartServerResponse(('BadSearch',)))
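    # A minimal sketch of the two serialised forms handled above (revision
    # ids here are invented for illustration):
    #
    #   'ancestry-of\nrev-a\nrev-b'    -> PendingAncestryResult on rev-a, rev-b
    #   'search\n<three recipe lines>' -> recreate_search_from_recipe, below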
    def recreate_search_from_recipe(self, repository, lines,
        discard_excess=False):
        """Recreate a specific revision search (vs a from-tip search).

        :param discard_excess: If True, and the search refers to data we don't
            have, just silently accept that fact - the verb calling
            recreate_search trusts that clients will look for missing things
            they expected and get it from elsewhere.
        """
        start_keys = set(lines[0].split(' '))
        exclude_keys = set(lines[1].split(' '))
        revision_count = int(lines[2])
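        # Illustrative recipe only (invented revision ids): the three lines
        # parsed above could be
        #   'rev-head'            -> start_keys
        #   'rev-base rev-other'  -> exclude_keys
        #   '10'                  -> revision_count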
            queried_revs.update(next_revs)
            parent_map = repo_graph.get_parent_map(next_revs)
            current_revs = next_revs
            next_revs = set()
            for revision_id in current_revs:
                missing_rev = False
                parents = parent_map.get(revision_id)
                if parents is not None:
                    # adjust for the wire
                    if parents == (_mod_revision.NULL_REVISION,):
                        parents = ()
                    # prepare the next query
                    next_revs.update(parents)
                    encoded_id = revision_id
                else:
                    missing_rev = True
                    encoded_id = "missing:" + revision_id
                    parents = []
                if (revision_id not in client_seen_revs and
                    (not missing_rev or include_missing)):
                    # Client does not have this revision, give it to it.
                    # add parents to the result
                    result[encoded_id] = parents
                    # Approximate the serialized cost of this revision_id.
                    size_so_far += 2 + len(encoded_id) + sum(map(len, parents))
            # get all the directly asked for parents, and then flesh out to
            # 64K (compressed) or so. We do one level of depth at a time to
            # stay in sync with the client. The 250000 magic number is
            # estimated compression ratio taken from bzr.dev itself.
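            # Illustrative cost arithmetic (invented values, not real data):
            #   >>> encoded_id = 'missing:rev-1'; parents = []
            #   >>> 2 + len(encoded_id) + sum(map(len, parents))
            #   15
            # roughly the bytes of one 'key parent parent\n' line on the wire.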
        return SuccessfulSmartServerResponse(('ok', ), '\n'.join(lines))
class SmartServerRepositoryGetRevIdForRevno(SmartServerRepositoryReadLocked):

    def do_readlocked_repository_request(self, repository, revno,
            known_pair):
        """Find the revid for a given revno, given a known revno/revid pair."""
        try:
            found_flag, result = repository.get_rev_id_for_revno(revno, known_pair)
        except errors.RevisionNotPresent, err:
            if err.revision_id != known_pair[1]:
                raise AssertionError(
                    'get_rev_id_for_revno raised RevisionNotPresent for '
                    'non-initial revision: ' + err.revision_id)
            return FailedSmartServerResponse(
                ('nosuchrevision', err.revision_id))
        if found_flag:
            return SuccessfulSmartServerResponse(('ok', result))
        else:
            earliest_revno, earliest_revid = result
            return SuccessfulSmartServerResponse(
                ('history-incomplete', earliest_revno, earliest_revid))
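    # Illustrative responses (invented revids): with revno=4 and
    # known_pair=(6, 'rev-6'), a complete history yields ('ok', 'rev-4');
    # a repository whose history only reaches back to revno 5 yields
    # ('history-incomplete', 5, 'rev-5') so the client can look elsewhere.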
class SmartServerRequestHasRevision(SmartServerRepositoryRequest):

    def do_repository_request(self, repository, revision_id):
        self._to_format = network_format_registry.get(to_network_name)
        if self._should_fake_unknown():
            return FailedSmartServerResponse(
                ('UnknownMethod', 'Repository.get_stream'))
        return None # Signal that we want a body.
    def _should_fake_unknown(self):
        """Return True if we should return UnknownMethod to the client.

        This is a workaround for bugs in pre-1.19 clients that claim to
        support receiving streams of CHK repositories. The pre-1.19 client
        expects inventory records to be serialized in the format defined by
        to_network_name, but in pre-1.19 (at least) that format definition
        tries to use the xml5 serializer, which does not correctly handle
        rich-roots. After 1.19 the client can also accept inventory-deltas
        (which avoids this issue), and those clients will use the
        Repository.get_stream_1.19 verb instead of this one.
        So: if this repository is CHK, and the to_format doesn't match,
        we should just fake an UnknownSmartMethod error so that the client
        will fallback to VFS, rather than sending it a stream we know it
        cannot handle.
        """
        from_format = self._repository._format
        to_format = self._to_format
        if not from_format.supports_chks:
            # Source not CHK: that's ok
            return False
        if (to_format.supports_chks and
            from_format.repository_class is to_format.repository_class and
            from_format._serializer == to_format._serializer):
            # Source is CHK, but target matches: that's ok
            # (e.g. 2a->2a, or CHK2->2a)
            return False
        # Source is CHK, and target is not CHK or incompatible CHK. We can't
        # generate a compatible stream.
        return True
348
def do_body(self, body_bytes):
459
349
repository = self._repository
460
350
repository.lock_read()
462
search_result, error = self.recreate_search(repository, body_bytes,
352
search, error = self.recreate_search(repository, body_bytes)
464
353
if error is not None:
465
354
repository.unlock()
356
search = search.get_result()
467
357
source = repository._get_source(self._to_format)
468
stream = source.get_stream(search_result)
358
stream = source.get_stream(search)
469
359
except Exception:
470
360
exc_info = sys.exc_info()
        repository.unlock()
class SmartServerRepositoryGetStream_1_19(SmartServerRepositoryGetStream):

    def _should_fake_unknown(self):
        """Returns False; we don't need to workaround bugs in 1.19+ clients."""
        return False
def _stream_to_byte_stream(stream, src_format):
    """Convert a record stream to a self delimited byte stream."""
    pack_writer = pack.ContainerSerialiser()
    yield pack_writer.begin()
    yield pack_writer.bytes_record(src_format.network_name(), '')
    for substream_type, substream in stream:
        if substream_type == 'inventory-deltas':
            # This doesn't feel like the ideal place to issue this warning;
            # however we don't want to do it in the Repository that's
            # generating the stream, because that might be on the server.
            # Instead we try to observe it as the stream goes by.
            ui.ui_factory.warn_cross_format_fetch(src_format,
                '(remote)')
        for record in substream:
            if record.storage_kind in ('chunked', 'fulltext'):
                serialised = record_to_fulltext_bytes(record)
            elif record.storage_kind == 'inventory-delta':
                serialised = record_to_inventory_delta_bytes(record)
            elif record.storage_kind == 'absent':
                raise ValueError("Absent factory for %s" % (record.key,))
            else:
                serialised = record.get_bytes_as(record.storage_kind)
            if serialised:
                # Some streams embed the whole stream into the wire
                # representation of the first record, which means that
                # later records have no wire representation: we skip them.
                yield pack_writer.bytes_record(serialised, [(substream_type,)])
    yield pack_writer.end()
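# A minimal usage sketch, assuming a 'stream' and 'repository' as used by the
# get_stream verb above; it round trips through _byte_stream_to_stream (below):
#
#   byte_stream = _stream_to_byte_stream(stream, repository._format)
#   src_format, decoded = _byte_stream_to_stream(byte_stream)
#   for substream_type, substream in decoded:
#       for record in substream:
#           pass  # records arrive typed, as the sender emitted them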
class _ByteStreamDecoder(object):
    """Helper for _byte_stream_to_stream.

    The expected usage of this class is via the function _byte_stream_to_stream
    which creates a _ByteStreamDecoder, pops off the stream format and then
    yields the output of record_stream(), the main entry point to
    _ByteStreamDecoder.

    Broadly this class has to unwrap two layers of iterators:
    (type, substream)
    (substream details)

    This is complicated by wishing to return type, iterator_for_type, but
    getting the data for iterator_for_type when we find out type: we can't
    simply pass a generator down to the NetworkRecordStream parser, instead
    we have a little local state to seed each NetworkRecordStream instance,
    and gather the type that we'll be yielding.

    :ivar byte_stream: The byte stream being decoded.
    :ivar stream_decoder: A pack parser used to decode the bytestream.
    :ivar current_type: The current type, used to join adjacent records of the
        same type into a single stream.
    :ivar first_bytes: The first bytes to give the next NetworkRecordStream.
    """
    def __init__(self, byte_stream):
        """Create a _ByteStreamDecoder."""
        self.stream_decoder = pack.ContainerPushParser()
        self.current_type = None
        self.first_bytes = None
        self.byte_stream = byte_stream
    def iter_stream_decoder(self):
        """Iterate the contents of the pack from stream_decoder."""
        # dequeue pending items
        for record in self.stream_decoder.read_pending_records():
            yield record
        # Pull bytes off the wire, decode them to records, yield those records.
        for bytes in self.byte_stream:
            self.stream_decoder.accept_bytes(bytes)
            for record in self.stream_decoder.read_pending_records():
                yield record
    def iter_substream_bytes(self):
        if self.first_bytes is not None:
            yield self.first_bytes
            # If we run out of pack records, signal the outer layer to stop.
            self.first_bytes = None
        for record in self.iter_pack_records:
            record_names, record_bytes = record
            record_name, = record_names
            substream_type = record_name[0]
            if substream_type != self.current_type:
                # end of a substream, seed the next substream.
                self.current_type = substream_type
                self.first_bytes = record_bytes
                return
            yield record_bytes
    def record_stream(self):
        """Yield substream_type, substream from the byte stream."""
        self.seed_state()
        # Make and consume sub generators, one per substream type:
        while self.first_bytes is not None:
            substream = NetworkRecordStream(self.iter_substream_bytes())
            # after substream is fully consumed, self.current_type is set to
            # the next type, and self.first_bytes is set to the matching bytes.
            yield self.current_type, substream.read()
    def seed_state(self):
        """Prepare the _ByteStreamDecoder to decode from the pack stream."""
        # Set a single generator we can use to get data from the pack stream.
        self.iter_pack_records = self.iter_stream_decoder()
        # Seed the very first subiterator with content; after this each one
        # seeds the next.
        list(self.iter_substream_bytes())
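    # Illustrative grouping (invented record names): pack records named
    # ('texts',), ('texts',), ('inventories',) decode into two substreams,
    # ('texts', <2 records>) then ('inventories', <1 record>), because
    # current_type joins adjacent records of the same type.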
def _byte_stream_to_stream(byte_stream):
    """Convert a byte stream into a format and a stream.

    :param byte_stream: A bytes iterator, as output by _stream_to_byte_stream.
    :return: (RepositoryFormat, stream_generator)
    """
    decoder = _ByteStreamDecoder(byte_stream)
    for bytes in byte_stream:
        decoder.stream_decoder.accept_bytes(bytes)
        for record in decoder.stream_decoder.read_pending_records(max=1):
            record_names, src_format_name = record
            src_format = network_format_registry.get(src_format_name)
            return src_format, decoder.record_stream()
class SmartServerRepositoryUnlock(SmartServerRepositoryRequest):


class SmartServerRepositoryInsertStreamLocked(SmartServerRepositoryRequest):
    """Insert a record stream from a RemoteSink into a repository.

    This gets bytes pushed to it by the network infrastructure and turns that
    into a bytes iterator using a thread. That is then processed by
    _byte_stream_to_stream.
    """

    def do_repository_request(self, repository, resume_tokens, lock_token):
        """StreamSink.insert_stream for a remote repository."""
        repository.lock_write(token=lock_token)
        self.do_insert_stream_request(repository, resume_tokens)

    def do_insert_stream_request(self, repository, resume_tokens):
        tokens = [token for token in resume_tokens.split(' ') if token]
        self.tokens = tokens
        self.repository = repository

        self.repository.unlock()
        return SuccessfulSmartServerResponse(('ok', ))
class SmartServerRepositoryInsertStream_1_19(SmartServerRepositoryInsertStreamLocked):
    """Insert a record stream from a RemoteSink into a repository.

    Same as SmartServerRepositoryInsertStreamLocked, except:
     - the lock token argument is optional
     - servers that implement this verb accept 'inventory-delta' records in the
       stream.
    """

    def do_repository_request(self, repository, resume_tokens, lock_token=None):
        """StreamSink.insert_stream for a remote repository."""
        SmartServerRepositoryInsertStreamLocked.do_repository_request(
            self, repository, resume_tokens, lock_token)
class SmartServerRepositoryInsertStream(SmartServerRepositoryInsertStreamLocked):
    """Insert a record stream from a RemoteSink into an unlocked repository.

    This is the same as SmartServerRepositoryInsertStreamLocked, except it
    takes no lock_tokens; i.e. it works with an unlocked (or lock-free, e.g.
    like pack format) repository.
    """

    def do_repository_request(self, repository, resume_tokens):
        """StreamSink.insert_stream for a remote repository."""
        repository.lock_write()
        self.do_insert_stream_request(repository, resume_tokens)
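# How these verbs relate (the registered verb names are an assumption here):
# 1.19+ clients use Repository.insert_stream_1.19 and may omit the lock token;
# older clients use Repository.insert_stream_locked with a token, or
# Repository.insert_stream against unlocked / lock-free repositories.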