        return SuccessfulSmartServerResponse(('ok', ), '\n'.join(lines))


class SmartServerRepositoryGetRevIdForRevno(SmartServerRepositoryReadLocked):

    def do_readlocked_repository_request(self, repository, revno,
            known_pair):
        """Find the revid for a given revno, given a known revno/revid pair.

        New in 1.17.
        """
        try:
            found_flag, result = repository.get_rev_id_for_revno(revno, known_pair)
        except errors.RevisionNotPresent, err:
            if err.revision_id != known_pair[1]:
                raise AssertionError(
                    'get_rev_id_for_revno raised RevisionNotPresent for '
                    'non-initial revision: ' + err.revision_id)
            return FailedSmartServerResponse(
                ('nosuchrevision', err.revision_id))
        if found_flag:
            return SuccessfulSmartServerResponse(('ok', result))
        else:
            earliest_revno, earliest_revid = result
            return SuccessfulSmartServerResponse(
                ('history-incomplete', earliest_revno, earliest_revid))
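
# Illustrative sketch (example values are hypothetical): given a known pair
# (100, 'rev-100-id'), do_readlocked_repository_request(repository, 42,
# (100, 'rev-100-id')) answers ('ok', revid) when revno 42 can be resolved,
# ('history-incomplete', earliest_revno, earliest_revid) when the repository's
# history does not extend to that revno (found_flag is False), and
# ('nosuchrevision', ...) when the known revid itself is not present.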


class SmartServerRequestHasRevision(SmartServerRepositoryRequest):

    def do_repository_request(self, repository, revision_id):
        """Return ok if a specific revision is in the repository at path."""
        if repository.has_revision(revision_id):
            return SuccessfulSmartServerResponse(('yes', ))
        else:
            return SuccessfulSmartServerResponse(('no', ))


class SmartServerRepositoryGetStream(SmartServerRepositoryRequest):

    def do_repository_request(self, repository, to_network_name):
        """Get a stream for inserting into a to_format repository."""
        self._to_format = network_format_registry.get(to_network_name)
        if self._should_fake_unknown():
            return FailedSmartServerResponse(
                ('UnknownMethod', 'Repository.get_stream'))
        return None # Signal that we want a body.
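
    # 'Repository.get_stream' is the verb this request handles; when
    # _should_fake_unknown() (below) decides the client/target combination is
    # unsafe, the server pretends the verb is unknown so that the client falls
    # back to VFS access instead.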

    def _should_fake_unknown(self):
        """Return True if we should return UnknownMethod to the client.

        This is a workaround for bugs in pre-1.19 clients that claim to
        support receiving streams of CHK repositories. The pre-1.19 client
        expects inventory records to be serialized in the format defined by
        to_network_name, but in pre-1.19 (at least) that format definition
        tries to use the xml5 serializer, which does not correctly handle
        rich-roots. After 1.19 the client can also accept inventory-deltas
        (which avoids this issue), and those clients will use the
        Repository.get_stream_1.19 verb instead of this one.
        So: if this repository is CHK, and the to_format doesn't match,
        we should just fake an UnknownSmartMethod error so that the client
        will fallback to VFS, rather than sending it a stream we know it
        cannot handle.
        """
        from_format = self._repository._format
        to_format = self._to_format
        if not from_format.supports_chks:
            # Source not CHK: that's ok
            return False
        if (to_format.supports_chks and
            from_format.repository_class is to_format.repository_class and
            from_format._serializer == to_format._serializer):
            # Source is CHK, but target matches: that's ok
            # (e.g. 2a->2a, or CHK2->2a)
            return False
        # Source is CHK, and target is not CHK or incompatible CHK. We can't
        # generate a compatible stream.
        return True
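
    # Illustrative examples (the pack-0.92 name is an assumed example, not
    # quoted from this excerpt): a 2a -> 2a stream passes the checks above and
    # is sent as-is, while a 2a -> pack-0.92 (non-CHK) target makes this method
    # return True, so the client is told the verb is unknown and falls back to
    # VFS.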

    def do_body(self, body_bytes):
        repository = self._repository
        repository.lock_read()

    yield pack_writer.end()


class _ByteStreamDecoder(object):
    """Helper for _byte_stream_to_stream.

    The expected usage of this class is via the function _byte_stream_to_stream
    which creates a _ByteStreamDecoder, pops off the stream format and then
    yields the output of record_stream(), the main entry point to
    _ByteStreamDecoder.

    Broadly this class has to unwrap two layers of iterators:
    (type, substream)
    (substream details)

    This is complicated by wishing to return type, iterator_for_type, but
    getting the data for iterator_for_type when we find out type: we can't
    simply pass a generator down to the NetworkRecordStream parser, instead
    we have a little local state to seed each NetworkRecordStream instance,
    and gather the type that we'll be yielding.

    :ivar byte_stream: The byte stream being decoded.
    :ivar stream_decoder: A pack parser used to decode the bytestream
    :ivar current_type: The current type, used to join adjacent records of the
        same type into a single stream.
    :ivar first_bytes: The first bytes to give the next NetworkRecordStream.
    """

    def __init__(self, byte_stream):
        """Create a _ByteStreamDecoder."""
        self.stream_decoder = pack.ContainerPushParser()
        self.current_type = None
        self.first_bytes = None
        self.byte_stream = byte_stream

    def iter_stream_decoder(self):
        """Iterate the contents of the pack from stream_decoder."""
        # dequeue pending items
        for record in self.stream_decoder.read_pending_records():
            yield record
        # Pull bytes off the wire, decode them to records, yield those records.
        for bytes in self.byte_stream:
            self.stream_decoder.accept_bytes(bytes)
            for record in self.stream_decoder.read_pending_records():
                yield record

    def iter_substream_bytes(self):
        if self.first_bytes is not None:
            yield self.first_bytes
            # If we run out of pack records, signal the outer layer to stop.
            self.first_bytes = None
        for record in self.iter_pack_records:
            record_names, record_bytes = record
            record_name, = record_names
            substream_type = record_name[0]
            if substream_type != self.current_type:
                # end of a substream, seed the next substream.
                self.current_type = substream_type
                self.first_bytes = record_bytes
                return
            yield record_bytes

    def record_stream(self):
        """Yield substream_type, substream from the byte stream."""
        self.seed_state()
        # Make and consume sub generators, one per substream type:
        while self.first_bytes is not None:
            substream = NetworkRecordStream(self.iter_substream_bytes())
            # after substream is fully consumed, self.current_type is set to
            # the next type, and self.first_bytes is set to the matching bytes.
            yield self.current_type, substream.read()

    def seed_state(self):
        """Prepare the _ByteStreamDecoder to decode from the pack stream."""
        # Set a single generator we can use to get data from the pack stream.
        self.iter_pack_records = self.iter_stream_decoder()
        # Seed the very first subiterator with content; after this each one
        # seeds the next.
        list(self.iter_substream_bytes())
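

# Note (illustrative; the 'texts'/'inventories' names are assumed examples):
# each pack record carries a single name whose first element is the substream
# type, e.g. ('texts',) or ('inventories',); iter_substream_bytes() reads that
# element via record_name[0] to detect where one substream ends and the next
# one begins, so adjacent records of the same type are joined into a single
# NetworkRecordStream.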


def _byte_stream_to_stream(byte_stream):
    """Convert a byte stream into a format and a stream.

    :param byte_stream: A bytes iterator, as output by _stream_to_byte_stream.
    :return: (RepositoryFormat, stream_generator)
    """
    decoder = _ByteStreamDecoder(byte_stream)
    for bytes in byte_stream:
        decoder.stream_decoder.accept_bytes(bytes)
        for record in decoder.stream_decoder.read_pending_records(max=1):
            record_names, src_format_name = record
            src_format = network_format_registry.get(src_format_name)
            return src_format, decoder.record_stream()
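

# A minimal usage sketch, assuming byte_stream is the byte iterator produced
# by _stream_to_byte_stream (e.g. the body of a Repository.get_stream
# response):
#
#   src_format, stream = _byte_stream_to_stream(byte_stream)
#   for substream_type, substream in stream:
#       ...  # consume the decoded records for this substream type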


class SmartServerRepositoryUnlock(SmartServerRepositoryRequest):

        return SuccessfulSmartServerResponse(('ok', ))


class SmartServerRepositoryInsertStream_1_19(SmartServerRepositoryInsertStreamLocked):
    """Insert a record stream from a RemoteSink into a repository.

    Same as SmartServerRepositoryInsertStreamLocked, except:
     - the lock token argument is optional
     - servers that implement this verb accept 'inventory-delta' records in the
       stream.

    New in 1.19.
    """

    def do_repository_request(self, repository, resume_tokens, lock_token=None):
        """StreamSink.insert_stream for a remote repository."""
        SmartServerRepositoryInsertStreamLocked.do_repository_request(
            self, repository, resume_tokens, lock_token)
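
    # Descriptive note: the 1.19 variant simply delegates to the locked
    # implementation with lock_token defaulting to None, which is what makes
    # the lock token argument optional for this verb.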


class SmartServerRepositoryInsertStream(SmartServerRepositoryInsertStreamLocked):
    """Insert a record stream from a RemoteSink into an unlocked repository.