~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: Vincent Ladeuil
  • Date: 2009-05-04 14:48:21 UTC
  • mto: (4349.1.1 integration)
  • mto: This revision was merged to the branch mainline in revision 4350.
  • Revision ID: v.ladeuil+lp@free.fr-20090504144821-39dvqkikmd3zqkdg
Handle servers proposing several authentication schemes.

* bzrlib/transport/http/_urllib2_wrappers.py:
(AbstractAuthHandler.auth_required): Several schemes can be
proposed by the server, try to match each one in turn.
(BasicAuthHandler.auth_match): Delete dead code.
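
A minimal sketch of the "try each scheme in turn" idea (illustration only,
not the bzrlib implementation; the function name and challenge list here
are hypothetical, the real logic lives in AbstractAuthHandler.auth_required):

    def first_matching_challenge(challenges, supported=('digest', 'basic')):
        """Return the first server challenge whose scheme we can handle.

        A server may send several WWW-Authenticate (or Proxy-Authenticate)
        values, e.g. ['Negotiate', 'Digest realm="r", nonce="n"',
        'Basic realm="r"']; matching only a single challenge would wrongly
        fail when a supported scheme appears later in the list.
        """
        for challenge in challenges:
            if not challenge:
                continue
            scheme = challenge.split(None, 1)[0].lower()
            if scheme in supported:
                return challenge
        return None  # none of the proposed schemes is supported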

* bzrlib/tests/test_http.py:
(load_tests): Separate proxy and http authentication tests as they
require different server setups.
(TestAuth.create_transport_readonly_server): Simplified by using the
parameter provided by load_tests.
(TestAuth.test_changing_nonce): Adapt to new parametrization.
(TestProxyAuth.create_transport_readonly_server): Deleted.
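
Schematically, the load_tests separation multiplies each test across
per-setup scenarios, roughly like the sketch below (the scenario names and
the _auth_server attribute are illustrative, not the exact bzrlib API):

    import copy

    def multiply_tests(tests, scenarios):
        # One copy of each test per scenario, with that scenario's
        # attributes (e.g. which auth server class to start) applied.
        for test in tests:
            for scenario_name, attrs in scenarios:
                new_test = copy.copy(test)
                for attr, value in attrs.items():
                    setattr(new_test, attr, value)
                yield new_test

    # e.g. scenarios = [('http-auth', {'_auth_server': HTTPAuthServer}),
    #                   ('proxy-auth', {'_auth_server': ProxyAuthServer})]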

* bzrlib/tests/http_utils.py:
(DigestAndBasicAuthRequestHandler, HTTPBasicAndDigestAuthServer,
ProxyBasicAndDigestAuthServer): Add a test server proposing both
basic and digest auth schemes but accepting only digest as valid.
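
The essence of that server: advertise both schemes in the 401/407 response
but treat only digest credentials as valid. A sketch (helper functions are
illustrative, not the http_utils code):

    def challenge_headers(realm, nonce):
        # Propose both schemes; a well-behaved client picks one it
        # supports from the list.
        return [('WWW-Authenticate', 'Basic realm="%s"' % realm),
                ('WWW-Authenticate',
                 'Digest realm="%s", nonce="%s"' % (realm, nonce))]

    def authorized(authorization_header):
        # Only digest credentials are accepted as valid.
        return authorization_header.split(None, 1)[0].lower() == 'digest'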

=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
--- bzrlib/repofmt/groupcompress_repo.py
+++ bzrlib/repofmt/groupcompress_repo.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2008-2011 Canonical Ltd
+# Copyright (C) 2008, 2009 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -26,37 +26,33 @@
     errors,
     index as _mod_index,
     inventory,
+    knit,
     osutils,
     pack,
+    remote,
+    repository,
     revision as _mod_revision,
     trace,
     ui,
-    versionedfile,
     )
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
     )
-from bzrlib.decorators import needs_write_lock
+from bzrlib.index import GraphIndex, GraphIndexBuilder
 from bzrlib.groupcompress import (
     _GCGraphIndex,
     GroupCompressVersionedFiles,
     )
 from bzrlib.repofmt.pack_repo import (
-    _DirectPackAccess,
     Pack,
     NewPack,
-    PackRepository,
+    KnitPackRepository,
     PackRootCommitBuilder,
     RepositoryPackCollection,
     RepositoryFormatPack,
-    ResumedPack,
     Packer,
     )
-from bzrlib.vf_repository import (
-    StreamSource,
-    )
-from bzrlib.static_tuple import StaticTuple


 class GCPack(NewPack):
@@ -91,8 +87,8 @@
             # have a regular 2-list index giving parents and compression
             # source.
             index_builder_class(reference_lists=1),
-            # Texts: per file graph, for all fileids - so one reference list
-            # and two elements in the key tuple.
+            # Texts: compression and per file graph, for all fileids - so two
+            # reference lists and two elements in the key tuple.
             index_builder_class(reference_lists=1, key_elements=2),
             # Signatures: Just blobs to store, no compression, no parents
             # listing.
@@ -158,8 +154,6 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None

     def _check_references(self):
         """Make sure our external references are present.
@@ -169,21 +163,7 @@
         have deltas based on a fallback repository.
         (See <https://bugs.launchpad.net/bzr/+bug/288751>)
         """
-        # Groupcompress packs don't have any external references, arguably CHK
-        # pages have external references, but we cannot 'cheaply' determine
-        # them without actually walking all of the chk pages.
-
-
-class ResumedGCPack(ResumedPack):
-
-    def _check_references(self):
-        """Make sure our external compression parents are present."""
-        # See GCPack._check_references for why this is empty
-
-    def _get_external_refs(self, index):
-        # GC repositories don't have compression parents external to a given
-        # pack file
-        return set()
+        # Groupcompress packs don't have any external references


 class GCCHKPacker(Packer):
@@ -222,7 +202,6 @@
             p_id_roots_set = set()
             stream = source_vf.get_record_stream(keys, 'groupcompress', True)
             for idx, record in enumerate(stream):
-                # Inventories should always be with revisions; assume success.
                 bytes = record.get_bytes_as('fulltext')
                 chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                              record.key)
@@ -266,6 +245,7 @@
         remaining_keys = set(keys)
         counter = [0]
         if self._gather_text_refs:
+            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
             self._text_refs = set()
         def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
             cur_keys = root_keys
@@ -292,16 +272,12 @@
                     # Store is None, because we know we have a LeafNode, and we
                     # just want its entries
                     for file_id, bytes in node.iteritems(None):
-                        self._text_refs.add(chk_map._bytes_to_text_key(bytes))
+                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
+                        self._text_refs.add((file_id, revision_id))
                 def next_stream():
                     stream = source_vf.get_record_stream(cur_keys,
                                                          'as-requested', True)
                     for record in stream:
-                        if record.storage_kind == 'absent':
-                            # An absent CHK record: we assume that the missing
-                            # record is in a different pack - e.g. a page not
-                            # altered by the commit we're packing.
-                            continue
                         bytes = record.get_bytes_as('fulltext')
                         # We don't care about search_key_func for this code,
                         # because we only care about external references.
@@ -354,8 +330,7 @@
         """Build a VersionedFiles instance on top of this group of packs."""
         index_name = index_name + '_index'
         index_to_pack = {}
-        access = _DirectPackAccess(index_to_pack,
-                                   reload_func=self._reload_func)
+        access = knit._DirectPackAccess(index_to_pack)
         if for_write:
             # Use new_pack
             if self.new_pack is None:
@@ -415,34 +390,11 @@

     def _copy_inventory_texts(self):
         source_vf, target_vf = self._build_vfs('inventory', True, True)
-        # It is not sufficient to just use self.revision_keys, as stacked
-        # repositories can have more inventories than they have revisions.
-        # One alternative would be to do something with
-        # get_parent_map(self.revision_keys), but that shouldn't be any faster
-        # than this.
-        inventory_keys = source_vf.keys()
-        missing_inventories = set(self.revision_keys).difference(inventory_keys)
-        if missing_inventories:
-            # Go back to the original repo, to see if these are really missing
-            # https://bugs.launchpad.net/bzr/+bug/437003
-            # If we are packing a subset of the repo, it is fine to just have
-            # the data in another Pack file, which is not included in this pack
-            # operation.
-            inv_index = self._pack_collection.repo.inventories._index
-            pmap = inv_index.get_parent_map(missing_inventories)
-            really_missing = missing_inventories.difference(pmap)
-            if really_missing:
-                missing_inventories = sorted(really_missing)
-                raise ValueError('We are missing inventories for revisions: %s'
-                    % (missing_inventories,))
-        self._copy_stream(source_vf, target_vf, inventory_keys,
+        self._copy_stream(source_vf, target_vf, self.revision_keys,
                           'inventories', self._get_filtered_inv_stream, 2)

-    def _get_chk_vfs_for_copy(self):
-        return self._build_vfs('chk', False, False)
-
     def _copy_chk_texts(self):
-        source_vf, target_vf = self._get_chk_vfs_for_copy()
+        source_vf, target_vf = self._build_vfs('chk', False, False)
         # TODO: This is technically spurious... if it is a performance issue,
         #       remove it
         total_keys = source_vf.keys()
@@ -471,7 +423,7 @@
         #      is grabbing too many keys...
         text_keys = source_vf.keys()
         self._copy_stream(source_vf, target_vf, text_keys,
-                          'texts', self._get_progress_stream, 4)
+                          'text', self._get_progress_stream, 4)

     def _copy_signature_texts(self):
         source_vf, target_vf = self._build_vfs('signature', False, False)
@@ -494,15 +446,6 @@
         if not self._use_pack(self.new_pack):
             self.new_pack.abort()
             return None
-        self.new_pack.finish_content()
-        if len(self.packs) == 1:
-            old_pack = self.packs[0]
-            if old_pack.name == self.new_pack._hash.hexdigest():
-                # The single old pack was already optimally packed.
-                trace.mutter('single pack %s was already optimally packed',
-                    old_pack.name)
-                self.new_pack.abort()
-                return None
         self.pb.update('finishing repack', 6, 7)
         self.new_pack.finish()
         self._pack_collection.allocate(self.new_pack)
@@ -594,213 +537,61 @@
         return new_pack.data_inserted() and self._data_changed


-class GCCHKCanonicalizingPacker(GCCHKPacker):
-    """A packer that ensures inventories have canonical-form CHK maps.
-
-    Ideally this would be part of reconcile, but it's very slow and rarely
-    needed.  (It repairs repositories affected by
-    https://bugs.launchpad.net/bzr/+bug/522637).
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(GCCHKCanonicalizingPacker, self).__init__(*args, **kwargs)
-        self._data_changed = False
-
-    def _exhaust_stream(self, source_vf, keys, message, vf_to_stream, pb_offset):
-        """Create and exhaust a stream, but don't insert it.
-
-        This is useful to get the side-effects of generating a stream.
-        """
-        self.pb.update('scanning %s' % (message,), pb_offset)
-        child_pb = ui.ui_factory.nested_progress_bar()
-        try:
-            list(vf_to_stream(source_vf, keys, message, child_pb))
-        finally:
-            child_pb.finished()
-
-    def _copy_inventory_texts(self):
-        source_vf, target_vf = self._build_vfs('inventory', True, True)
-        source_chk_vf, target_chk_vf = self._get_chk_vfs_for_copy()
-        inventory_keys = source_vf.keys()
-        # First, copy the existing CHKs on the assumption that most of them
-        # will be correct.  This will save us from having to reinsert (and
-        # recompress) these records later at the cost of perhaps preserving a
-        # few unused CHKs.
-        # (Iterate but don't insert _get_filtered_inv_stream to populate the
-        # variables needed by GCCHKPacker._copy_chk_texts.)
-        self._exhaust_stream(source_vf, inventory_keys, 'inventories',
-                self._get_filtered_inv_stream, 2)
-        GCCHKPacker._copy_chk_texts(self)
-        # Now copy and fix the inventories, and any regenerated CHKs.
-        def chk_canonicalizing_inv_stream(source_vf, keys, message, pb=None):
-            return self._get_filtered_canonicalizing_inv_stream(
-                source_vf, keys, message, pb, source_chk_vf, target_chk_vf)
-        self._copy_stream(source_vf, target_vf, inventory_keys,
-                          'inventories', chk_canonicalizing_inv_stream, 4)
-
-    def _copy_chk_texts(self):
-        # No-op; in this class this happens during _copy_inventory_texts.
-        pass
-
-    def _get_filtered_canonicalizing_inv_stream(self, source_vf, keys, message,
-            pb=None, source_chk_vf=None, target_chk_vf=None):
-        """Filter the texts of inventories, regenerating CHKs to make sure they
-        are canonical.
-        """
-        total_keys = len(keys)
-        target_chk_vf = versionedfile.NoDupeAddLinesDecorator(target_chk_vf)
-        def _filtered_inv_stream():
-            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
-            search_key_name = None
-            for idx, record in enumerate(stream):
-                # Inventories should always be with revisions; assume success.
-                bytes = record.get_bytes_as('fulltext')
-                chk_inv = inventory.CHKInventory.deserialise(
-                    source_chk_vf, bytes, record.key)
-                if pb is not None:
-                    pb.update('inv', idx, total_keys)
-                chk_inv.id_to_entry._ensure_root()
-                if search_key_name is None:
-                    # Find the name corresponding to the search_key_func
-                    search_key_reg = chk_map.search_key_registry
-                    for search_key_name, func in search_key_reg.iteritems():
-                        if func == chk_inv.id_to_entry._search_key_func:
-                            break
-                canonical_inv = inventory.CHKInventory.from_inventory(
-                    target_chk_vf, chk_inv,
-                    maximum_size=chk_inv.id_to_entry._root_node._maximum_size,
-                    search_key_name=search_key_name)
-                if chk_inv.id_to_entry.key() != canonical_inv.id_to_entry.key():
-                    trace.mutter(
-                        'Non-canonical CHK map for id_to_entry of inv: %s '
-                        '(root is %s, should be %s)' % (chk_inv.revision_id,
-                        chk_inv.id_to_entry.key()[0],
-                        canonical_inv.id_to_entry.key()[0]))
-                    self._data_changed = True
-                p_id_map = chk_inv.parent_id_basename_to_file_id
-                p_id_map._ensure_root()
-                canon_p_id_map = canonical_inv.parent_id_basename_to_file_id
-                if p_id_map.key() != canon_p_id_map.key():
-                    trace.mutter(
-                        'Non-canonical CHK map for parent_id_to_basename of '
-                        'inv: %s (root is %s, should be %s)'
-                        % (chk_inv.revision_id, p_id_map.key()[0],
-                           canon_p_id_map.key()[0]))
-                    self._data_changed = True
-                yield versionedfile.ChunkedContentFactory(record.key,
-                        record.parents, record.sha1,
-                        canonical_inv.to_lines())
-            # We have finished processing all of the inventory records, we
-            # don't need these sets anymore
-        return _filtered_inv_stream()
-
-    def _use_pack(self, new_pack):
-        """Override _use_pack to check for reconcile having changed content."""
-        return new_pack.data_inserted() and self._data_changed
-
-
 class GCRepositoryPackCollection(RepositoryPackCollection):

     pack_factory = GCPack
-    resumed_pack_factory = ResumedGCPack
-    normal_packer_class = GCCHKPacker
-    optimising_packer_class = GCCHKPacker
-
-    def _check_new_inventories(self):
-        """Detect missing inventories or chk root entries for the new revisions
-        in this write group.
-
-        :returns: list of strs, summarising any problems found.  If the list is
-            empty no problems were found.
+
+    def _already_packed(self):
+        """Is the collection already packed?"""
+        # Always repack GC repositories for now
+        return False
+
+    def _execute_pack_operations(self, pack_operations,
+                                 _packer_class=GCCHKPacker,
+                                 reload_func=None):
+        """Execute a series of pack operations.
+
+        :param pack_operations: A list of [revision_count, packs_to_combine].
+        :param _packer_class: The class of packer to use (default: Packer).
+        :return: None.
         """
-        # Ensure that all revisions added in this write group have:
-        #   - corresponding inventories,
-        #   - chk root entries for those inventories,
-        #   - and any present parent inventories have their chk root
-        #     entries too.
-        # And all this should be independent of any fallback repository.
-        problems = []
-        key_deps = self.repo.revisions._index._key_dependencies
-        new_revisions_keys = key_deps.get_new_keys()
-        no_fallback_inv_index = self.repo.inventories._index
-        no_fallback_chk_bytes_index = self.repo.chk_bytes._index
-        no_fallback_texts_index = self.repo.texts._index
-        inv_parent_map = no_fallback_inv_index.get_parent_map(
-            new_revisions_keys)
-        # Are any inventories for corresponding to the new revisions missing?
-        corresponding_invs = set(inv_parent_map)
-        missing_corresponding = set(new_revisions_keys)
-        missing_corresponding.difference_update(corresponding_invs)
-        if missing_corresponding:
-            problems.append("inventories missing for revisions %s" %
-                (sorted(missing_corresponding),))
-            return problems
-        # Are any chk root entries missing for any inventories?  This includes
-        # any present parent inventories, which may be used when calculating
-        # deltas for streaming.
-        all_inv_keys = set(corresponding_invs)
-        for parent_inv_keys in inv_parent_map.itervalues():
-            all_inv_keys.update(parent_inv_keys)
-        # Filter out ghost parents.
-        all_inv_keys.intersection_update(
-            no_fallback_inv_index.get_parent_map(all_inv_keys))
-        parent_invs_only_keys = all_inv_keys.symmetric_difference(
-            corresponding_invs)
-        all_missing = set()
-        inv_ids = [key[-1] for key in all_inv_keys]
-        parent_invs_only_ids = [key[-1] for key in parent_invs_only_keys]
-        root_key_info = _build_interesting_key_sets(
-            self.repo, inv_ids, parent_invs_only_ids)
-        expected_chk_roots = root_key_info.all_keys()
-        present_chk_roots = no_fallback_chk_bytes_index.get_parent_map(
-            expected_chk_roots)
-        missing_chk_roots = expected_chk_roots.difference(present_chk_roots)
-        if missing_chk_roots:
-            problems.append("missing referenced chk root keys: %s"
-                % (sorted(missing_chk_roots),))
-            # Don't bother checking any further.
-            return problems
-        # Find all interesting chk_bytes records, and make sure they are
-        # present, as well as the text keys they reference.
-        chk_bytes_no_fallbacks = self.repo.chk_bytes.without_fallbacks()
-        chk_bytes_no_fallbacks._search_key_func = \
-            self.repo.chk_bytes._search_key_func
-        chk_diff = chk_map.iter_interesting_nodes(
-            chk_bytes_no_fallbacks, root_key_info.interesting_root_keys,
-            root_key_info.uninteresting_root_keys)
-        text_keys = set()
-        try:
-            for record in _filter_text_keys(chk_diff, text_keys,
-                                            chk_map._bytes_to_text_key):
-                pass
-        except errors.NoSuchRevision, e:
-            # XXX: It would be nice if we could give a more precise error here.
-            problems.append("missing chk node(s) for id_to_entry maps")
-        chk_diff = chk_map.iter_interesting_nodes(
-            chk_bytes_no_fallbacks, root_key_info.interesting_pid_root_keys,
-            root_key_info.uninteresting_pid_root_keys)
-        try:
-            for interesting_rec, interesting_map in chk_diff:
-                pass
-        except errors.NoSuchRevision, e:
-            problems.append(
-                "missing chk node(s) for parent_id_basename_to_file_id maps")
-        present_text_keys = no_fallback_texts_index.get_parent_map(text_keys)
-        missing_text_keys = text_keys.difference(present_text_keys)
-        if missing_text_keys:
-            problems.append("missing text keys: %r"
-                % (sorted(missing_text_keys),))
-        return problems
-
-
-class CHKInventoryRepository(PackRepository):
-    """subclass of PackRepository that uses CHK based inventories."""
+        # XXX: Copied across from RepositoryPackCollection simply because we
+        #      want to override the _packer_class ... :(
+        for revision_count, packs in pack_operations:
+            # we may have no-ops from the setup logic
+            if len(packs) == 0:
+                continue
+            packer = GCCHKPacker(self, packs, '.autopack',
+                                 reload_func=reload_func)
+            try:
+                packer.pack()
+            except errors.RetryWithNewPacks:
+                # An exception is propagating out of this context, make sure
+                # this packer has cleaned up. Packer() doesn't set its new_pack
+                # state into the RepositoryPackCollection object, so we only
+                # have access to it directly here.
+                if packer.new_pack is not None:
+                    packer.new_pack.abort()
+                raise
+            for pack in packs:
+                self._remove_pack_from_memory(pack)
+        # record the newly available packs and stop advertising the old
+        # packs
+        self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)
+
+
+class CHKInventoryRepository(KnitPackRepository):
+    """subclass of KnitPackRepository that uses CHK based inventories."""

     def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
         _serializer):
         """Overridden to change pack collection class."""
-        super(CHKInventoryRepository, self).__init__(_format, a_bzrdir,
-            control_files, _commit_builder_class, _serializer)
+        KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
+            _commit_builder_class, _serializer)
+        # and now replace everything it did :)
         index_transport = self._transport.clone('indices')
         self._pack_collection = GCRepositoryPackCollection(self,
             self._transport, index_transport,
@@ -813,39 +604,31 @@
         self.inventories = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
                 add_callback=self._pack_collection.inventory_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=True, is_locked=self.is_locked),
             access=self._pack_collection.inventory_index.data_access)
         self.revisions = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                 add_callback=self._pack_collection.revision_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                track_external_parent_refs=True, track_new_keys=True),
+                parents=True, is_locked=self.is_locked),
             access=self._pack_collection.revision_index.data_access,
             delta=False)
         self.signatures = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.signature_index.combined_index,
                 add_callback=self._pack_collection.signature_index.add_callback,
-                parents=False, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=False, is_locked=self.is_locked),
             access=self._pack_collection.signature_index.data_access,
             delta=False)
         self.texts = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.text_index.combined_index,
                 add_callback=self._pack_collection.text_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=True, is_locked=self.is_locked),
             access=self._pack_collection.text_index.data_access)
         # No parents, individual CHK pages don't have specific ancestry
         self.chk_bytes = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.chk_index.combined_index,
                 add_callback=self._pack_collection.chk_index.add_callback,
-                parents=False, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=False, is_locked=self.is_locked),
             access=self._pack_collection.chk_index.data_access)
-        search_key_name = self._format._serializer.search_key_name
-        search_key_func = chk_map.search_key_registry.get(search_key_name)
-        self.chk_bytes._search_key_func = search_key_func
         # True when the repository object is 'write locked' (as opposed to the
         # physical lock only taken out around changes to the pack-names list.)
         # Another way to represent this would be a decorator around the control
@@ -874,44 +657,6 @@
         return self._inventory_add_lines(revision_id, parents,
             inv_lines, check_content=False)

-    def _create_inv_from_null(self, delta, revision_id):
-        """This will mutate new_inv directly.
-
-        This is a simplified form of create_by_apply_delta which knows that all
-        the old values must be None, so everything is a create.
-        """
-        serializer = self._format._serializer
-        new_inv = inventory.CHKInventory(serializer.search_key_name)
-        new_inv.revision_id = revision_id
-        entry_to_bytes = new_inv._entry_to_bytes
-        id_to_entry_dict = {}
-        parent_id_basename_dict = {}
-        for old_path, new_path, file_id, entry in delta:
-            if old_path is not None:
-                raise ValueError('Invalid delta, somebody tried to delete %r'
-                                 ' from the NULL_REVISION'
-                                 % ((old_path, file_id),))
-            if new_path is None:
-                raise ValueError('Invalid delta, delta from NULL_REVISION has'
-                                 ' no new_path %r' % (file_id,))
-            if new_path == '':
-                new_inv.root_id = file_id
-                parent_id_basename_key = StaticTuple('', '').intern()
-            else:
-                utf8_entry_name = entry.name.encode('utf-8')
-                parent_id_basename_key = StaticTuple(entry.parent_id,
-                                                     utf8_entry_name).intern()
-            new_value = entry_to_bytes(entry)
-            # Populate Caches?
-            # new_inv._path_to_fileid_cache[new_path] = file_id
-            key = StaticTuple(file_id).intern()
-            id_to_entry_dict[key] = new_value
-            parent_id_basename_dict[parent_id_basename_key] = file_id
-
-        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
-            parent_id_basename_dict, maximum_size=serializer.maximum_size)
-        return new_inv
-
     def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                                parents, basis_inv=None, propagate_caches=False):
         """Add a new inventory expressed as a delta against another revision.
@@ -937,42 +682,29 @@
             repository format specific) of the serialized inventory, and the
             resulting inventory.
         """
+        if basis_revision_id == _mod_revision.NULL_REVISION:
+            return KnitPackRepository.add_inventory_by_delta(self,
+                basis_revision_id, delta, new_revision_id, parents)
         if not self.is_in_write_group():
             raise AssertionError("%r not in write group" % (self,))
         _mod_revision.check_not_reserved_id(new_revision_id)
-        basis_tree = None
-        if basis_inv is None:
-            if basis_revision_id == _mod_revision.NULL_REVISION:
-                new_inv = self._create_inv_from_null(delta, new_revision_id)
-                if new_inv.root_id is None:
-                    raise errors.RootMissing()
-                inv_lines = new_inv.to_lines()
-                return self._inventory_add_lines(new_revision_id, parents,
-                    inv_lines, check_content=False), new_inv
-            else:
-                basis_tree = self.revision_tree(basis_revision_id)
-                basis_tree.lock_read()
+        basis_tree = self.revision_tree(basis_revision_id)
+        basis_tree.lock_read()
+        try:
+            if basis_inv is None:
                 basis_inv = basis_tree.inventory
-        try:
             result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                 propagate_caches=propagate_caches)
             inv_lines = result.to_lines()
             return self._inventory_add_lines(new_revision_id, parents,
                 inv_lines, check_content=False), result
         finally:
-            if basis_tree is not None:
-                basis_tree.unlock()
-
-    def _deserialise_inventory(self, revision_id, bytes):
-        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
-            (revision_id,))
-
-    def _iter_inventories(self, revision_ids, ordering):
+            basis_tree.unlock()
+
+    def _iter_inventories(self, revision_ids):
         """Iterate over many inventory objects."""
-        if ordering is None:
-            ordering = 'unordered'
         keys = [(revision_id,) for revision_id in revision_ids]
-        stream = self.inventories.get_record_stream(keys, ordering, True)
+        stream = self.inventories.get_record_stream(keys, 'unordered', True)
         texts = {}
         for record in stream:
             if record.storage_kind != 'absent':
@@ -982,21 +714,57 @@
         for key in keys:
             yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)

-    def _iter_inventory_xmls(self, revision_ids, ordering):
-        # Without a native 'xml' inventory, this method doesn't make sense.
-        # However older working trees, and older bundles want it - so we supply
-        # it allowing _get_inventory_xml to work. Bundles currently use the
-        # serializer directly; this also isn't ideal, but there isn't an xml
-        # iteration interface offered at all for repositories. We could make
-        # _iter_inventory_xmls be part of the contract, even if kept private.
-        inv_to_str = self._serializer.write_inventory_to_string
-        for inv in self.iter_inventories(revision_ids, ordering=ordering):
-            yield inv_to_str(inv), inv.revision_id
-
-    def _find_present_inventory_keys(self, revision_keys):
-        parent_map = self.inventories.get_parent_map(revision_keys)
-        present_inventory_keys = set(k for k in parent_map)
-        return present_inventory_keys
+    def _iter_inventory_xmls(self, revision_ids):
+        # Without a native 'xml' inventory, this method doesn't make sense, so
+        # make it raise to trap naughty direct users.
+        raise NotImplementedError(self._iter_inventory_xmls)
+
+    def _find_revision_outside_set(self, revision_ids):
+        revision_set = frozenset(revision_ids)
+        for revid in revision_ids:
+            parent_ids = self.get_parent_map([revid]).get(revid, ())
+            for parent in parent_ids:
+                if parent in revision_set:
+                    # Parent is not outside the set
+                    continue
+                if parent not in self.get_parent_map([parent]):
+                    # Parent is a ghost
+                    continue
+                return parent
+        return _mod_revision.NULL_REVISION
+
+    def _find_file_keys_to_fetch(self, revision_ids, pb):
+        rich_root = self.supports_rich_root()
+        revision_outside_set = self._find_revision_outside_set(revision_ids)
+        if revision_outside_set == _mod_revision.NULL_REVISION:
+            uninteresting_root_keys = set()
+        else:
+            uninteresting_inv = self.get_inventory(revision_outside_set)
+            uninteresting_root_keys = set([uninteresting_inv.id_to_entry.key()])
+        interesting_root_keys = set()
+        for idx, inv in enumerate(self.iter_inventories(revision_ids)):
+            interesting_root_keys.add(inv.id_to_entry.key())
+        revision_ids = frozenset(revision_ids)
+        file_id_revisions = {}
+        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
+        for record, items in chk_map.iter_interesting_nodes(self.chk_bytes,
+                    interesting_root_keys, uninteresting_root_keys,
+                    pb=pb):
+            # This is cheating a bit to use the last grabbed 'inv', but it
+            # works
+            for name, bytes in items:
+                (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
+                if not rich_root and name_utf8 == '':
+                    continue
+                if revision_id in revision_ids:
+                    # Would we rather build this up into file_id => revision
+                    # maps?
+                    try:
+                        file_id_revisions[file_id].add(revision_id)
+                    except KeyError:
+                        file_id_revisions[file_id] = set([revision_id])
+        for file_id, revisions in file_id_revisions.iteritems():
+            yield ('file', file_id, revisions)

     def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
         """Find the file ids and versions affected by revisions.
@@ -1008,46 +776,23 @@
             revision_ids. Each altered file-ids has the exact revision_ids that
             altered it listed explicitly.
         """
-        rich_root = self.supports_rich_root()
-        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
-        file_id_revisions = {}
+        rich_roots = self.supports_rich_root()
+        result = {}
         pb = ui.ui_factory.nested_progress_bar()
         try:
-            revision_keys = [(r,) for r in revision_ids]
-            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
-            # TODO: instead of using _find_present_inventory_keys, change the
-            #       code paths to allow missing inventories to be tolerated.
-            #       However, we only want to tolerate missing parent
-            #       inventories, not missing inventories for revision_ids
-            present_parent_inv_keys = self._find_present_inventory_keys(
-                                        parent_keys)
-            present_parent_inv_ids = set(
-                [k[-1] for k in present_parent_inv_keys])
-            inventories_to_read = set(revision_ids)
-            inventories_to_read.update(present_parent_inv_ids)
-            root_key_info = _build_interesting_key_sets(
-                self, inventories_to_read, present_parent_inv_ids)
-            interesting_root_keys = root_key_info.interesting_root_keys
-            uninteresting_root_keys = root_key_info.uninteresting_root_keys
-            chk_bytes = self.chk_bytes
-            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
-                        interesting_root_keys, uninteresting_root_keys,
-                        pb=pb):
-                for name, bytes in items:
-                    (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
-                    # TODO: consider interning file_id, revision_id here, or
-                    #       pushing that intern() into bytes_to_info()
-                    # TODO: rich_root should always be True here, for all
-                    #       repositories that support chk_bytes
-                    if not rich_root and name_utf8 == '':
-                        continue
-                    try:
-                        file_id_revisions[file_id].add(revision_id)
-                    except KeyError:
-                        file_id_revisions[file_id] = set([revision_id])
+            total = len(revision_ids)
+            for pos, inv in enumerate(self.iter_inventories(revision_ids)):
+                pb.update("Finding text references", pos, total)
+                for entry in inv.iter_just_entries():
+                    if entry.revision != inv.revision_id:
+                        continue
+                    if not rich_roots and entry.file_id == inv.root_id:
+                        continue
+                    alterations = result.setdefault(entry.file_id, set([]))
+                    alterations.add(entry.revision)
+            return result
         finally:
             pb.finished()
-        return file_id_revisions

     def find_text_key_references(self):
         """Find the text key references within the repository.
@@ -1081,67 +826,31 @@
         finally:
             pb.finished()

-    @needs_write_lock
-    def reconcile_canonicalize_chks(self):
-        """Reconcile this repository to make sure all CHKs are in canonical
-        form.
-        """
-        from bzrlib.reconcile import PackReconciler
-        reconciler = PackReconciler(self, thorough=True, canonicalize_chks=True)
-        reconciler.reconcile()
-        return reconciler
-
     def _reconcile_pack(self, collection, packs, extension, revs, pb):
         packer = GCCHKReconcilePacker(collection, packs, extension)
         return packer.pack(pb)

-    def _canonicalize_chks_pack(self, collection, packs, extension, revs, pb):
-        packer = GCCHKCanonicalizingPacker(collection, packs, extension, revs)
-        return packer.pack(pb)
-
     def _get_source(self, to_format):
         """Return a source for streaming from this repository."""
-        if self._format._serializer == to_format._serializer:
+        if isinstance(to_format, remote.RemoteRepositoryFormat):
+            # Can't just check attributes on to_format with the current code,
+            # work around this:
+            to_format._ensure_real()
+            to_format = to_format._custom_format
+        if to_format.__class__ is self._format.__class__:
             # We must be exactly the same format, otherwise stuff like the chk
-            # page layout might be different.
-            # Actually, this test is just slightly looser than exact so that
-            # CHK2 <-> 2a transfers will work.
+            # page layout might be different
             return GroupCHKStreamSource(self, to_format)
         return super(CHKInventoryRepository, self)._get_source(to_format)

-    def _find_inconsistent_revision_parents(self, revisions_iterator=None):
-        """Find revisions with different parent lists in the revision object
-        and in the index graph.
-
-        :param revisions_iterator: None, or an iterator of (revid,
-            Revision-or-None). This iterator controls the revisions checked.
-        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
-            parents-in-revision).
-        """
-        if not self.is_locked():
-            raise AssertionError()
-        vf = self.revisions
-        if revisions_iterator is None:
-            revisions_iterator = self._iter_revisions(None)
-        for revid, revision in revisions_iterator:
-            if revision is None:
-                pass
-            parent_map = vf.get_parent_map([(revid,)])
-            parents_according_to_index = tuple(parent[-1] for parent in
-                parent_map[(revid,)])
-            parents_according_to_revision = tuple(revision.parent_ids)
-            if parents_according_to_index != parents_according_to_revision:
-                yield (revid, parents_according_to_index,
-                    parents_according_to_revision)
-
-    def _check_for_inconsistent_revision_parents(self):
-        inconsistencies = list(self._find_inconsistent_revision_parents())
-        if inconsistencies:
-            raise errors.BzrCheckError(
-                "Revision index has inconsistent parents.")
-
-
-class GroupCHKStreamSource(StreamSource):
+    def suspend_write_group(self):
+        raise errors.UnsuspendableWriteGroup(self)
+
+    def _resume_write_group(self, tokens):
+        raise errors.UnsuspendableWriteGroup(self)
+
+
+class GroupCHKStreamSource(repository.StreamSource):
     """Used when both the source and target repo are GroupCHK repos."""

     def __init__(self, from_repository, to_format):
@@ -1149,11 +858,10 @@
         super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
         self._revision_keys = None
         self._text_keys = None
-        self._text_fetch_order = 'groupcompress'
         self._chk_id_roots = None
         self._chk_p_id_roots = None

-    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
+    def _get_filtered_inv_stream(self):
         """Get a stream of inventory texts.

         When this function returns, self._chk_id_roots and self._chk_p_id_roots
@@ -1165,14 +873,9 @@
             id_roots_set = set()
             p_id_roots_set = set()
             source_vf = self.from_repository.inventories
-            stream = source_vf.get_record_stream(inventory_keys,
+            stream = source_vf.get_record_stream(self._revision_keys,
                                                  'groupcompress', True)
             for record in stream:
-                if record.storage_kind == 'absent':
-                    if allow_absent:
-                        continue
-                    else:
-                        raise errors.NoSuchRevision(self, record.key)
                 bytes = record.get_bytes_as('fulltext')
                 chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                              record.key)
@@ -1194,173 +897,70 @@
             p_id_roots_set.clear()
         return ('inventories', _filtered_inv_stream())

-    def _get_filtered_chk_streams(self, excluded_revision_keys):
+    def _get_filtered_chk_streams(self, excluded_keys):
         self._text_keys = set()
-        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
-        if not excluded_revision_keys:
+        excluded_keys.discard(_mod_revision.NULL_REVISION)
+        if not excluded_keys:
             uninteresting_root_keys = set()
             uninteresting_pid_root_keys = set()
         else:
-            # filter out any excluded revisions whose inventories are not
-            # actually present
-            # TODO: Update Repository.iter_inventories() to add
-            #       ignore_missing=True
-            present_keys = self.from_repository._find_present_inventory_keys(
-                            excluded_revision_keys)
-            present_ids = [k[-1] for k in present_keys]
             uninteresting_root_keys = set()
             uninteresting_pid_root_keys = set()
-            for inv in self.from_repository.iter_inventories(present_ids):
+            for inv in self.from_repository.iter_inventories(excluded_keys):
                 uninteresting_root_keys.add(inv.id_to_entry.key())
                 uninteresting_pid_root_keys.add(
                     inv.parent_id_basename_to_file_id.key())
+        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
         chk_bytes = self.from_repository.chk_bytes
         def _filter_id_to_entry():
-            interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
-                        self._chk_id_roots, uninteresting_root_keys)
-            for record in _filter_text_keys(interesting_nodes, self._text_keys,
-                    chk_map._bytes_to_text_key):
+            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
+                        self._chk_id_roots, uninteresting_root_keys):
+                for name, bytes in items:
+                    # Note: we don't care about name_utf8, because we are always
+                    # rich-root = True
+                    _, file_id, revision_id = bytes_to_info(bytes)
+                    self._text_keys.add((file_id, revision_id))
                 if record is not None:
                     yield record
-            # Consumed
-            self._chk_id_roots = None
         yield 'chk_bytes', _filter_id_to_entry()
         def _get_parent_id_basename_to_file_id_pages():
             for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                         self._chk_p_id_roots, uninteresting_pid_root_keys):
                 if record is not None:
                     yield record
-            # Consumed
-            self._chk_p_id_roots = None
         yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

     def _get_text_stream(self):
         # Note: We know we don't have to handle adding root keys, because both
-        # the source and target are the identical network name.
-        text_stream = self.from_repository.texts.get_record_stream(
-                        self._text_keys, self._text_fetch_order, False)
-        return ('texts', text_stream)
+        # the source and target are GCCHK, and those always support rich-roots
+        # We may want to request as 'unordered', in case the source has done a
+        # 'split' packing
+        return ('texts', self.from_repository.texts.get_record_stream(
+                            self._text_keys, 'groupcompress', False))

     def get_stream(self, search):
-        def wrap_and_count(pb, rc, stream):
-            """Yield records from stream while showing progress."""
-            count = 0
-            for record in stream:
-                if count == rc.STEP:
-                    rc.increment(count)
-                    pb.update('Estimate', rc.current, rc.max)
-                    count = 0
-                count += 1
-                yield record
-
         revision_ids = search.get_keys()
-        pb = ui.ui_factory.nested_progress_bar()
-        rc = self._record_counter
-        self._record_counter.setup(len(revision_ids))
         for stream_info in self._fetch_revision_texts(revision_ids):
-            yield (stream_info[0],
-                wrap_and_count(pb, rc, stream_info[1]))
+            yield stream_info
         self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        # TODO: The keys to exclude might be part of the search recipe
-        # For now, exclude all parents that are at the edge of ancestry, for
-        # which we have inventories
-        from_repo = self.from_repository
-        parent_keys = from_repo._find_parent_keys_of_revisions(
-                        self._revision_keys)
-        self.from_repository.revisions.clear_cache()
-        self.from_repository.signatures.clear_cache()
-        # Clear the repo's get_parent_map cache too.
-        self.from_repository._unstacked_provider.disable_cache()
-        self.from_repository._unstacked_provider.enable_cache()
-        s = self._get_inventory_stream(self._revision_keys)
-        yield (s[0], wrap_and_count(pb, rc, s[1]))
-        self.from_repository.inventories.clear_cache()
-        for stream_info in self._get_filtered_chk_streams(parent_keys):
-            yield (stream_info[0], wrap_and_count(pb, rc, stream_info[1]))
-        self.from_repository.chk_bytes.clear_cache()
-        s = self._get_text_stream()
-        yield (s[0], wrap_and_count(pb, rc, s[1]))
-        self.from_repository.texts.clear_cache()
-        pb.update('Done', rc.max, rc.max)
-        pb.finished()
-
-    def get_stream_for_missing_keys(self, missing_keys):
-        # missing keys can only occur when we are byte copying and not
-        # translating (because translation means we don't send
-        # unreconstructable deltas ever).
-        missing_inventory_keys = set()
-        for key in missing_keys:
-            if key[0] != 'inventories':
-                raise AssertionError('The only missing keys we should'
-                    ' be filling in are inventory keys, not %s'
-                    % (key[0],))
-            missing_inventory_keys.add(key[1:])
-        if self._chk_id_roots or self._chk_p_id_roots:
-            raise AssertionError('Cannot call get_stream_for_missing_keys'
-                ' until all of get_stream() has been consumed.')
-        # Yield the inventory stream, so we can find the chk stream
-        # Some of the missing_keys will be missing because they are ghosts.
-        # As such, we can ignore them. The Sink is required to verify there are
-        # no unavailable texts when the ghost inventories are not filled in.
-        yield self._get_inventory_stream(missing_inventory_keys,
-                                         allow_absent=True)
-        # We use the empty set for excluded_revision_keys, to make it clear
-        # that we want to transmit all referenced chk pages.
-        for stream_info in self._get_filtered_chk_streams(set()):
+        yield self._get_filtered_inv_stream()
+        # The keys to exclude are part of the search recipe
+        _, _, exclude_keys, _ = search.get_recipe()
+        for stream_info in self._get_filtered_chk_streams(exclude_keys):
             yield stream_info
-
-
-class _InterestingKeyInfo(object):
-    def __init__(self):
-        self.interesting_root_keys = set()
-        self.interesting_pid_root_keys = set()
-        self.uninteresting_root_keys = set()
-        self.uninteresting_pid_root_keys = set()
-
-    def all_interesting(self):
-        return self.interesting_root_keys.union(self.interesting_pid_root_keys)
-
-    def all_uninteresting(self):
-        return self.uninteresting_root_keys.union(
-            self.uninteresting_pid_root_keys)
-
-    def all_keys(self):
-        return self.all_interesting().union(self.all_uninteresting())
-
-
-def _build_interesting_key_sets(repo, inventory_ids, parent_only_inv_ids):
-    result = _InterestingKeyInfo()
-    for inv in repo.iter_inventories(inventory_ids, 'unordered'):
-        root_key = inv.id_to_entry.key()
-        pid_root_key = inv.parent_id_basename_to_file_id.key()
-        if inv.revision_id in parent_only_inv_ids:
-            result.uninteresting_root_keys.add(root_key)
-            result.uninteresting_pid_root_keys.add(pid_root_key)
-        else:
-            result.interesting_root_keys.add(root_key)
-            result.interesting_pid_root_keys.add(pid_root_key)
-    return result
-
-
-def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_text_key):
-    """Iterate the result of iter_interesting_nodes, yielding the records
-    and adding to text_keys.
-    """
-    text_keys_update = text_keys.update
-    for record, items in interesting_nodes_iterable:
-        text_keys_update([bytes_to_text_key(b) for n,b in items])
-        yield record
-
-
-class RepositoryFormat2a(RepositoryFormatPack):
-    """A CHK repository that uses the bencode revision serializer."""
+        yield self._get_text_stream()
+
+
+class RepositoryFormatCHK1(RepositoryFormatPack):
+    """A hashed CHK+group compress pack repository."""

     repository_class = CHKInventoryRepository
-    supports_external_lookups = True
     supports_chks = True
+    # For right now, setting this to True gives us InterModel1And2 rather
+    # than InterDifferingSerializer
    _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    _serializer = chk_serializer.chk_bencode_serializer
+    _serializer = chk_serializer.chk_serializer_255_bigpage
     _commit_inv_deltas = True
     # What index classes to use
     index_builder_class = BTreeBuilder
@@ -1374,10 +974,9 @@
     _fetch_order = 'unordered'
     _fetch_uses_deltas = False # essentially ignored by the groupcompress code.
     fast_deltas = True
-    pack_compresses = True

     def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('2a')
+        return bzrdir.format_registry.make_bzrdir('development6-rich-root')

     def _ignore_setting_bzrdir(self, format):
         pass
@@ -1385,34 +984,20 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

     def get_format_string(self):
-        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
+        """See RepositoryFormat.get_format_string()."""
+        return ('Bazaar development format - group compression and chk inventory'
+                ' (needs bzr.dev from 1.14)\n')

     def get_format_description(self):
         """See RepositoryFormat.get_format_description()."""
-        return ("Repository format 2a - rich roots, group compression"
+        return ("Development repository format - rich roots, group compression"
             " and chk inventories")

-
-class RepositoryFormat2aSubtree(RepositoryFormat2a):
-    """A 2a repository format that supports nested trees.
-
-    """
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('development-subtree')
-
-    def _ignore_setting_bzrdir(self, format):
-        pass
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        return ('Bazaar development format 8\n')
-
-    def get_format_description(self):
-        """See RepositoryFormat.get_format_description()."""
-        return ("Development repository format 8 - nested trees, "
-                "group compression and chk inventories")
-
-    experimental = True
-    supports_tree_reference = True
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)
+