~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: Patch Queue Manager
  • Date: 2014-02-12 18:22:22 UTC
  • mfrom: (6589.2.1 trunk)
  • Revision ID: pqm@pqm.ubuntu.com-20140212182222-beouo25gaf1cny76
(vila) The XDG Base Directory Specification uses XDG_CACHE_HOME, not XDG_CACHE_DIR. (Andrew Starr-Bochicchio)

=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
@@ -1,4 +1,4 @@
-# Copyright (C) 2008, 2009 Canonical Ltd
+# Copyright (C) 2008-2011 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -16,43 +16,49 @@
 
 """Repository formats using CHK inventories and groupcompress compression."""
 
+from __future__ import absolute_import
+
 import time
 
 from bzrlib import (
-    bzrdir,
+    controldir,
     chk_map,
     chk_serializer,
     debug,
     errors,
     index as _mod_index,
     inventory,
-    knit,
     osutils,
     pack,
-    remote,
-    repository,
     revision as _mod_revision,
     trace,
     ui,
+    versionedfile,
    )
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
     )
-from bzrlib.index import GraphIndex, GraphIndexBuilder
+from bzrlib.decorators import needs_write_lock
 from bzrlib.groupcompress import (
     _GCGraphIndex,
     GroupCompressVersionedFiles,
     )
 from bzrlib.repofmt.pack_repo import (
+    _DirectPackAccess,
     Pack,
     NewPack,
-    KnitPackRepository,
+    PackRepository,
     PackRootCommitBuilder,
     RepositoryPackCollection,
     RepositoryFormatPack,
+    ResumedPack,
     Packer,
     )
+from bzrlib.vf_repository import (
+    StreamSource,
+    )
+from bzrlib.static_tuple import StaticTuple
 
 
 class GCPack(NewPack):
@@ -87,8 +93,8 @@
             # have a regular 2-list index giving parents and compression
             # source.
             index_builder_class(reference_lists=1),
-            # Texts: compression and per file graph, for all fileids - so two
-            # reference lists and two elements in the key tuple.
+            # Texts: per file graph, for all fileids - so one reference list
+            # and two elements in the key tuple.
             index_builder_class(reference_lists=1, key_elements=2),
             # Signatures: Just blobs to store, no compression, no parents
             # listing.
@@ -154,6 +160,8 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
+        # no name until we finish writing the content
+        self.name = None
 
     def _check_references(self):
         """Make sure our external references are present.
@@ -163,7 +171,21 @@
         have deltas based on a fallback repository.
         (See <https://bugs.launchpad.net/bzr/+bug/288751>)
         """
-        # Groupcompress packs don't have any external references
+        # Groupcompress packs don't have any external references, arguably CHK
+        # pages have external references, but we cannot 'cheaply' determine
+        # them without actually walking all of the chk pages.
+
+
+class ResumedGCPack(ResumedPack):
+
+    def _check_references(self):
+        """Make sure our external compression parents are present."""
+        # See GCPack._check_references for why this is empty
+
+    def _get_external_refs(self, index):
+        # GC repositories don't have compression parents external to a given
+        # pack file
+        return set()
 
 
 class GCCHKPacker(Packer):
@@ -202,6 +224,7 @@
             p_id_roots_set = set()
             stream = source_vf.get_record_stream(keys, 'groupcompress', True)
             for idx, record in enumerate(stream):
+                # Inventories should always be with revisions; assume success.
                 bytes = record.get_bytes_as('fulltext')
                 chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                              record.key)
@@ -245,7 +268,6 @@
         remaining_keys = set(keys)
         counter = [0]
         if self._gather_text_refs:
-            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
             self._text_refs = set()
         def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
             cur_keys = root_keys
@@ -272,12 +294,16 @@
                     # Store is None, because we know we have a LeafNode, and we
                     # just want its entries
                     for file_id, bytes in node.iteritems(None):
-                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
-                        self._text_refs.add((file_id, revision_id))
+                        self._text_refs.add(chk_map._bytes_to_text_key(bytes))
                 def next_stream():
                     stream = source_vf.get_record_stream(cur_keys,
                                                          'as-requested', True)
                     for record in stream:
+                        if record.storage_kind == 'absent':
+                            # An absent CHK record: we assume that the missing
+                            # record is in a different pack - e.g. a page not
+                            # altered by the commit we're packing.
+                            continue
                         bytes = record.get_bytes_as('fulltext')
                         # We don't care about search_key_func for this code,
                         # because we only care about external references.
@@ -330,7 +356,8 @@
         """Build a VersionedFiles instance on top of this group of packs."""
         index_name = index_name + '_index'
         index_to_pack = {}
-        access = knit._DirectPackAccess(index_to_pack)
+        access = _DirectPackAccess(index_to_pack,
+                                   reload_func=self._reload_func)
         if for_write:
             # Use new_pack
             if self.new_pack is None:
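
This hunk swaps `knit._DirectPackAccess` for pack_repo's `_DirectPackAccess` and passes a `reload_func`, so a read that hits a vanished pack can refresh the pack list and retry instead of failing outright. A minimal sketch of that retry pattern, with illustrative names rather than bzrlib's API:

    class RetryWithNewPacks(Exception):
        """Signals that the pack list may be stale and should be reloaded."""

    def read_with_reload(read_once, reload_func, max_attempts=3):
        # Try the read; on a stale-pack error, refresh the pack list and retry.
        for attempt in range(max_attempts):
            try:
                return read_once()
            except RetryWithNewPacks:
                if reload_func is None or attempt == max_attempts - 1:
                    raise
                reload_func()  # e.g. re-read pack-names and remap indices
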
@@ -390,11 +417,34 @@
 
     def _copy_inventory_texts(self):
         source_vf, target_vf = self._build_vfs('inventory', True, True)
-        self._copy_stream(source_vf, target_vf, self.revision_keys,
+        # It is not sufficient to just use self.revision_keys, as stacked
+        # repositories can have more inventories than they have revisions.
+        # One alternative would be to do something with
+        # get_parent_map(self.revision_keys), but that shouldn't be any faster
+        # than this.
+        inventory_keys = source_vf.keys()
+        missing_inventories = set(self.revision_keys).difference(inventory_keys)
+        if missing_inventories:
+            # Go back to the original repo, to see if these are really missing
+            # https://bugs.launchpad.net/bzr/+bug/437003
+            # If we are packing a subset of the repo, it is fine to just have
+            # the data in another Pack file, which is not included in this pack
+            # operation.
+            inv_index = self._pack_collection.repo.inventories._index
+            pmap = inv_index.get_parent_map(missing_inventories)
+            really_missing = missing_inventories.difference(pmap)
+            if really_missing:
+                missing_inventories = sorted(really_missing)
+                raise ValueError('We are missing inventories for revisions: %s'
+                    % (missing_inventories,))
+        self._copy_stream(source_vf, target_vf, inventory_keys,
                           'inventories', self._get_filtered_inv_stream, 2)
 
+    def _get_chk_vfs_for_copy(self):
+        return self._build_vfs('chk', False, False)
+
     def _copy_chk_texts(self):
-        source_vf, target_vf = self._build_vfs('chk', False, False)
+        source_vf, target_vf = self._get_chk_vfs_for_copy()
         # TODO: This is technically spurious... if it is a performance issue,
         #       remove it
         total_keys = source_vf.keys()
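
The new `_copy_inventory_texts` no longer assumes one inventory per revision: it copies `source_vf.keys()` and then checks which revisions genuinely lack inventories, as opposed to inventories that merely live in a pack outside this pack operation. The two-stage set arithmetic, sketched with plain containers (names here are illustrative):

    def find_really_missing(revision_keys, packed_inventory_keys, full_parent_map):
        # Stage 1: revisions with no inventory among the packs being repacked.
        candidates = set(revision_keys) - set(packed_inventory_keys)
        # Stage 2: drop candidates that the full collection does know about;
        # full_parent_map covers every present key, so anything absent from it
        # is truly missing rather than just stored in an uninvolved pack.
        return sorted(candidates - set(full_parent_map))

    assert find_really_missing(['r1', 'r2'], ['r1'], {'r1': ()}) == ['r2']
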
@@ -423,7 +473,7 @@
         #      is grabbing too many keys...
         text_keys = source_vf.keys()
         self._copy_stream(source_vf, target_vf, text_keys,
-                          'text', self._get_progress_stream, 4)
+                          'texts', self._get_progress_stream, 4)
 
     def _copy_signature_texts(self):
         source_vf, target_vf = self._build_vfs('signature', False, False)
@@ -446,6 +496,15 @@
         if not self._use_pack(self.new_pack):
             self.new_pack.abort()
             return None
+        self.new_pack.finish_content()
+        if len(self.packs) == 1:
+            old_pack = self.packs[0]
+            if old_pack.name == self.new_pack._hash.hexdigest():
+                # The single old pack was already optimally packed.
+                trace.mutter('single pack %s was already optimally packed',
+                    old_pack.name)
+                self.new_pack.abort()
+                return None
         self.pb.update('finishing repack', 6, 7)
         self.new_pack.finish()
         self._pack_collection.allocate(self.new_pack)
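
Pack files are content-addressed: a finished pack's name is the hex digest of the bytes written. The added short-circuit exploits that to detect that repacking a single pack reproduced it byte for byte, in which case the new pack is aborted unwritten. Roughly, assuming an MD5-style digest purely for the sketch:

    import hashlib

    def repack_is_noop(old_pack_name, new_pack_bytes):
        # If the lone source pack was already optimally packed, the rewritten
        # bytes hash to the same name and there is nothing to replace.
        return old_pack_name == hashlib.md5(new_pack_bytes).hexdigest()
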
@@ -537,61 +596,216 @@
         return new_pack.data_inserted() and self._data_changed
 
 
+class GCCHKCanonicalizingPacker(GCCHKPacker):
+    """A packer that ensures inventories have canonical-form CHK maps.
+
+    Ideally this would be part of reconcile, but it's very slow and rarely
+    needed.  (It repairs repositories affected by
+    https://bugs.launchpad.net/bzr/+bug/522637).
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(GCCHKCanonicalizingPacker, self).__init__(*args, **kwargs)
+        self._data_changed = False
+
+    def _exhaust_stream(self, source_vf, keys, message, vf_to_stream, pb_offset):
+        """Create and exhaust a stream, but don't insert it.
+
+        This is useful to get the side-effects of generating a stream.
+        """
+        self.pb.update('scanning %s' % (message,), pb_offset)
+        child_pb = ui.ui_factory.nested_progress_bar()
+        try:
+            list(vf_to_stream(source_vf, keys, message, child_pb))
+        finally:
+            child_pb.finished()
+
+    def _copy_inventory_texts(self):
+        source_vf, target_vf = self._build_vfs('inventory', True, True)
+        source_chk_vf, target_chk_vf = self._get_chk_vfs_for_copy()
+        inventory_keys = source_vf.keys()
+        # First, copy the existing CHKs on the assumption that most of them
+        # will be correct.  This will save us from having to reinsert (and
+        # recompress) these records later at the cost of perhaps preserving a
+        # few unused CHKs.
+        # (Iterate but don't insert _get_filtered_inv_stream to populate the
+        # variables needed by GCCHKPacker._copy_chk_texts.)
+        self._exhaust_stream(source_vf, inventory_keys, 'inventories',
+                self._get_filtered_inv_stream, 2)
+        GCCHKPacker._copy_chk_texts(self)
+        # Now copy and fix the inventories, and any regenerated CHKs.
+        def chk_canonicalizing_inv_stream(source_vf, keys, message, pb=None):
+            return self._get_filtered_canonicalizing_inv_stream(
+                source_vf, keys, message, pb, source_chk_vf, target_chk_vf)
+        self._copy_stream(source_vf, target_vf, inventory_keys,
+                          'inventories', chk_canonicalizing_inv_stream, 4)
+
+    def _copy_chk_texts(self):
+        # No-op; in this class this happens during _copy_inventory_texts.
+        pass
+
+    def _get_filtered_canonicalizing_inv_stream(self, source_vf, keys, message,
+            pb=None, source_chk_vf=None, target_chk_vf=None):
+        """Filter the texts of inventories, regenerating CHKs to make sure they
+        are canonical.
+        """
+        total_keys = len(keys)
+        target_chk_vf = versionedfile.NoDupeAddLinesDecorator(target_chk_vf)
+        def _filtered_inv_stream():
+            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
+            search_key_name = None
+            for idx, record in enumerate(stream):
+                # Inventories should always be with revisions; assume success.
+                bytes = record.get_bytes_as('fulltext')
+                chk_inv = inventory.CHKInventory.deserialise(
+                    source_chk_vf, bytes, record.key)
+                if pb is not None:
+                    pb.update('inv', idx, total_keys)
+                chk_inv.id_to_entry._ensure_root()
+                if search_key_name is None:
+                    # Find the name corresponding to the search_key_func
+                    search_key_reg = chk_map.search_key_registry
+                    for search_key_name, func in search_key_reg.iteritems():
+                        if func == chk_inv.id_to_entry._search_key_func:
+                            break
+                canonical_inv = inventory.CHKInventory.from_inventory(
+                    target_chk_vf, chk_inv,
+                    maximum_size=chk_inv.id_to_entry._root_node._maximum_size,
+                    search_key_name=search_key_name)
+                if chk_inv.id_to_entry.key() != canonical_inv.id_to_entry.key():
+                    trace.mutter(
+                        'Non-canonical CHK map for id_to_entry of inv: %s '
+                        '(root is %s, should be %s)' % (chk_inv.revision_id,
+                        chk_inv.id_to_entry.key()[0],
+                        canonical_inv.id_to_entry.key()[0]))
+                    self._data_changed = True
+                p_id_map = chk_inv.parent_id_basename_to_file_id
+                p_id_map._ensure_root()
+                canon_p_id_map = canonical_inv.parent_id_basename_to_file_id
+                if p_id_map.key() != canon_p_id_map.key():
+                    trace.mutter(
+                        'Non-canonical CHK map for parent_id_to_basename of '
+                        'inv: %s (root is %s, should be %s)'
+                        % (chk_inv.revision_id, p_id_map.key()[0],
+                           canon_p_id_map.key()[0]))
+                    self._data_changed = True
+                yield versionedfile.ChunkedContentFactory(record.key,
+                        record.parents, record.sha1,
+                        canonical_inv.to_lines())
+            # We have finished processing all of the inventory records, we
+            # don't need these sets anymore
+        return _filtered_inv_stream()
+
+    def _use_pack(self, new_pack):
+        """Override _use_pack to check for reconcile having changed content."""
+        return new_pack.data_inserted() and self._data_changed
+
+
 class GCRepositoryPackCollection(RepositoryPackCollection):
 
     pack_factory = GCPack
-
-    def _already_packed(self):
-        """Is the collection already packed?"""
-        # Always repack GC repositories for now
-        return False
-
-    def _execute_pack_operations(self, pack_operations,
-                                 _packer_class=GCCHKPacker,
-                                 reload_func=None):
-        """Execute a series of pack operations.
-
-        :param pack_operations: A list of [revision_count, packs_to_combine].
-        :param _packer_class: The class of packer to use (default: Packer).
-        :return: None.
+    resumed_pack_factory = ResumedGCPack
+    normal_packer_class = GCCHKPacker
+    optimising_packer_class = GCCHKPacker
+
+    def _check_new_inventories(self):
+        """Detect missing inventories or chk root entries for the new revisions
+        in this write group.
+
+        :returns: list of strs, summarising any problems found.  If the list is
+            empty no problems were found.
         """
-        # XXX: Copied across from RepositoryPackCollection simply because we
-        #      want to override the _packer_class ... :(
-        for revision_count, packs in pack_operations:
-            # we may have no-ops from the setup logic
-            if len(packs) == 0:
-                continue
-            packer = GCCHKPacker(self, packs, '.autopack',
-                                 reload_func=reload_func)
-            try:
-                packer.pack()
-            except errors.RetryWithNewPacks:
-                # An exception is propagating out of this context, make sure
-                # this packer has cleaned up. Packer() doesn't set its new_pack
-                # state into the RepositoryPackCollection object, so we only
-                # have access to it directly here.
-                if packer.new_pack is not None:
-                    packer.new_pack.abort()
-                raise
-            for pack in packs:
-                self._remove_pack_from_memory(pack)
-        # record the newly available packs and stop advertising the old
-        # packs
-        self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
-
-
-class CHKInventoryRepository(KnitPackRepository):
-    """subclass of KnitPackRepository that uses CHK based inventories."""
+        # Ensure that all revisions added in this write group have:
+        #   - corresponding inventories,
+        #   - chk root entries for those inventories,
+        #   - and any present parent inventories have their chk root
+        #     entries too.
+        # And all this should be independent of any fallback repository.
+        problems = []
+        key_deps = self.repo.revisions._index._key_dependencies
+        new_revisions_keys = key_deps.get_new_keys()
+        no_fallback_inv_index = self.repo.inventories._index
+        no_fallback_chk_bytes_index = self.repo.chk_bytes._index
+        no_fallback_texts_index = self.repo.texts._index
+        inv_parent_map = no_fallback_inv_index.get_parent_map(
+            new_revisions_keys)
+        # Are any inventories for corresponding to the new revisions missing?
+        corresponding_invs = set(inv_parent_map)
+        missing_corresponding = set(new_revisions_keys)
+        missing_corresponding.difference_update(corresponding_invs)
+        if missing_corresponding:
+            problems.append("inventories missing for revisions %s" %
+                (sorted(missing_corresponding),))
+            return problems
+        # Are any chk root entries missing for any inventories?  This includes
+        # any present parent inventories, which may be used when calculating
+        # deltas for streaming.
+        all_inv_keys = set(corresponding_invs)
+        for parent_inv_keys in inv_parent_map.itervalues():
+            all_inv_keys.update(parent_inv_keys)
+        # Filter out ghost parents.
+        all_inv_keys.intersection_update(
+            no_fallback_inv_index.get_parent_map(all_inv_keys))
+        parent_invs_only_keys = all_inv_keys.symmetric_difference(
+            corresponding_invs)
+        all_missing = set()
+        inv_ids = [key[-1] for key in all_inv_keys]
+        parent_invs_only_ids = [key[-1] for key in parent_invs_only_keys]
+        root_key_info = _build_interesting_key_sets(
+            self.repo, inv_ids, parent_invs_only_ids)
+        expected_chk_roots = root_key_info.all_keys()
+        present_chk_roots = no_fallback_chk_bytes_index.get_parent_map(
+            expected_chk_roots)
+        missing_chk_roots = expected_chk_roots.difference(present_chk_roots)
+        if missing_chk_roots:
+            problems.append(
+                "missing referenced chk root keys: %s."
+                "Run 'bzr reconcile --canonicalize-chks' on the affected "
+                "repository."
+                % (sorted(missing_chk_roots),))
+            # Don't bother checking any further.
+            return problems
+        # Find all interesting chk_bytes records, and make sure they are
+        # present, as well as the text keys they reference.
+        chk_bytes_no_fallbacks = self.repo.chk_bytes.without_fallbacks()
+        chk_bytes_no_fallbacks._search_key_func = \
+            self.repo.chk_bytes._search_key_func
+        chk_diff = chk_map.iter_interesting_nodes(
+            chk_bytes_no_fallbacks, root_key_info.interesting_root_keys,
+            root_key_info.uninteresting_root_keys)
+        text_keys = set()
+        try:
+            for record in _filter_text_keys(chk_diff, text_keys,
+                                            chk_map._bytes_to_text_key):
+                pass
+        except errors.NoSuchRevision, e:
+            # XXX: It would be nice if we could give a more precise error here.
+            problems.append("missing chk node(s) for id_to_entry maps")
+        chk_diff = chk_map.iter_interesting_nodes(
+            chk_bytes_no_fallbacks, root_key_info.interesting_pid_root_keys,
+            root_key_info.uninteresting_pid_root_keys)
+        try:
+            for interesting_rec, interesting_map in chk_diff:
+                pass
+        except errors.NoSuchRevision, e:
+            problems.append(
+                "missing chk node(s) for parent_id_basename_to_file_id maps")
+        present_text_keys = no_fallback_texts_index.get_parent_map(text_keys)
+        missing_text_keys = text_keys.difference(present_text_keys)
+        if missing_text_keys:
+            problems.append("missing text keys: %r"
+                % (sorted(missing_text_keys),))
+        return problems
+
+
+class CHKInventoryRepository(PackRepository):
+    """subclass of PackRepository that uses CHK based inventories."""
 
     def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
         _serializer):
         """Overridden to change pack collection class."""
-        KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
-            _commit_builder_class, _serializer)
-        # and now replace everything it did :)
+        super(CHKInventoryRepository, self).__init__(_format, a_bzrdir,
+            control_files, _commit_builder_class, _serializer)
         index_transport = self._transport.clone('indices')
         self._pack_collection = GCRepositoryPackCollection(self,
             self._transport, index_transport,
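
The new `_check_new_inventories` hook validates a write group layer by layer: every new revision needs an inventory, every inventory (including present parent inventories) needs its CHK root pages, and the pages must lead to present texts; it stops at the first broken layer, since deeper checks would be meaningless. A condensed sketch of that cascade over plain sets (all names illustrative):

    def check_write_group(new_revisions, present_inventories,
                          expected_chk_roots, present_chk_roots,
                          referenced_texts, present_texts):
        # Returns a list of problem summaries; empty means the group is clean.
        missing_invs = set(new_revisions) - set(present_inventories)
        if missing_invs:
            return ['inventories missing for revisions %s' % sorted(missing_invs)]
        missing_roots = set(expected_chk_roots) - set(present_chk_roots)
        if missing_roots:
            return ['missing referenced chk root keys: %s' % sorted(missing_roots)]
        missing_texts = set(referenced_texts) - set(present_texts)
        if missing_texts:
            return ['missing text keys: %r' % sorted(missing_texts)]
        return []
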
@@ -604,31 +818,39 @@
         self.inventories = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
                 add_callback=self._pack_collection.inventory_index.add_callback,
-                parents=True, is_locked=self.is_locked),
+                parents=True, is_locked=self.is_locked,
+                inconsistency_fatal=False),
             access=self._pack_collection.inventory_index.data_access)
         self.revisions = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                 add_callback=self._pack_collection.revision_index.add_callback,
-                parents=True, is_locked=self.is_locked),
+                parents=True, is_locked=self.is_locked,
+                track_external_parent_refs=True, track_new_keys=True),
             access=self._pack_collection.revision_index.data_access,
             delta=False)
         self.signatures = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.signature_index.combined_index,
                 add_callback=self._pack_collection.signature_index.add_callback,
-                parents=False, is_locked=self.is_locked),
+                parents=False, is_locked=self.is_locked,
+                inconsistency_fatal=False),
             access=self._pack_collection.signature_index.data_access,
             delta=False)
         self.texts = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.text_index.combined_index,
                 add_callback=self._pack_collection.text_index.add_callback,
-                parents=True, is_locked=self.is_locked),
+                parents=True, is_locked=self.is_locked,
+                inconsistency_fatal=False),
             access=self._pack_collection.text_index.data_access)
         # No parents, individual CHK pages don't have specific ancestry
         self.chk_bytes = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.chk_index.combined_index,
                 add_callback=self._pack_collection.chk_index.add_callback,
-                parents=False, is_locked=self.is_locked),
+                parents=False, is_locked=self.is_locked,
+                inconsistency_fatal=False),
             access=self._pack_collection.chk_index.data_access)
+        search_key_name = self._format._serializer.search_key_name
+        search_key_func = chk_map.search_key_registry.get(search_key_name)
+        self.chk_bytes._search_key_func = search_key_func
         # True when the repository object is 'write locked' (as opposed to the
         # physical lock only taken out around changes to the pack-names list.)
         # Another way to represent this would be a decorator around the control
@@ -657,6 +879,44 @@
         return self._inventory_add_lines(revision_id, parents,
             inv_lines, check_content=False)
 
+    def _create_inv_from_null(self, delta, revision_id):
+        """This will mutate new_inv directly.
+
+        This is a simplified form of create_by_apply_delta which knows that all
+        the old values must be None, so everything is a create.
+        """
+        serializer = self._format._serializer
+        new_inv = inventory.CHKInventory(serializer.search_key_name)
+        new_inv.revision_id = revision_id
+        entry_to_bytes = new_inv._entry_to_bytes
+        id_to_entry_dict = {}
+        parent_id_basename_dict = {}
+        for old_path, new_path, file_id, entry in delta:
+            if old_path is not None:
+                raise ValueError('Invalid delta, somebody tried to delete %r'
+                                 ' from the NULL_REVISION'
+                                 % ((old_path, file_id),))
+            if new_path is None:
+                raise ValueError('Invalid delta, delta from NULL_REVISION has'
+                                 ' no new_path %r' % (file_id,))
+            if new_path == '':
+                new_inv.root_id = file_id
+                parent_id_basename_key = StaticTuple('', '').intern()
+            else:
+                utf8_entry_name = entry.name.encode('utf-8')
+                parent_id_basename_key = StaticTuple(entry.parent_id,
+                                                     utf8_entry_name).intern()
+            new_value = entry_to_bytes(entry)
+            # Populate Caches?
+            # new_inv._path_to_fileid_cache[new_path] = file_id
+            key = StaticTuple(file_id).intern()
+            id_to_entry_dict[key] = new_value
+            parent_id_basename_dict[parent_id_basename_key] = file_id
+
+        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
+            parent_id_basename_dict, maximum_size=serializer.maximum_size)
+        return new_inv
+
     def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                                parents, basis_inv=None, propagate_caches=False):
         """Add a new inventory expressed as a delta against another revision.
@@ -682,89 +942,70 @@
             repository format specific) of the serialized inventory, and the
             resulting inventory.
         """
-        if basis_revision_id == _mod_revision.NULL_REVISION:
-            return KnitPackRepository.add_inventory_by_delta(self,
-                basis_revision_id, delta, new_revision_id, parents)
         if not self.is_in_write_group():
             raise AssertionError("%r not in write group" % (self,))
         _mod_revision.check_not_reserved_id(new_revision_id)
-        basis_tree = self.revision_tree(basis_revision_id)
-        basis_tree.lock_read()
+        basis_tree = None
+        if basis_inv is None:
+            if basis_revision_id == _mod_revision.NULL_REVISION:
+                new_inv = self._create_inv_from_null(delta, new_revision_id)
+                if new_inv.root_id is None:
+                    raise errors.RootMissing()
+                inv_lines = new_inv.to_lines()
+                return self._inventory_add_lines(new_revision_id, parents,
+                    inv_lines, check_content=False), new_inv
+            else:
+                basis_tree = self.revision_tree(basis_revision_id)
+                basis_tree.lock_read()
+                basis_inv = basis_tree.root_inventory
         try:
-            if basis_inv is None:
-                basis_inv = basis_tree.inventory
             result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                 propagate_caches=propagate_caches)
             inv_lines = result.to_lines()
             return self._inventory_add_lines(new_revision_id, parents,
                 inv_lines, check_content=False), result
         finally:
-            basis_tree.unlock()
-
-    def _iter_inventories(self, revision_ids):
+            if basis_tree is not None:
+                basis_tree.unlock()
+
+    def _deserialise_inventory(self, revision_id, bytes):
+        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
+            (revision_id,))
+
+    def _iter_inventories(self, revision_ids, ordering):
         """Iterate over many inventory objects."""
+        if ordering is None:
+            ordering = 'unordered'
         keys = [(revision_id,) for revision_id in revision_ids]
-        stream = self.inventories.get_record_stream(keys, 'unordered', True)
+        stream = self.inventories.get_record_stream(keys, ordering, True)
         texts = {}
         for record in stream:
             if record.storage_kind != 'absent':
                 texts[record.key] = record.get_bytes_as('fulltext')
             else:
-                raise errors.NoSuchRevision(self, record.key)
+                texts[record.key] = None
         for key in keys:
-            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)
-
-    def _iter_inventory_xmls(self, revision_ids):
-        # Without a native 'xml' inventory, this method doesn't make sense, so
-        # make it raise to trap naughty direct users.
-        raise NotImplementedError(self._iter_inventory_xmls)
-
-    def _find_revision_outside_set(self, revision_ids):
-        revision_set = frozenset(revision_ids)
-        for revid in revision_ids:
-            parent_ids = self.get_parent_map([revid]).get(revid, ())
-            for parent in parent_ids:
-                if parent in revision_set:
-                    # Parent is not outside the set
-                    continue
-                if parent not in self.get_parent_map([parent]):
-                    # Parent is a ghost
-                    continue
-                return parent
-        return _mod_revision.NULL_REVISION
-
-    def _find_file_keys_to_fetch(self, revision_ids, pb):
-        rich_root = self.supports_rich_root()
-        revision_outside_set = self._find_revision_outside_set(revision_ids)
-        if revision_outside_set == _mod_revision.NULL_REVISION:
-            uninteresting_root_keys = set()
-        else:
-            uninteresting_inv = self.get_inventory(revision_outside_set)
-            uninteresting_root_keys = set([uninteresting_inv.id_to_entry.key()])
-        interesting_root_keys = set()
-        for idx, inv in enumerate(self.iter_inventories(revision_ids)):
-            interesting_root_keys.add(inv.id_to_entry.key())
-        revision_ids = frozenset(revision_ids)
-        file_id_revisions = {}
-        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
-        for record, items in chk_map.iter_interesting_nodes(self.chk_bytes,
-                    interesting_root_keys, uninteresting_root_keys,
-                    pb=pb):
-            # This is cheating a bit to use the last grabbed 'inv', but it
-            # works
-            for name, bytes in items:
-                (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
-                if not rich_root and name_utf8 == '':
-                    continue
-                if revision_id in revision_ids:
-                    # Would we rather build this up into file_id => revision
-                    # maps?
-                    try:
-                        file_id_revisions[file_id].add(revision_id)
-                    except KeyError:
-                        file_id_revisions[file_id] = set([revision_id])
-        for file_id, revisions in file_id_revisions.iteritems():
-            yield ('file', file_id, revisions)
+            bytes = texts[key]
+            if bytes is None:
+                yield (None, key[-1])
+            else:
+                yield (inventory.CHKInventory.deserialise(
+                    self.chk_bytes, bytes, key), key[-1])
+
+    def _get_inventory_xml(self, revision_id):
+        """Get serialized inventory as a string."""
+        # Without a native 'xml' inventory, this method doesn't make sense.
+        # However older working trees, and older bundles want it - so we supply
+        # it allowing _get_inventory_xml to work. Bundles currently use the
+        # serializer directly; this also isn't ideal, but there isn't an xml
+        # iteration interface offered at all for repositories.
+        return self._serializer.write_inventory_to_string(
+            self.get_inventory(revision_id))
+
+    def _find_present_inventory_keys(self, revision_keys):
+        parent_map = self.inventories.get_parent_map(revision_keys)
+        present_inventory_keys = set(k for k in parent_map)
+        return present_inventory_keys
 
     def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
         """Find the file ids and versions affected by revisions.
@@ -776,23 +1017,46 @@
             revision_ids. Each altered file-ids has the exact revision_ids that
             altered it listed explicitly.
         """
-        rich_roots = self.supports_rich_root()
-        result = {}
+        rich_root = self.supports_rich_root()
+        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
+        file_id_revisions = {}
         pb = ui.ui_factory.nested_progress_bar()
         try:
-            total = len(revision_ids)
-            for pos, inv in enumerate(self.iter_inventories(revision_ids)):
-                pb.update("Finding text references", pos, total)
-                for entry in inv.iter_just_entries():
-                    if entry.revision != inv.revision_id:
-                        continue
-                    if not rich_roots and entry.file_id == inv.root_id:
-                        continue
-                    alterations = result.setdefault(entry.file_id, set([]))
-                    alterations.add(entry.revision)
-            return result
+            revision_keys = [(r,) for r in revision_ids]
+            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
+            # TODO: instead of using _find_present_inventory_keys, change the
+            #       code paths to allow missing inventories to be tolerated.
+            #       However, we only want to tolerate missing parent
+            #       inventories, not missing inventories for revision_ids
+            present_parent_inv_keys = self._find_present_inventory_keys(
+                                        parent_keys)
+            present_parent_inv_ids = set(
+                [k[-1] for k in present_parent_inv_keys])
+            inventories_to_read = set(revision_ids)
+            inventories_to_read.update(present_parent_inv_ids)
+            root_key_info = _build_interesting_key_sets(
+                self, inventories_to_read, present_parent_inv_ids)
+            interesting_root_keys = root_key_info.interesting_root_keys
+            uninteresting_root_keys = root_key_info.uninteresting_root_keys
+            chk_bytes = self.chk_bytes
+            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
+                        interesting_root_keys, uninteresting_root_keys,
+                        pb=pb):
+                for name, bytes in items:
+                    (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
+                    # TODO: consider interning file_id, revision_id here, or
+                    #       pushing that intern() into bytes_to_info()
+                    # TODO: rich_root should always be True here, for all
+                    #       repositories that support chk_bytes
+                    if not rich_root and name_utf8 == '':
+                        continue
+                    try:
+                        file_id_revisions[file_id].add(revision_id)
+                    except KeyError:
+                        file_id_revisions[file_id] = set([revision_id])
         finally:
             pb.finished()
+        return file_id_revisions
 
     def find_text_key_references(self):
         """Find the text key references within the repository.
@@ -826,31 +1090,67 @@
         finally:
             pb.finished()
 
+    @needs_write_lock
+    def reconcile_canonicalize_chks(self):
+        """Reconcile this repository to make sure all CHKs are in canonical
+        form.
+        """
+        from bzrlib.reconcile import PackReconciler
+        reconciler = PackReconciler(self, thorough=True, canonicalize_chks=True)
+        reconciler.reconcile()
+        return reconciler
+
     def _reconcile_pack(self, collection, packs, extension, revs, pb):
         packer = GCCHKReconcilePacker(collection, packs, extension)
         return packer.pack(pb)
 
+    def _canonicalize_chks_pack(self, collection, packs, extension, revs, pb):
+        packer = GCCHKCanonicalizingPacker(collection, packs, extension, revs)
+        return packer.pack(pb)
+
     def _get_source(self, to_format):
         """Return a source for streaming from this repository."""
-        if isinstance(to_format, remote.RemoteRepositoryFormat):
-            # Can't just check attributes on to_format with the current code,
-            # work around this:
-            to_format._ensure_real()
-            to_format = to_format._custom_format
-        if to_format.__class__ is self._format.__class__:
+        if self._format._serializer == to_format._serializer:
             # We must be exactly the same format, otherwise stuff like the chk
-            # page layout might be different
+            # page layout might be different.
+            # Actually, this test is just slightly looser than exact so that
+            # CHK2 <-> 2a transfers will work.
             return GroupCHKStreamSource(self, to_format)
         return super(CHKInventoryRepository, self)._get_source(to_format)
 
-    def suspend_write_group(self):
-        raise errors.UnsuspendableWriteGroup(self)
-
-    def _resume_write_group(self, tokens):
-        raise errors.UnsuspendableWriteGroup(self)
-
-
-class GroupCHKStreamSource(repository.StreamSource):
+    def _find_inconsistent_revision_parents(self, revisions_iterator=None):
+        """Find revisions with different parent lists in the revision object
+        and in the index graph.
+
+        :param revisions_iterator: None, or an iterator of (revid,
+            Revision-or-None). This iterator controls the revisions checked.
+        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
+            parents-in-revision).
+        """
+        if not self.is_locked():
+            raise AssertionError()
+        vf = self.revisions
+        if revisions_iterator is None:
+            revisions_iterator = self._iter_revisions(None)
+        for revid, revision in revisions_iterator:
+            if revision is None:
+                pass
+            parent_map = vf.get_parent_map([(revid,)])
+            parents_according_to_index = tuple(parent[-1] for parent in
+                parent_map[(revid,)])
+            parents_according_to_revision = tuple(revision.parent_ids)
+            if parents_according_to_index != parents_according_to_revision:
+                yield (revid, parents_according_to_index,
+                    parents_according_to_revision)
+
+    def _check_for_inconsistent_revision_parents(self):
+        inconsistencies = list(self._find_inconsistent_revision_parents())
+        if inconsistencies:
+            raise errors.BzrCheckError(
+                "Revision index has inconsistent parents.")
+
+
+class GroupCHKStreamSource(StreamSource):
     """Used when both the source and target repo are GroupCHK repos."""
 
     def __init__(self, from_repository, to_format):
@@ -858,10 +1158,11 @@
         super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
         self._revision_keys = None
         self._text_keys = None
+        self._text_fetch_order = 'groupcompress'
         self._chk_id_roots = None
         self._chk_p_id_roots = None
 
-    def _get_filtered_inv_stream(self):
+    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
         """Get a stream of inventory texts.
 
         When this function returns, self._chk_id_roots and self._chk_p_id_roots
@@ -873,9 +1174,14 @@
             id_roots_set = set()
             p_id_roots_set = set()
             source_vf = self.from_repository.inventories
-            stream = source_vf.get_record_stream(self._revision_keys,
+            stream = source_vf.get_record_stream(inventory_keys,
                                                  'groupcompress', True)
             for record in stream:
+                if record.storage_kind == 'absent':
+                    if allow_absent:
+                        continue
+                    else:
+                        raise errors.NoSuchRevision(self, record.key)
                 bytes = record.get_bytes_as('fulltext')
                 chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                              record.key)
@@ -897,70 +1203,173 @@
             p_id_roots_set.clear()
         return ('inventories', _filtered_inv_stream())
 
-    def _get_filtered_chk_streams(self, excluded_keys):
+    def _get_filtered_chk_streams(self, excluded_revision_keys):
         self._text_keys = set()
-        excluded_keys.discard(_mod_revision.NULL_REVISION)
-        if not excluded_keys:
+        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
+        if not excluded_revision_keys:
             uninteresting_root_keys = set()
             uninteresting_pid_root_keys = set()
         else:
+            # filter out any excluded revisions whose inventories are not
+            # actually present
+            # TODO: Update Repository.iter_inventories() to add
+            #       ignore_missing=True
+            present_keys = self.from_repository._find_present_inventory_keys(
+                            excluded_revision_keys)
+            present_ids = [k[-1] for k in present_keys]
             uninteresting_root_keys = set()
             uninteresting_pid_root_keys = set()
-            for inv in self.from_repository.iter_inventories(excluded_keys):
+            for inv in self.from_repository.iter_inventories(present_ids):
                 uninteresting_root_keys.add(inv.id_to_entry.key())
                 uninteresting_pid_root_keys.add(
                     inv.parent_id_basename_to_file_id.key())
-        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
         chk_bytes = self.from_repository.chk_bytes
         def _filter_id_to_entry():
-            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
-                        self._chk_id_roots, uninteresting_root_keys):
-                for name, bytes in items:
-                    # Note: we don't care about name_utf8, because we are always
-                    # rich-root = True
-                    _, file_id, revision_id = bytes_to_info(bytes)
-                    self._text_keys.add((file_id, revision_id))
+            interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
+                        self._chk_id_roots, uninteresting_root_keys)
+            for record in _filter_text_keys(interesting_nodes, self._text_keys,
+                    chk_map._bytes_to_text_key):
                 if record is not None:
                     yield record
+            # Consumed
+            self._chk_id_roots = None
         yield 'chk_bytes', _filter_id_to_entry()
         def _get_parent_id_basename_to_file_id_pages():
             for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                         self._chk_p_id_roots, uninteresting_pid_root_keys):
                 if record is not None:
                     yield record
+            # Consumed
+            self._chk_p_id_roots = None
         yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()
 
     def _get_text_stream(self):
         # Note: We know we don't have to handle adding root keys, because both
-        # the source and target are GCCHK, and those always support rich-roots
-        # We may want to request as 'unordered', in case the source has done a
-        # 'split' packing
-        return ('texts', self.from_repository.texts.get_record_stream(
-                            self._text_keys, 'groupcompress', False))
+        # the source and target are the identical network name.
+        text_stream = self.from_repository.texts.get_record_stream(
+                        self._text_keys, self._text_fetch_order, False)
+        return ('texts', text_stream)
 
     def get_stream(self, search):
+        def wrap_and_count(pb, rc, stream):
+            """Yield records from stream while showing progress."""
+            count = 0
+            for record in stream:
+                if count == rc.STEP:
+                    rc.increment(count)
+                    pb.update('Estimate', rc.current, rc.max)
+                    count = 0
+                count += 1
+                yield record
+
         revision_ids = search.get_keys()
+        pb = ui.ui_factory.nested_progress_bar()
+        rc = self._record_counter
+        self._record_counter.setup(len(revision_ids))
         for stream_info in self._fetch_revision_texts(revision_ids):
-            yield stream_info
+            yield (stream_info[0],
+                wrap_and_count(pb, rc, stream_info[1]))
         self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        yield self._get_filtered_inv_stream()
-        # The keys to exclude are part of the search recipe
-        _, _, exclude_keys, _ = search.get_recipe()
-        for stream_info in self._get_filtered_chk_streams(exclude_keys):
+        # TODO: The keys to exclude might be part of the search recipe
+        # For now, exclude all parents that are at the edge of ancestry, for
+        # which we have inventories
+        from_repo = self.from_repository
+        parent_keys = from_repo._find_parent_keys_of_revisions(
+                        self._revision_keys)
+        self.from_repository.revisions.clear_cache()
+        self.from_repository.signatures.clear_cache()
+        # Clear the repo's get_parent_map cache too.
+        self.from_repository._unstacked_provider.disable_cache()
+        self.from_repository._unstacked_provider.enable_cache()
+        s = self._get_inventory_stream(self._revision_keys)
+        yield (s[0], wrap_and_count(pb, rc, s[1]))
+        self.from_repository.inventories.clear_cache()
+        for stream_info in self._get_filtered_chk_streams(parent_keys):
+            yield (stream_info[0], wrap_and_count(pb, rc, stream_info[1]))
+        self.from_repository.chk_bytes.clear_cache()
+        s = self._get_text_stream()
+        yield (s[0], wrap_and_count(pb, rc, s[1]))
+        self.from_repository.texts.clear_cache()
+        pb.update('Done', rc.max, rc.max)
+        pb.finished()
+
+    def get_stream_for_missing_keys(self, missing_keys):
+        # missing keys can only occur when we are byte copying and not
+        # translating (because translation means we don't send
+        # unreconstructable deltas ever).
+        missing_inventory_keys = set()
+        for key in missing_keys:
+            if key[0] != 'inventories':
+                raise AssertionError('The only missing keys we should'
+                    ' be filling in are inventory keys, not %s'
+                    % (key[0],))
+            missing_inventory_keys.add(key[1:])
+        if self._chk_id_roots or self._chk_p_id_roots:
+            raise AssertionError('Cannot call get_stream_for_missing_keys'
+                ' until all of get_stream() has been consumed.')
+        # Yield the inventory stream, so we can find the chk stream
+        # Some of the missing_keys will be missing because they are ghosts.
+        # As such, we can ignore them. The Sink is required to verify there are
+        # no unavailable texts when the ghost inventories are not filled in.
+        yield self._get_inventory_stream(missing_inventory_keys,
+                                         allow_absent=True)
+        # We use the empty set for excluded_revision_keys, to make it clear
+        # that we want to transmit all referenced chk pages.
+        for stream_info in self._get_filtered_chk_streams(set()):
             yield stream_info
-        yield self._get_text_stream()
-
-
-class RepositoryFormatCHK1(RepositoryFormatPack):
-    """A hashed CHK+group compress pack repository."""
+
+
+class _InterestingKeyInfo(object):
+    def __init__(self):
+        self.interesting_root_keys = set()
+        self.interesting_pid_root_keys = set()
+        self.uninteresting_root_keys = set()
+        self.uninteresting_pid_root_keys = set()
+
+    def all_interesting(self):
+        return self.interesting_root_keys.union(self.interesting_pid_root_keys)
+
+    def all_uninteresting(self):
+        return self.uninteresting_root_keys.union(
+            self.uninteresting_pid_root_keys)
+
+    def all_keys(self):
+        return self.all_interesting().union(self.all_uninteresting())
+
+
+def _build_interesting_key_sets(repo, inventory_ids, parent_only_inv_ids):
+    result = _InterestingKeyInfo()
+    for inv in repo.iter_inventories(inventory_ids, 'unordered'):
+        root_key = inv.id_to_entry.key()
+        pid_root_key = inv.parent_id_basename_to_file_id.key()
+        if inv.revision_id in parent_only_inv_ids:
+            result.uninteresting_root_keys.add(root_key)
+            result.uninteresting_pid_root_keys.add(pid_root_key)
+        else:
+            result.interesting_root_keys.add(root_key)
+            result.interesting_pid_root_keys.add(pid_root_key)
+    return result
+
+
+def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_text_key):
+    """Iterate the result of iter_interesting_nodes, yielding the records
+    and adding to text_keys.
+    """
+    text_keys_update = text_keys.update
+    for record, items in interesting_nodes_iterable:
+        text_keys_update([bytes_to_text_key(b) for n,b in items])
+        yield record
+
+
+class RepositoryFormat2a(RepositoryFormatPack):
+    """A CHK repository that uses the bencode revision serializer."""
 
     repository_class = CHKInventoryRepository
+    supports_external_lookups = True
     supports_chks = True
-    # For right now, setting this to True gives us InterModel1And2 rather
-    # than InterDifferingSerializer
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
-    _serializer = chk_serializer.chk_serializer_255_bigpage
+    _serializer = chk_serializer.chk_bencode_serializer
     _commit_inv_deltas = True
     # What index classes to use
     index_builder_class = BTreeBuilder
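
`get_stream` now threads every substream through `wrap_and_count`, a pass-through generator that counts records and nudges a progress bar in batches, and clears per-VF caches between phases to bound memory. The wrapper pattern on its own, with a minimal stand-in counter (bzrlib's real `_record_counter` has the same increment/STEP shape but is not this class):

    class Counter(object):
        STEP = 100  # batch size for progress updates (illustrative)
        def __init__(self, maximum):
            self.current, self.max = 0, maximum
        def increment(self, n):
            self.current += n

    def wrap_and_count(counter, stream, report):
        # Yield records unchanged; report progress every STEP records.
        count = 0
        for record in stream:
            if count == counter.STEP:
                counter.increment(count)
                report(counter.current, counter.max)
                count = 0
            count += 1
            yield record

    consumed = list(wrap_and_count(Counter(500), iter(range(500)), print))
    assert len(consumed) == 500  # records pass through untouched
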
@@ -974,31 +1383,47 @@
     _fetch_order = 'unordered'
     _fetch_uses_deltas = False # essentially ignored by the groupcompress code.
     fast_deltas = True
+    pack_compresses = True
 
     def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('development6-rich-root')
+        return controldir.format_registry.make_bzrdir('2a')
 
     def _ignore_setting_bzrdir(self, format):
         pass
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def get_format_string(self):
-        """See RepositoryFormat.get_format_string()."""
-        return ('Bazaar development format - group compression and chk inventory'
-                ' (needs bzr.dev from 1.14)\n')
+    @classmethod
+    def get_format_string(cls):
+        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
 
     def get_format_description(self):
         """See RepositoryFormat.get_format_description()."""
-        return ("Development repository format - rich roots, group compression"
+        return ("Repository format 2a - rich roots, group compression"
             " and chk inventories")
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
-
+
+class RepositoryFormat2aSubtree(RepositoryFormat2a):
+    """A 2a repository format that supports nested trees.
+
+    """
+
+    def _get_matching_bzrdir(self):
+        return controldir.format_registry.make_bzrdir('development-subtree')
+
+    def _ignore_setting_bzrdir(self, format):
+        pass
+
+    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+    @classmethod
+    def get_format_string(cls):
+        return ('Bazaar development format 8\n')
+
+    def get_format_description(self):
+        """See RepositoryFormat.get_format_description()."""
+        return ("Development repository format 8 - nested trees, "
+                "group compression and chk inventories")
+
+    experimental = True
+    supports_tree_reference = True