~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: Sidnei da Silva
  • Date: 2009-07-04 02:16:06 UTC
  • mto: (4531.1.1 integration)
  • mto: This revision was merged to the branch mainline in revision 4532.
  • Revision ID: sidnei.da.silva@canonical.com-20090704021606-os06th007b2bfu5u
- Define targets as 'release' and 'dev', allow passing them through make

@@ -1,4 +1,4 @@
-# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+# Copyright (C) 2008, 2009 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,16 +29,17 @@
     knit,
     osutils,
     pack,
+    remote,
+    repository,
     revision as _mod_revision,
     trace,
     ui,
-    versionedfile,
     )
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
     )
-from bzrlib.decorators import needs_write_lock
+from bzrlib.index import GraphIndex, GraphIndexBuilder
 from bzrlib.groupcompress import (
     _GCGraphIndex,
     GroupCompressVersionedFiles,
@@ -47,14 +48,12 @@
     Pack,
     NewPack,
     KnitPackRepository,
-    KnitPackStreamSource,
     PackRootCommitBuilder,
     RepositoryPackCollection,
     RepositoryFormatPack,
     ResumedPack,
     Packer,
     )
-from bzrlib.static_tuple import StaticTuple


 class GCPack(NewPack):
@@ -156,8 +155,6 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
-        # no name until we finish writing the content
-        self.name = None

     def _check_references(self):
         """Make sure our external references are present.
@@ -220,7 +217,6 @@
             p_id_roots_set = set()
             stream = source_vf.get_record_stream(keys, 'groupcompress', True)
             for idx, record in enumerate(stream):
-                # Inventories should always be with revisions; assume success.
                 bytes = record.get_bytes_as('fulltext')
                 chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                              record.key)
@@ -264,6 +260,7 @@
         remaining_keys = set(keys)
         counter = [0]
         if self._gather_text_refs:
+            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
             self._text_refs = set()
         def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
             cur_keys = root_keys
@@ -290,16 +287,12 @@
                     # Store is None, because we know we have a LeafNode, and we
                     # just want its entries
                     for file_id, bytes in node.iteritems(None):
-                        self._text_refs.add(chk_map._bytes_to_text_key(bytes))
+                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
+                        self._text_refs.add((file_id, revision_id))
                 def next_stream():
                     stream = source_vf.get_record_stream(cur_keys,
                                                          'as-requested', True)
                     for record in stream:
-                        if record.storage_kind == 'absent':
-                            # An absent CHK record: we assume that the missing
-                            # record is in a different pack - e.g. a page not
-                            # altered by the commit we're packing.
-                            continue
                         bytes = record.get_bytes_as('fulltext')
                         # We don't care about search_key_func for this code,
                         # because we only care about external references.
@@ -352,8 +345,7 @@
         """Build a VersionedFiles instance on top of this group of packs."""
         index_name = index_name + '_index'
         index_to_pack = {}
-        access = knit._DirectPackAccess(index_to_pack,
-                                        reload_func=self._reload_func)
+        access = knit._DirectPackAccess(index_to_pack)
         if for_write:
             # Use new_pack
             if self.new_pack is None:
@@ -413,25 +405,11 @@

     def _copy_inventory_texts(self):
         source_vf, target_vf = self._build_vfs('inventory', True, True)
-        # It is not sufficient to just use self.revision_keys, as stacked
-        # repositories can have more inventories than they have revisions.
-        # One alternative would be to do something with
-        # get_parent_map(self.revision_keys), but that shouldn't be any faster
-        # than this.
-        inventory_keys = source_vf.keys()
-        missing_inventories = set(self.revision_keys).difference(inventory_keys)
-        if missing_inventories:
-            missing_inventories = sorted(missing_inventories)
-            raise ValueError('We are missing inventories for revisions: %s'
-                % (missing_inventories,))
-        self._copy_stream(source_vf, target_vf, inventory_keys,
+        self._copy_stream(source_vf, target_vf, self.revision_keys,
                           'inventories', self._get_filtered_inv_stream, 2)

-    def _get_chk_vfs_for_copy(self):
-        return self._build_vfs('chk', False, False)
-
     def _copy_chk_texts(self):
-        source_vf, target_vf = self._get_chk_vfs_for_copy()
+        source_vf, target_vf = self._build_vfs('chk', False, False)
         # TODO: This is technically spurious... if it is a performance issue,
         #       remove it
         total_keys = source_vf.keys()
@@ -460,7 +438,7 @@
         #      is grabbing too many keys...
         text_keys = source_vf.keys()
         self._copy_stream(source_vf, target_vf, text_keys,
-                          'texts', self._get_progress_stream, 4)
+                          'text', self._get_progress_stream, 4)

     def _copy_signature_texts(self):
         source_vf, target_vf = self._build_vfs('signature', False, False)
@@ -483,15 +461,6 @@
         if not self._use_pack(self.new_pack):
             self.new_pack.abort()
             return None
-        self.new_pack.finish_content()
-        if len(self.packs) == 1:
-            old_pack = self.packs[0]
-            if old_pack.name == self.new_pack._hash.hexdigest():
-                # The single old pack was already optimally packed.
-                trace.mutter('single pack %s was already optimally packed',
-                    old_pack.name)
-                self.new_pack.abort()
-                return None
         self.pb.update('finishing repack', 6, 7)
         self.new_pack.finish()
         self._pack_collection.allocate(self.new_pack)
@@ -583,201 +552,15 @@
         return new_pack.data_inserted() and self._data_changed


-class GCCHKCanonicalizingPacker(GCCHKPacker):
-    """A packer that ensures inventories have canonical-form CHK maps.
-
-    Ideally this would be part of reconcile, but it's very slow and rarely
-    needed.  (It repairs repositories affected by
-    https://bugs.launchpad.net/bzr/+bug/522637).
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(GCCHKCanonicalizingPacker, self).__init__(*args, **kwargs)
-        self._data_changed = False
-
-    def _exhaust_stream(self, source_vf, keys, message, vf_to_stream, pb_offset):
-        """Create and exhaust a stream, but don't insert it.
-
-        This is useful to get the side-effects of generating a stream.
-        """
-        self.pb.update('scanning %s' % (message,), pb_offset)
-        child_pb = ui.ui_factory.nested_progress_bar()
-        try:
-            list(vf_to_stream(source_vf, keys, message, child_pb))
-        finally:
-            child_pb.finished()
-
-    def _copy_inventory_texts(self):
-        source_vf, target_vf = self._build_vfs('inventory', True, True)
-        source_chk_vf, target_chk_vf = self._get_chk_vfs_for_copy()
-        inventory_keys = source_vf.keys()
-        # First, copy the existing CHKs on the assumption that most of them
-        # will be correct.  This will save us from having to reinsert (and
-        # recompress) these records later at the cost of perhaps preserving a
-        # few unused CHKs.
-        # (Iterate but don't insert _get_filtered_inv_stream to populate the
-        # variables needed by GCCHKPacker._copy_chk_texts.)
-        self._exhaust_stream(source_vf, inventory_keys, 'inventories',
-                self._get_filtered_inv_stream, 2)
-        GCCHKPacker._copy_chk_texts(self)
-        # Now copy and fix the inventories, and any regenerated CHKs.
-        def chk_canonicalizing_inv_stream(source_vf, keys, message, pb=None):
-            return self._get_filtered_canonicalizing_inv_stream(
-                source_vf, keys, message, pb, source_chk_vf, target_chk_vf)
-        self._copy_stream(source_vf, target_vf, inventory_keys,
-                          'inventories', chk_canonicalizing_inv_stream, 4)
-
-    def _copy_chk_texts(self):
-        # No-op; in this class this happens during _copy_inventory_texts.
-        pass
-
-    def _get_filtered_canonicalizing_inv_stream(self, source_vf, keys, message,
-            pb=None, source_chk_vf=None, target_chk_vf=None):
-        """Filter the texts of inventories, regenerating CHKs to make sure they
-        are canonical.
-        """
-        total_keys = len(keys)
-        target_chk_vf = versionedfile.NoDupeAddLinesDecorator(target_chk_vf)
-        def _filtered_inv_stream():
-            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
-            search_key_name = None
-            for idx, record in enumerate(stream):
-                # Inventories should always be with revisions; assume success.
-                bytes = record.get_bytes_as('fulltext')
-                chk_inv = inventory.CHKInventory.deserialise(
-                    source_chk_vf, bytes, record.key)
-                if pb is not None:
-                    pb.update('inv', idx, total_keys)
-                chk_inv.id_to_entry._ensure_root()
-                if search_key_name is None:
-                    # Find the name corresponding to the search_key_func
-                    search_key_reg = chk_map.search_key_registry
-                    for search_key_name, func in search_key_reg.iteritems():
-                        if func == chk_inv.id_to_entry._search_key_func:
-                            break
-                canonical_inv = inventory.CHKInventory.from_inventory(
-                    target_chk_vf, chk_inv,
-                    maximum_size=chk_inv.id_to_entry._root_node._maximum_size,
-                    search_key_name=search_key_name)
-                if chk_inv.id_to_entry.key() != canonical_inv.id_to_entry.key():
-                    trace.mutter(
-                        'Non-canonical CHK map for id_to_entry of inv: %s '
-                        '(root is %s, should be %s)' % (chk_inv.revision_id,
-                        chk_inv.id_to_entry.key()[0],
-                        canonical_inv.id_to_entry.key()[0]))
-                    self._data_changed = True
-                p_id_map = chk_inv.parent_id_basename_to_file_id
-                p_id_map._ensure_root()
-                canon_p_id_map = canonical_inv.parent_id_basename_to_file_id
-                if p_id_map.key() != canon_p_id_map.key():
-                    trace.mutter(
-                        'Non-canonical CHK map for parent_id_to_basename of '
-                        'inv: %s (root is %s, should be %s)'
-                        % (chk_inv.revision_id, p_id_map.key()[0],
-                           canon_p_id_map.key()[0]))
-                    self._data_changed = True
-                yield versionedfile.ChunkedContentFactory(record.key,
-                        record.parents, record.sha1,
-                        canonical_inv.to_lines())
-            # We have finished processing all of the inventory records, we
-            # don't need these sets anymore
-        return _filtered_inv_stream()
-
-    def _use_pack(self, new_pack):
-        """Override _use_pack to check for reconcile having changed content."""
-        return new_pack.data_inserted() and self._data_changed
-
-
 class GCRepositoryPackCollection(RepositoryPackCollection):

     pack_factory = GCPack
     resumed_pack_factory = ResumedGCPack

-    def _check_new_inventories(self):
-        """Detect missing inventories or chk root entries for the new revisions
-        in this write group.
-
-        :returns: list of strs, summarising any problems found.  If the list is
-            empty no problems were found.
-        """
-        # Ensure that all revisions added in this write group have:
-        #   - corresponding inventories,
-        #   - chk root entries for those inventories,
-        #   - and any present parent inventories have their chk root
-        #     entries too.
-        # And all this should be independent of any fallback repository.
-        problems = []
-        key_deps = self.repo.revisions._index._key_dependencies
-        new_revisions_keys = key_deps.get_new_keys()
-        no_fallback_inv_index = self.repo.inventories._index
-        no_fallback_chk_bytes_index = self.repo.chk_bytes._index
-        no_fallback_texts_index = self.repo.texts._index
-        inv_parent_map = no_fallback_inv_index.get_parent_map(
-            new_revisions_keys)
-        # Are any inventories for corresponding to the new revisions missing?
-        corresponding_invs = set(inv_parent_map)
-        missing_corresponding = set(new_revisions_keys)
-        missing_corresponding.difference_update(corresponding_invs)
-        if missing_corresponding:
-            problems.append("inventories missing for revisions %s" %
-                (sorted(missing_corresponding),))
-            return problems
-        # Are any chk root entries missing for any inventories?  This includes
-        # any present parent inventories, which may be used when calculating
-        # deltas for streaming.
-        all_inv_keys = set(corresponding_invs)
-        for parent_inv_keys in inv_parent_map.itervalues():
-            all_inv_keys.update(parent_inv_keys)
-        # Filter out ghost parents.
-        all_inv_keys.intersection_update(
-            no_fallback_inv_index.get_parent_map(all_inv_keys))
-        parent_invs_only_keys = all_inv_keys.symmetric_difference(
-            corresponding_invs)
-        all_missing = set()
-        inv_ids = [key[-1] for key in all_inv_keys]
-        parent_invs_only_ids = [key[-1] for key in parent_invs_only_keys]
-        root_key_info = _build_interesting_key_sets(
-            self.repo, inv_ids, parent_invs_only_ids)
-        expected_chk_roots = root_key_info.all_keys()
-        present_chk_roots = no_fallback_chk_bytes_index.get_parent_map(
-            expected_chk_roots)
-        missing_chk_roots = expected_chk_roots.difference(present_chk_roots)
-        if missing_chk_roots:
-            problems.append("missing referenced chk root keys: %s"
-                % (sorted(missing_chk_roots),))
-            # Don't bother checking any further.
-            return problems
-        # Find all interesting chk_bytes records, and make sure they are
-        # present, as well as the text keys they reference.
-        chk_bytes_no_fallbacks = self.repo.chk_bytes.without_fallbacks()
-        chk_bytes_no_fallbacks._search_key_func = \
-            self.repo.chk_bytes._search_key_func
-        chk_diff = chk_map.iter_interesting_nodes(
-            chk_bytes_no_fallbacks, root_key_info.interesting_root_keys,
-            root_key_info.uninteresting_root_keys)
-        text_keys = set()
-        try:
-            for record in _filter_text_keys(chk_diff, text_keys,
-                                            chk_map._bytes_to_text_key):
-                pass
-        except errors.NoSuchRevision, e:
-            # XXX: It would be nice if we could give a more precise error here.
-            problems.append("missing chk node(s) for id_to_entry maps")
-        chk_diff = chk_map.iter_interesting_nodes(
-            chk_bytes_no_fallbacks, root_key_info.interesting_pid_root_keys,
-            root_key_info.uninteresting_pid_root_keys)
-        try:
-            for interesting_rec, interesting_map in chk_diff:
-                pass
-        except errors.NoSuchRevision, e:
-            problems.append(
-                "missing chk node(s) for parent_id_basename_to_file_id maps")
-        present_text_keys = no_fallback_texts_index.get_parent_map(text_keys)
-        missing_text_keys = text_keys.difference(present_text_keys)
-        if missing_text_keys:
-            problems.append("missing text keys: %r"
-                % (sorted(missing_text_keys),))
-        return problems
+    def _already_packed(self):
+        """Is the collection already packed?"""
+        # Always repack GC repositories for now
+        return False

     def _execute_pack_operations(self, pack_operations,
                                  _packer_class=GCCHKPacker,
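Note: the `_check_new_inventories` method on the `-` side follows a collect-rather-than-raise pattern: each integrity check appends a human-readable string to `problems`, and an empty list means the write group is consistent. A minimal sketch of that shape (hypothetical helper, not bzrlib API), using the same `get_parent_map` presence-check idiom the method itself relies on:

    def check_new_inventories(new_revision_keys, inventory_index):
        # get_parent_map only returns entries for keys that exist in the
        # index, so it doubles as a batch existence check.
        problems = []
        present = set(inventory_index.get_parent_map(new_revision_keys))
        missing = set(new_revision_keys) - present
        if missing:
            problems.append("inventories missing for revisions %s"
                            % (sorted(missing),))
        return problems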
@@ -797,7 +580,7 @@
             packer = GCCHKPacker(self, packs, '.autopack',
                                  reload_func=reload_func)
             try:
-                result = packer.pack()
+                packer.pack()
             except errors.RetryWithNewPacks:
                 # An exception is propagating out of this context, make sure
                 # this packer has cleaned up. Packer() doesn't set its new_pack
@@ -806,18 +589,14 @@
                 if packer.new_pack is not None:
                     packer.new_pack.abort()
                 raise
-            if result is None:
-                return
             for pack in packs:
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
-        return result
+        self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)


 class CHKInventoryRepository(KnitPackRepository):
@@ -841,39 +620,32 @@
         self.inventories = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
                 add_callback=self._pack_collection.inventory_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=True, is_locked=self.is_locked),
             access=self._pack_collection.inventory_index.data_access)
         self.revisions = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                 add_callback=self._pack_collection.revision_index.add_callback,
                 parents=True, is_locked=self.is_locked,
-                track_external_parent_refs=True, track_new_keys=True),
+                track_external_parent_refs=True),
             access=self._pack_collection.revision_index.data_access,
             delta=False)
         self.signatures = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.signature_index.combined_index,
                 add_callback=self._pack_collection.signature_index.add_callback,
-                parents=False, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=False, is_locked=self.is_locked),
             access=self._pack_collection.signature_index.data_access,
             delta=False)
         self.texts = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.text_index.combined_index,
                 add_callback=self._pack_collection.text_index.add_callback,
-                parents=True, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=True, is_locked=self.is_locked),
             access=self._pack_collection.text_index.data_access)
         # No parents, individual CHK pages don't have specific ancestry
         self.chk_bytes = GroupCompressVersionedFiles(
             _GCGraphIndex(self._pack_collection.chk_index.combined_index,
                 add_callback=self._pack_collection.chk_index.add_callback,
-                parents=False, is_locked=self.is_locked,
-                inconsistency_fatal=False),
+                parents=False, is_locked=self.is_locked),
             access=self._pack_collection.chk_index.data_access)
-        search_key_name = self._format._serializer.search_key_name
-        search_key_func = chk_map.search_key_registry.get(search_key_name)
-        self.chk_bytes._search_key_func = search_key_func
         # True when the repository object is 'write locked' (as opposed to the
         # physical lock only taken out around changes to the pack-names list.)
         # Another way to represent this would be a decorator around the control
@@ -902,44 +674,6 @@
         return self._inventory_add_lines(revision_id, parents,
             inv_lines, check_content=False)

-    def _create_inv_from_null(self, delta, revision_id):
-        """This will mutate new_inv directly.
-
-        This is a simplified form of create_by_apply_delta which knows that all
-        the old values must be None, so everything is a create.
-        """
-        serializer = self._format._serializer
-        new_inv = inventory.CHKInventory(serializer.search_key_name)
-        new_inv.revision_id = revision_id
-        entry_to_bytes = new_inv._entry_to_bytes
-        id_to_entry_dict = {}
-        parent_id_basename_dict = {}
-        for old_path, new_path, file_id, entry in delta:
-            if old_path is not None:
-                raise ValueError('Invalid delta, somebody tried to delete %r'
-                                 ' from the NULL_REVISION'
-                                 % ((old_path, file_id),))
-            if new_path is None:
-                raise ValueError('Invalid delta, delta from NULL_REVISION has'
-                                 ' no new_path %r' % (file_id,))
-            if new_path == '':
-                new_inv.root_id = file_id
-                parent_id_basename_key = StaticTuple('', '').intern()
-            else:
-                utf8_entry_name = entry.name.encode('utf-8')
-                parent_id_basename_key = StaticTuple(entry.parent_id,
-                                                     utf8_entry_name).intern()
-            new_value = entry_to_bytes(entry)
-            # Populate Caches?
-            # new_inv._path_to_fileid_cache[new_path] = file_id
-            key = StaticTuple(file_id).intern()
-            id_to_entry_dict[key] = new_value
-            parent_id_basename_dict[parent_id_basename_key] = file_id
-
-        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
-            parent_id_basename_dict, maximum_size=serializer.maximum_size)
-        return new_inv
-
     def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                                parents, basis_inv=None, propagate_caches=False):
         """Add a new inventory expressed as a delta against another revision.
@@ -965,42 +699,29 @@
             repository format specific) of the serialized inventory, and the
             resulting inventory.
         """
+        if basis_revision_id == _mod_revision.NULL_REVISION:
+            return KnitPackRepository.add_inventory_by_delta(self,
+                basis_revision_id, delta, new_revision_id, parents)
         if not self.is_in_write_group():
             raise AssertionError("%r not in write group" % (self,))
         _mod_revision.check_not_reserved_id(new_revision_id)
-        basis_tree = None
-        if basis_inv is None:
-            if basis_revision_id == _mod_revision.NULL_REVISION:
-                new_inv = self._create_inv_from_null(delta, new_revision_id)
-                if new_inv.root_id is None:
-                    raise errors.RootMissing()
-                inv_lines = new_inv.to_lines()
-                return self._inventory_add_lines(new_revision_id, parents,
-                    inv_lines, check_content=False), new_inv
-            else:
-                basis_tree = self.revision_tree(basis_revision_id)
-                basis_tree.lock_read()
+        basis_tree = self.revision_tree(basis_revision_id)
+        basis_tree.lock_read()
+        try:
+            if basis_inv is None:
                 basis_inv = basis_tree.inventory
-        try:
             result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                 propagate_caches=propagate_caches)
             inv_lines = result.to_lines()
             return self._inventory_add_lines(new_revision_id, parents,
                 inv_lines, check_content=False), result
         finally:
-            if basis_tree is not None:
-                basis_tree.unlock()
-
-    def _deserialise_inventory(self, revision_id, bytes):
-        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
-            (revision_id,))
-
-    def _iter_inventories(self, revision_ids, ordering):
+            basis_tree.unlock()
+
+    def _iter_inventories(self, revision_ids):
         """Iterate over many inventory objects."""
-        if ordering is None:
-            ordering = 'unordered'
         keys = [(revision_id,) for revision_id in revision_ids]
-        stream = self.inventories.get_record_stream(keys, ordering, True)
+        stream = self.inventories.get_record_stream(keys, 'unordered', True)
         texts = {}
         for record in stream:
             if record.storage_kind != 'absent':
@@ -1010,21 +731,26 @@
         for key in keys:
             yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)

-    def _iter_inventory_xmls(self, revision_ids, ordering):
-        # Without a native 'xml' inventory, this method doesn't make sense.
-        # However older working trees, and older bundles want it - so we supply
-        # it allowing _get_inventory_xml to work. Bundles currently use the
-        # serializer directly; this also isn't ideal, but there isn't an xml
-        # iteration interface offered at all for repositories. We could make
-        # _iter_inventory_xmls be part of the contract, even if kept private.
-        inv_to_str = self._serializer.write_inventory_to_string
-        for inv in self.iter_inventories(revision_ids, ordering=ordering):
-            yield inv_to_str(inv), inv.revision_id
-
-    def _find_present_inventory_keys(self, revision_keys):
-        parent_map = self.inventories.get_parent_map(revision_keys)
-        present_inventory_keys = set(k for k in parent_map)
-        return present_inventory_keys
+    def _iter_inventory_xmls(self, revision_ids):
+        # Without a native 'xml' inventory, this method doesn't make sense, so
+        # make it raise to trap naughty direct users.
+        raise NotImplementedError(self._iter_inventory_xmls)
+
+    def _find_parent_ids_of_revisions(self, revision_ids):
+        # TODO: we probably want to make this a helper that other code can get
+        #       at
+        parent_map = self.get_parent_map(revision_ids)
+        parents = set()
+        map(parents.update, parent_map.itervalues())
+        parents.difference_update(revision_ids)
+        parents.discard(_mod_revision.NULL_REVISION)
+        return parents
+
+    def _find_present_inventory_ids(self, revision_ids):
+        keys = [(r,) for r in revision_ids]
+        parent_map = self.inventories.get_parent_map(keys)
+        present_inventory_ids = set(k[-1] for k in parent_map)
+        return present_inventory_ids

     def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
         """Find the file ids and versions affected by revisions.
@@ -1041,32 +767,25 @@
         file_id_revisions = {}
         pb = ui.ui_factory.nested_progress_bar()
         try:
-            revision_keys = [(r,) for r in revision_ids]
-            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
-            # TODO: instead of using _find_present_inventory_keys, change the
-            #       code paths to allow missing inventories to be tolerated.
-            #       However, we only want to tolerate missing parent
-            #       inventories, not missing inventories for revision_ids
-            present_parent_inv_keys = self._find_present_inventory_keys(
-                                        parent_keys)
-            present_parent_inv_ids = set(
-                [k[-1] for k in present_parent_inv_keys])
-            inventories_to_read = set(revision_ids)
-            inventories_to_read.update(present_parent_inv_ids)
-            root_key_info = _build_interesting_key_sets(
-                self, inventories_to_read, present_parent_inv_ids)
-            interesting_root_keys = root_key_info.interesting_root_keys
-            uninteresting_root_keys = root_key_info.uninteresting_root_keys
+            parent_ids = self._find_parent_ids_of_revisions(revision_ids)
+            present_parent_inv_ids = self._find_present_inventory_ids(parent_ids)
+            uninteresting_root_keys = set()
+            interesting_root_keys = set()
+            inventories_to_read = set(present_parent_inv_ids)
+            inventories_to_read.update(revision_ids)
+            for inv in self.iter_inventories(inventories_to_read):
+                entry_chk_root_key = inv.id_to_entry.key()
+                if inv.revision_id in present_parent_inv_ids:
+                    uninteresting_root_keys.add(entry_chk_root_key)
+                else:
+                    interesting_root_keys.add(entry_chk_root_key)
+
             chk_bytes = self.chk_bytes
             for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                         interesting_root_keys, uninteresting_root_keys,
                         pb=pb):
                 for name, bytes in items:
                     (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
-                    # TODO: consider interning file_id, revision_id here, or
-                    #       pushing that intern() into bytes_to_info()
-                    # TODO: rich_root should always be True here, for all
-                    #       repositories that support chk_bytes
                     if not rich_root and name_utf8 == '':
                         continue
                     try:
@@ -1109,36 +828,25 @@
         finally:
             pb.finished()

-    @needs_write_lock
-    def reconcile_canonicalize_chks(self):
-        """Reconcile this repository to make sure all CHKs are in canonical
-        form.
-        """
-        from bzrlib.reconcile import PackReconciler
-        reconciler = PackReconciler(self, thorough=True, canonicalize_chks=True)
-        reconciler.reconcile()
-        return reconciler
-
     def _reconcile_pack(self, collection, packs, extension, revs, pb):
         packer = GCCHKReconcilePacker(collection, packs, extension)
         return packer.pack(pb)

-    def _canonicalize_chks_pack(self, collection, packs, extension, revs, pb):
-        packer = GCCHKCanonicalizingPacker(collection, packs, extension, revs)
-        return packer.pack(pb)
-
     def _get_source(self, to_format):
         """Return a source for streaming from this repository."""
-        if self._format._serializer == to_format._serializer:
+        if isinstance(to_format, remote.RemoteRepositoryFormat):
+            # Can't just check attributes on to_format with the current code,
+            # work around this:
+            to_format._ensure_real()
+            to_format = to_format._custom_format
+        if to_format.__class__ is self._format.__class__:
             # We must be exactly the same format, otherwise stuff like the chk
-            # page layout might be different.
-            # Actually, this test is just slightly looser than exact so that
-            # CHK2 <-> 2a transfers will work.
+            # page layout might be different
             return GroupCHKStreamSource(self, to_format)
         return super(CHKInventoryRepository, self)._get_source(to_format)


-class GroupCHKStreamSource(KnitPackStreamSource):
+class GroupCHKStreamSource(repository.StreamSource):
     """Used when both the source and target repo are GroupCHK repos."""

     def __init__(self, from_repository, to_format):
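Note: `reconcile_canonicalize_chks` on the `-` side is the entry point that reaches `GCCHKCanonicalizingPacker` via `_canonicalize_chks_pack`. A hypothetical usage sketch, assuming `repo` is a writable repository of this format (the method takes its own write lock via `@needs_write_lock`):

    # Rewrites any non-canonical CHK maps (bug 522637 repair); slow,
    # which is why it is kept out of the default reconcile path.
    reconciler = repo.reconcile_canonicalize_chks()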
@@ -1146,11 +854,10 @@
         super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
         self._revision_keys = None
         self._text_keys = None
-        self._text_fetch_order = 'groupcompress'
         self._chk_id_roots = None
         self._chk_p_id_roots = None

-    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
+    def _get_inventory_stream(self, inventory_keys):
         """Get a stream of inventory texts.

         When this function returns, self._chk_id_roots and self._chk_p_id_roots
@@ -1165,11 +872,6 @@
             stream = source_vf.get_record_stream(inventory_keys,
                                                  'groupcompress', True)
             for record in stream:
-                if record.storage_kind == 'absent':
-                    if allow_absent:
-                        continue
-                    else:
-                        raise errors.NoSuchRevision(self, record.key)
                 bytes = record.get_bytes_as('fulltext')
                 chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                              record.key)
@@ -1191,10 +893,16 @@
             p_id_roots_set.clear()
         return ('inventories', _filtered_inv_stream())

-    def _get_filtered_chk_streams(self, excluded_revision_keys):
+    def _find_present_inventories(self, revision_ids):
+        revision_keys = [(r,) for r in revision_ids]
+        inventories = self.from_repository.inventories
+        present_inventories = inventories.get_parent_map(revision_keys)
+        return [p[-1] for p in present_inventories]
+
+    def _get_filtered_chk_streams(self, excluded_revision_ids):
         self._text_keys = set()
-        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
-        if not excluded_revision_keys:
+        excluded_revision_ids.discard(_mod_revision.NULL_REVISION)
+        if not excluded_revision_ids:
             uninteresting_root_keys = set()
             uninteresting_pid_root_keys = set()
         else:
@@ -1202,21 +910,25 @@
             # actually present
             # TODO: Update Repository.iter_inventories() to add
             #       ignore_missing=True
-            present_keys = self.from_repository._find_present_inventory_keys(
-                            excluded_revision_keys)
-            present_ids = [k[-1] for k in present_keys]
+            present_ids = self.from_repository._find_present_inventory_ids(
+                            excluded_revision_ids)
+            present_ids = self._find_present_inventories(excluded_revision_ids)
             uninteresting_root_keys = set()
             uninteresting_pid_root_keys = set()
             for inv in self.from_repository.iter_inventories(present_ids):
                 uninteresting_root_keys.add(inv.id_to_entry.key())
                 uninteresting_pid_root_keys.add(
                     inv.parent_id_basename_to_file_id.key())
+        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
         chk_bytes = self.from_repository.chk_bytes
         def _filter_id_to_entry():
-            interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
-                        self._chk_id_roots, uninteresting_root_keys)
-            for record in _filter_text_keys(interesting_nodes, self._text_keys,
-                    chk_map._bytes_to_text_key):
+            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
+                        self._chk_id_roots, uninteresting_root_keys):
+                for name, bytes in items:
+                    # Note: we don't care about name_utf8, because we are always
+                    # rich-root = True
+                    _, file_id, revision_id = bytes_to_info(bytes)
+                    self._text_keys.add((file_id, revision_id))
                 if record is not None:
                     yield record
             # Consumed
@@ -1231,45 +943,28 @@
             self._chk_p_id_roots = None
         yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

+    def _get_text_stream(self):
+        # Note: We know we don't have to handle adding root keys, because both
+        # the source and target are GCCHK, and those always support rich-roots
+        # We may want to request as 'unordered', in case the source has done a
+        # 'split' packing
+        return ('texts', self.from_repository.texts.get_record_stream(
+                            self._text_keys, 'groupcompress', False))
+
     def get_stream(self, search):
-        def wrap_and_count(pb, rc, stream):
-            """Yield records from stream while showing progress."""
-            count = 0
-            for record in stream:
-                if count == rc.STEP:
-                    rc.increment(count)
-                    pb.update('Estimate', rc.current, rc.max)
-                    count = 0
-                count += 1
-                yield record
-
         revision_ids = search.get_keys()
-        pb = ui.ui_factory.nested_progress_bar()
-        rc = self._record_counter
-        self._record_counter.setup(len(revision_ids))
         for stream_info in self._fetch_revision_texts(revision_ids):
-            yield (stream_info[0],
-                wrap_and_count(pb, rc, stream_info[1]))
+            yield stream_info
         self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        self.from_repository.revisions.clear_cache()
-        self.from_repository.signatures.clear_cache()
-        s = self._get_inventory_stream(self._revision_keys)
-        yield (s[0], wrap_and_count(pb, rc, s[1]))
-        self.from_repository.inventories.clear_cache()
+        yield self._get_inventory_stream(self._revision_keys)
         # TODO: The keys to exclude might be part of the search recipe
         # For now, exclude all parents that are at the edge of ancestry, for
         # which we have inventories
         from_repo = self.from_repository
-        parent_keys = from_repo._find_parent_keys_of_revisions(
-                        self._revision_keys)
-        for stream_info in self._get_filtered_chk_streams(parent_keys):
-            yield (stream_info[0], wrap_and_count(pb, rc, stream_info[1]))
-        self.from_repository.chk_bytes.clear_cache()
-        s = self._get_text_stream()
-        yield (s[0], wrap_and_count(pb, rc, s[1]))
-        self.from_repository.texts.clear_cache()
-        pb.update('Done', rc.max, rc.max)
-        pb.finished()
+        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
+        for stream_info in self._get_filtered_chk_streams(parent_ids):
+            yield stream_info
+        yield self._get_text_stream()

     def get_stream_for_missing_keys(self, missing_keys):
         # missing keys can only occur when we are byte copying and not
@@ -1284,63 +979,15 @@
             missing_inventory_keys.add(key[1:])
         if self._chk_id_roots or self._chk_p_id_roots:
             raise AssertionError('Cannot call get_stream_for_missing_keys'
-                ' until all of get_stream() has been consumed.')
+                ' untill all of get_stream() has been consumed.')
         # Yield the inventory stream, so we can find the chk stream
-        # Some of the missing_keys will be missing because they are ghosts.
-        # As such, we can ignore them. The Sink is required to verify there are
-        # no unavailable texts when the ghost inventories are not filled in.
-        yield self._get_inventory_stream(missing_inventory_keys,
-                                         allow_absent=True)
-        # We use the empty set for excluded_revision_keys, to make it clear
-        # that we want to transmit all referenced chk pages.
+        yield self._get_inventory_stream(missing_inventory_keys)
+        # We use the empty set for excluded_revision_ids, to make it clear that
+        # we want to transmit all referenced chk pages.
         for stream_info in self._get_filtered_chk_streams(set()):
            yield stream_info


-class _InterestingKeyInfo(object):
-    def __init__(self):
-        self.interesting_root_keys = set()
-        self.interesting_pid_root_keys = set()
-        self.uninteresting_root_keys = set()
-        self.uninteresting_pid_root_keys = set()
-
-    def all_interesting(self):
-        return self.interesting_root_keys.union(self.interesting_pid_root_keys)
-
-    def all_uninteresting(self):
-        return self.uninteresting_root_keys.union(
-            self.uninteresting_pid_root_keys)
-
-    def all_keys(self):
-        return self.all_interesting().union(self.all_uninteresting())
-
-
-def _build_interesting_key_sets(repo, inventory_ids, parent_only_inv_ids):
-    result = _InterestingKeyInfo()
-    for inv in repo.iter_inventories(inventory_ids, 'unordered'):
-        root_key = inv.id_to_entry.key()
-        pid_root_key = inv.parent_id_basename_to_file_id.key()
-        if inv.revision_id in parent_only_inv_ids:
-            result.uninteresting_root_keys.add(root_key)
-            result.uninteresting_pid_root_keys.add(pid_root_key)
-        else:
-            result.interesting_root_keys.add(root_key)
-            result.interesting_pid_root_keys.add(pid_root_key)
-    return result
-
-
-def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_text_key):
-    """Iterate the result of iter_interesting_nodes, yielding the records
-    and adding to text_keys.
-    """
-    text_keys_update = text_keys.update
-    for record, items in interesting_nodes_iterable:
-        text_keys_update([bytes_to_text_key(b) for n,b in items])
-        yield record
-
-
-
-
 class RepositoryFormatCHK1(RepositoryFormatPack):
     """A hashed CHK+group compress pack repository."""

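Note: `_InterestingKeyInfo` and `_build_interesting_key_sets` on the `-` side are consumed by `_check_new_inventories` and `fileids_altered_by_revision_ids` earlier in this diff, always in the same shape:

    root_key_info = _build_interesting_key_sets(
        self.repo, inv_ids, parent_invs_only_ids)
    expected_chk_roots = root_key_info.all_keys()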
@@ -1365,7 +1012,6 @@
     _fetch_order = 'unordered'
     _fetch_uses_deltas = False # essentially ignored by the groupcompress code.
     fast_deltas = True
-    pack_compresses = True

     def _get_matching_bzrdir(self):
         return bzrdir.format_registry.make_bzrdir('development6-rich-root')
@@ -1385,46 +1031,11 @@
         return ("Development repository format - rich roots, group compression"
             " and chk inventories")

-
-class RepositoryFormatCHK2(RepositoryFormatCHK1):
-    """A CHK repository that uses the bencode revision serializer."""
-
-    _serializer = chk_serializer.chk_bencode_serializer
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('development7-rich-root')
-
-    def _ignore_setting_bzrdir(self, format):
-        pass
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        """See RepositoryFormat.get_format_string()."""
-        return ('Bazaar development format - chk repository with bencode '
-                'revision serialization (needs bzr.dev from 1.16)\n')
-
-
-class RepositoryFormat2a(RepositoryFormatCHK2):
-    """A CHK repository that uses the bencode revision serializer.
-
-    This is the same as RepositoryFormatCHK2 but with a public name.
-    """
-
-    _serializer = chk_serializer.chk_bencode_serializer
-
-    def _get_matching_bzrdir(self):
-        return bzrdir.format_registry.make_bzrdir('2a')
-
-    def _ignore_setting_bzrdir(self, format):
-        pass
-
-    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
-
-    def get_format_string(self):
-        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
-
-    def get_format_description(self):
-        """See RepositoryFormat.get_format_description()."""
-        return ("Repository format 2a - rich roots, group compression"
-            " and chk inventories")
+    def check_conversion_target(self, target_format):
+        if not target_format.rich_root_data:
+            raise errors.BadConversionTarget(
+                'Does not support rich root data.', target_format)
+        if not getattr(target_format, 'supports_tree_reference', False):
+            raise errors.BadConversionTarget(
+                'Does not support nested trees', target_format)

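Note: per the `get_format_string` implementations on the `-` side, a repository format is recognised by an exact match on this string. A hypothetical sanity check:

    format = RepositoryFormat2a()
    assert format.get_format_string() == (
        'Bazaar repository format 2a (needs bzr 1.16 or later)\n')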