~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: Danny van Heumen
  • Date: 2010-03-09 21:42:11 UTC
  • mto: (4634.139.5 2.0)
  • mto: This revision was merged to the branch mainline in revision 5160.
  • Revision ID: danny@dannyvanheumen.nl-20100309214211-iqh42x6qcikgd9p3
Reverted now-useless TODO list.

@@ -29,6 +29,7 @@
     knit,
     osutils,
     pack,
+    remote,
     revision as _mod_revision,
     trace,
     ui,
@@ -52,7 +53,6 @@
     ResumedPack,
     Packer,
     )
-from bzrlib.static_tuple import StaticTuple
 
 
 class GCPack(NewPack):
@@ -816,16 +816,14 @@
                                  ' no new_path %r' % (file_id,))
             if new_path == '':
                 new_inv.root_id = file_id
-                parent_id_basename_key = StaticTuple('', '').intern()
+                parent_id_basename_key = ('', '')
             else:
                 utf8_entry_name = entry.name.encode('utf-8')
-                parent_id_basename_key = StaticTuple(entry.parent_id,
-                                                     utf8_entry_name).intern()
+                parent_id_basename_key = (entry.parent_id, utf8_entry_name)
             new_value = entry_to_bytes(entry)
             # Populate Caches?
             # new_inv._path_to_fileid_cache[new_path] = file_id
-            key = StaticTuple(file_id).intern()
-            id_to_entry_dict[key] = new_value
+            id_to_entry_dict[(file_id,)] = new_value
             parent_id_basename_dict[parent_id_basename_key] = file_id
 
         new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
@@ -881,7 +879,7 @@
             if basis_tree is not None:
                 basis_tree.unlock()
 
-    def _deserialise_inventory(self, revision_id, bytes):
+    def deserialise_inventory(self, revision_id, bytes):
         return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
             (revision_id,))
 
@@ -903,7 +901,7 @@
     def _iter_inventory_xmls(self, revision_ids, ordering):
         # Without a native 'xml' inventory, this method doesn't make sense.
         # However older working trees, and older bundles want it - so we supply
-        # it allowing _get_inventory_xml to work. Bundles currently use the
+        # it allowing get_inventory_xml to work. Bundles currently use the
         # serializer directly; this also isn't ideal, but there isn't an xml
         # iteration interface offered at all for repositories. We could make
         # _iter_inventory_xmls be part of the contract, even if kept private.
@@ -953,10 +951,6 @@
                         pb=pb):
                 for name, bytes in items:
                     (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
-                    # TODO: consider interning file_id, revision_id here, or
-                    #       pushing that intern() into bytes_to_info()
-                    # TODO: rich_root should always be True here, for all
-                    #       repositories that support chk_bytes
                     if not rich_root and name_utf8 == '':
                         continue
                     try:
@@ -1113,10 +1107,7 @@
         for stream_info in self._fetch_revision_texts(revision_ids):
             yield stream_info
         self._revision_keys = [(rev_id,) for rev_id in revision_ids]
-        self.from_repository.revisions.clear_cache()
-        self.from_repository.signatures.clear_cache()
         yield self._get_inventory_stream(self._revision_keys)
-        self.from_repository.inventories.clear_cache()
         # TODO: The keys to exclude might be part of the search recipe
         # For now, exclude all parents that are at the edge of ancestry, for
         # which we have inventories
@@ -1125,9 +1116,7 @@
                         self._revision_keys)
         for stream_info in self._get_filtered_chk_streams(parent_keys):
             yield stream_info
-        self.from_repository.chk_bytes.clear_cache()
         yield self._get_text_stream()
-        self.from_repository.texts.clear_cache()
 
     def get_stream_for_missing_keys(self, missing_keys):
         # missing keys can only occur when we are byte copying and not
@@ -1197,9 +1186,7 @@
             # are always rich-root, so there are no synthesised root records to
             # ignore.
             _, file_id, revision_id = bytes_to_info(bytes)
-            file_id = intern(file_id)
-            revision_id = intern(revision_id)
-            text_keys.add(StaticTuple(file_id, revision_id).intern())
+            text_keys.add((file_id, revision_id))
         yield record
 
 
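A note on the pattern being reverted above: StaticTuple(...).intern() returns a
canonical shared instance of a key, so the many identical (file_id, revision_id)
keys built during a fetch can share a single allocation, whereas plain tuples
are allocated fresh on every call. The sketch below illustrates that trade-off
using only stock Python and a hypothetical intern_key() helper; it is not
bzrlib code (the real bzrlib.static_tuple.StaticTuple is a tuple-like C type
with a pure-Python fallback).

    _interned_keys = {}

    def intern_key(file_id, revision_id):
        # Hypothetical helper mimicking StaticTuple.intern(): return the one
        # canonical key object for this (file_id, revision_id) pair.
        key = (file_id, revision_id)
        return _interned_keys.setdefault(key, key)

    # Plain tuples (the state this revision reverts to): equal keys still
    # hash and compare equal, but each call allocates a new object.
    a = ('file-1', 'rev-1')
    b = ('file-1', 'rev-1')
    assert a == b and a is not b

    # Interned keys: repeated lookups return the identical object, so a set
    # such as text_keys keeps only one copy alive.
    c = intern_key('file-1', 'rev-1')
    d = intern_key('file-1', 'rev-1')
    assert c is d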