~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-09-19 00:32:14 UTC
  • mfrom: (4685.2.1 bzr.dev)
  • Revision ID: pqm@pqm.ubuntu.com-20090919003214-2dli9jc4y5xhjj3n
(mbp for garyvdm) Revert rename of test_merge_uncommitted_otherbasis_ancestor_of_thisbasis.

@@ -53,6 +53,7 @@
     ResumedPack,
     Packer,
     )
+from bzrlib.static_tuple import StaticTuple
 
 
 class GCPack(NewPack):
@@ -352,7 +353,8 @@
         """Build a VersionedFiles instance on top of this group of packs."""
         index_name = index_name + '_index'
         index_to_pack = {}
-        access = knit._DirectPackAccess(index_to_pack)
+        access = knit._DirectPackAccess(index_to_pack,
+                                        reload_func=self._reload_func)
         if for_write:
             # Use new_pack
             if self.new_pack is None:
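
Note: passing reload_func down to knit._DirectPackAccess lets the pack access layer recover when a concurrent autopack replaces pack files mid-read. Below is a minimal standalone sketch of that retry-on-reload idea with hypothetical names; it is not bzrlib's actual implementation:

    # Hypothetical sketch of the reload_func pattern, not bzrlib's code.
    class PackAccess(object):
        def __init__(self, index_to_pack, reload_func=None):
            self._index_to_pack = index_to_pack   # index -> pack file path
            self._reload_func = reload_func       # refreshes map; True if it changed

        def read(self, index, offset, length):
            try:
                pack_path = self._index_to_pack[index]
            except KeyError:
                # The pack may have been repacked away; ask the caller to
                # refresh the index->pack map, then retry exactly once.
                if self._reload_func is None or not self._reload_func():
                    raise
                pack_path = self._index_to_pack[index]
            f = open(pack_path, 'rb')
            try:
                f.seek(offset)
                return f.read(length)
            finally:
                f.close()
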
@@ -814,14 +816,16 @@
                                  ' no new_path %r' % (file_id,))
             if new_path == '':
                 new_inv.root_id = file_id
-                parent_id_basename_key = ('', '')
+                parent_id_basename_key = StaticTuple('', '').intern()
             else:
                 utf8_entry_name = entry.name.encode('utf-8')
-                parent_id_basename_key = (entry.parent_id, utf8_entry_name)
+                parent_id_basename_key = StaticTuple(entry.parent_id,
+                                                     utf8_entry_name).intern()
             new_value = entry_to_bytes(entry)
             # Populate Caches?
             # new_inv._path_to_fileid_cache[new_path] = file_id
-            id_to_entry_dict[(file_id,)] = new_value
+            key = StaticTuple(file_id).intern()
+            id_to_entry_dict[key] = new_value
             parent_id_basename_dict[parent_id_basename_key] = file_id
 
         new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
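
Note: this hunk switches the CHK map keys from plain tuples to interned StaticTuples, so every occurrence of an equal key shares one object instead of allocating a fresh tuple per inventory entry. A rough sketch of the interning idea using an ordinary dict as the pool (bzrlib's StaticTuple.intern() plays this role in C with lower per-object overhead; the names below are hypothetical):

    # Hypothetical interning pool, illustrating what .intern() buys.
    _intern_pool = {}

    def intern_key(key):
        # Return the canonical instance for this key value, so equal keys
        # built in different places all share a single object.
        return _intern_pool.setdefault(key, key)

    k1 = intern_key(('file-id',))
    k2 = intern_key(('file-id',))
    assert k1 is k2   # one shared object, saving memory for repeated keys
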
@@ -949,6 +953,10 @@
                         pb=pb):
                 for name, bytes in items:
                     (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
+                    # TODO: consider interning file_id, revision_id here, or
+                    #       pushing that intern() into bytes_to_info()
+                    # TODO: rich_root should always be True here, for all
+                    #       repositories that support chk_bytes
                     if not rich_root and name_utf8 == '':
                         continue
                     try:
@@ -1105,7 +1113,10 @@
         for stream_info in self._fetch_revision_texts(revision_ids):
             yield stream_info
         self._revision_keys = [(rev_id,) for rev_id in revision_ids]
+        self.from_repository.revisions.clear_cache()
+        self.from_repository.signatures.clear_cache()
         yield self._get_inventory_stream(self._revision_keys)
+        self.from_repository.inventories.clear_cache()
         # TODO: The keys to exclude might be part of the search recipe
         # For now, exclude all parents that are at the edge of ancestry, for
         # which we have inventories
@@ -1114,7 +1125,9 @@
                         self._revision_keys)
         for stream_info in self._get_filtered_chk_streams(parent_keys):
             yield stream_info
+        self.from_repository.chk_bytes.clear_cache()
         yield self._get_text_stream()
+        self.from_repository.texts.clear_cache()
 
     def get_stream_for_missing_keys(self, missing_keys):
         # missing keys can only occur when we are byte copying and not
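
Note: the added clear_cache() calls drop each store's cache as soon as its portion of the stream has been yielded, so peak memory during a fetch is bounded by one phase rather than accumulating across revisions, signatures, inventories, chk_bytes and texts. A schematic sketch of that phase-by-phase shape (hypothetical stand-ins; only the clear_cache() name comes from the diff above):

    # Hypothetical stand-in for a store that caches records it streams.
    class CachingStore(object):
        def __init__(self, records):
            self._records = records
            self._cache = {}

        def get_record_stream(self, keys):
            for key in keys:
                self._cache[key] = self._records[key]  # cache while streaming
                yield self._cache[key]

        def clear_cache(self):
            self._cache.clear()

    def get_stream(stores, keys):
        # Stream one store at a time, releasing its cache before moving on,
        # so at most one phase's cache is alive at any moment.
        for store in stores:
            for record in store.get_record_stream(keys):
                yield record
            store.clear_cache()
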
@@ -1184,6 +1197,8 @@
             # are always rich-root, so there are no synthesised root records to
             # ignore.
             _, file_id, revision_id = bytes_to_info(bytes)
-            text_keys.add((file_id, revision_id))
+            file_id = intern(file_id)
+            revision_id = intern(revision_id)
+            text_keys.add(StaticTuple(file_id, revision_id).intern())
             yield record
 
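
Note: intern(file_id) and intern(revision_id) map equal id strings onto one shared object; the same file and revision ids recur across a great many text keys, so the key set ends up storing each id string once. intern() is a builtin in the Python 2 of bzrlib's era and lives at sys.intern on Python 3; a small demonstration:

    try:
        intern_ = intern                    # Python 2 builtin
    except NameError:
        from sys import intern as intern_   # moved to sys in Python 3

    a = intern_('file-id-' + 'x' * 30)
    b = intern_('file-id-' + 'x' * 30)
    assert a is b   # equal interned strings are the same object
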