~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: John Arbash Meinel
  • Date: 2009-10-21 21:27:19 UTC
  • mto: This revision was merged to the branch mainline in revision 4771.
  • Revision ID: john@arbash-meinel.com-20091021212719-05zh4t7oo5kaird3
More cleanups and clarifications.

@@ -1,4 +1,4 @@
-# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+# Copyright (C) 2008, 2009 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@
     knit,
     osutils,
     pack,
+    remote,
     revision as _mod_revision,
     trace,
     ui,
@@ -352,8 +353,7 @@
         """Build a VersionedFiles instance on top of this group of packs."""
         index_name = index_name + '_index'
         index_to_pack = {}
-        access = knit._DirectPackAccess(index_to_pack,
-                                        reload_func=self._reload_func)
+        access = knit._DirectPackAccess(index_to_pack)
         if for_write:
             # Use new_pack
             if self.new_pack is None:
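
One side of this hunk passes a reload_func callback to knit._DirectPackAccess, the other constructs it without one. Purely as an illustration (the SimplePackAccess class below is hypothetical, not bzrlib code), this is the kind of retry pattern such a callback enables: if a read fails because the pack files were renamed out from under the reader (for example by a concurrent repack), the access object asks its owner to refresh the index-to-file mapping and retries once.

    class SimplePackAccess(object):
        """Read byte ranges out of pack files, retrying after a reload."""

        def __init__(self, index_to_path, reload_func=None):
            # index_to_path: mapping of index object -> path of its pack file
            self._index_to_path = index_to_path
            self._reload_func = reload_func

        def read(self, index, offset, length):
            # Attempt the read; if it fails and reload_func reports that the
            # pack list changed, retry exactly once with the new mapping.
            try:
                return self._read_once(index, offset, length)
            except (IOError, OSError, KeyError):
                if self._reload_func is None or not self._reload_func():
                    raise
                return self._read_once(index, offset, length)

        def _read_once(self, index, offset, length):
            path = self._index_to_path[index]
            f = open(path, 'rb')
            try:
                f.seek(offset)
                return f.read(length)
            finally:
                f.close()
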
@@ -704,11 +704,10 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        to_be_obsoleted = []
-        for _, packs in pack_operations:
-            to_be_obsoleted.extend(packs)
-        result = self._save_pack_names(clear_obsolete_packs=True,
-                                       obsolete_packs=to_be_obsoleted)
+        result = self._save_pack_names(clear_obsolete_packs=True)
+        # Move the old packs out of the way now they are no longer referenced.
+        for revision_count, packs in pack_operations:
+            self._obsolete_packs(packs)
         return result


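
The two sides of this hunk differ in when the packs that were combined away get moved aside: one collects them into to_be_obsoleted and hands the list to _save_pack_names, the other saves the pack names first and obsoletes the old packs in a separate loop afterwards. The standalone sketch below only illustrates that ordering difference; FakePackCollection and the finish_repack_* helpers are hypothetical stand-ins, not bzrlib's API.

    class FakePackCollection(object):
        """Records the order in which name-saving and obsoleting happen."""

        def __init__(self):
            self.log = []

        def save_pack_names(self, clear_obsolete_packs=False,
                            obsolete_packs=None):
            self.log.append(('save-names', tuple(obsolete_packs or ())))
            return 'pack-names-token'

        def obsolete_packs(self, packs):
            self.log.append(('obsolete', tuple(packs)))


    def finish_repack_combined(collection, pack_operations):
        # Hand the replaced packs to the save step, so they are moved to
        # obsolete as part of the same pack-names update.
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        return collection.save_pack_names(clear_obsolete_packs=True,
                                          obsolete_packs=to_be_obsoleted)


    def finish_repack_separate(collection, pack_operations):
        # Save the new names first, then obsolete the old packs once they
        # are no longer referenced.
        result = collection.save_pack_names(clear_obsolete_packs=True)
        for revision_count, packs in pack_operations:
            collection.obsolete_packs(packs)
        return result


    ops = [(10, ['pack-a', 'pack-b']), (3, ['pack-c'])]
    c1 = FakePackCollection()
    finish_repack_combined(c1, ops)      # obsoletes inside the save step
    c2 = FakePackCollection()
    finish_repack_separate(c2, ops)      # obsoletes after the save step
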
@@ -881,7 +880,7 @@
             if basis_tree is not None:
                 basis_tree.unlock()

-    def _deserialise_inventory(self, revision_id, bytes):
+    def deserialise_inventory(self, revision_id, bytes):
         return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
             (revision_id,))

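
This hunk is a rename between the private _deserialise_inventory spelling and the public deserialise_inventory one. As a generic illustration only (this is not bzrlib's deprecation machinery), a method renamed to a private name can keep the old public name working by forwarding to it:

    import warnings

    class Repository(object):

        def _deserialise_inventory(self, revision_id, bytes):
            # The real implementation would build an inventory here.
            return (revision_id, bytes)

        def deserialise_inventory(self, revision_id, bytes):
            # Old public spelling kept as a thin, warning forwarder.
            warnings.warn('deserialise_inventory is deprecated; use '
                          '_deserialise_inventory instead',
                          DeprecationWarning, stacklevel=2)
            return self._deserialise_inventory(revision_id, bytes)
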
@@ -903,7 +902,7 @@
     def _iter_inventory_xmls(self, revision_ids, ordering):
         # Without a native 'xml' inventory, this method doesn't make sense.
         # However older working trees, and older bundles want it - so we supply
-        # it allowing _get_inventory_xml to work. Bundles currently use the
+        # it allowing get_inventory_xml to work. Bundles currently use the
         # serializer directly; this also isn't ideal, but there isn't an xml
         # iteration interface offered at all for repositories. We could make
         # _iter_inventory_xmls be part of the contract, even if kept private.
@@ -953,10 +952,6 @@
                         pb=pb):
                 for name, bytes in items:
                     (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
-                    # TODO: consider interning file_id, revision_id here, or
-                    #       pushing that intern() into bytes_to_info()
-                    # TODO: rich_root should always be True here, for all
-                    #       repositories that support chk_bytes
                     if not rich_root and name_utf8 == '':
                         continue
                     try:
@@ -1197,8 +1192,6 @@
             # are always rich-root, so there are no synthesised root records to
             # ignore.
             _, file_id, revision_id = bytes_to_info(bytes)
-            file_id = intern(file_id)
-            revision_id = intern(revision_id)
-            text_keys.add(StaticTuple(file_id, revision_id).intern())
+            text_keys.add((file_id, revision_id))
         yield record

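
One side of this hunk interns file_id and revision_id and stores the key as an interned StaticTuple, while the other builds a fresh plain tuple per record. The plain-Python sketch below shows why interning helps when the same ids recur across many text keys; the dict-based intern_key helper is a hypothetical stand-in for StaticTuple interning, not bzrlib code.

    # Equal keys collapse to one shared object instead of a new tuple each
    # time, which matters when millions of (file_id, revision_id) keys share
    # a comparatively small set of distinct ids.
    _key_intern = {}

    def intern_key(file_id, revision_id):
        key = (file_id, revision_id)
        # setdefault returns the previously stored, equal-valued key if one
        # exists, so later duplicates reuse the first object.
        return _key_intern.setdefault(key, key)

    k1 = intern_key('file-id-1', 'rev-1')
    k2 = intern_key('file-id-1', 'rev-1')
    assert k1 == k2 and k1 is k2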