~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/repofmt/groupcompress_repo.py

  • Committer: John Arbash Meinel
  • Date: 2009-08-03 20:38:39 UTC
  • mto: This revision was merged to the branch mainline in revision 4592.
  • Revision ID: john@arbash-meinel.com-20090803203839-du9y39adazy9qdly
more updates to get things to build cleanly.

1) Delete the release directories, because it seems that gf.recipe.bzr doesn't
clean them up when you update to a new release.
2) Fix 'clean-installer-all'. It is much simpler now, as we just nuke the
whole build-win32 directory.

Show diffs side-by-side

added added

removed removed

Lines of Context:
154
154
        self._writer.begin()
155
155
        # what state is the pack in? (open, finished, aborted)
156
156
        self._state = 'open'
157
 
        # no name until we finish writing the content
158
 
        self.name = None
159
157
 
160
158
    def _check_references(self):
161
159
        """Make sure our external references are present.
412
410
 
413
411
    def _copy_inventory_texts(self):
414
412
        source_vf, target_vf = self._build_vfs('inventory', True, True)
415
 
        # It is not sufficient to just use self.revision_keys, as stacked
416
 
        # repositories can have more inventories than they have revisions.
417
 
        # One alternative would be to do something with
418
 
        # get_parent_map(self.revision_keys), but that shouldn't be any faster
419
 
        # than this.
420
 
        inventory_keys = source_vf.keys()
421
 
        missing_inventories = set(self.revision_keys).difference(inventory_keys)
422
 
        if missing_inventories:
423
 
            missing_inventories = sorted(missing_inventories)
424
 
            raise ValueError('We are missing inventories for revisions: %s'
425
 
                % (missing_inventories,))
426
 
        self._copy_stream(source_vf, target_vf, inventory_keys,
 
413
        self._copy_stream(source_vf, target_vf, self.revision_keys,
427
414
                          'inventories', self._get_filtered_inv_stream, 2)
428
415
 
429
416
    def _copy_chk_texts(self):
479
466
        if not self._use_pack(self.new_pack):
480
467
            self.new_pack.abort()
481
468
            return None
482
 
        self.new_pack.finish_content()
483
 
        if len(self.packs) == 1:
484
 
            old_pack = self.packs[0]
485
 
            if old_pack.name == self.new_pack._hash.hexdigest():
486
 
                # The single old pack was already optimally packed.
487
 
                trace.mutter('single pack %s was already optimally packed',
488
 
                    old_pack.name)
489
 
                self.new_pack.abort()
490
 
                return None
491
469
        self.pb.update('finishing repack', 6, 7)
492
470
        self.new_pack.finish()
493
471
        self._pack_collection.allocate(self.new_pack)
602
580
            packer = GCCHKPacker(self, packs, '.autopack',
603
581
                                 reload_func=reload_func)
604
582
            try:
605
 
                result = packer.pack()
 
583
                packer.pack()
606
584
            except errors.RetryWithNewPacks:
607
585
                # An exception is propagating out of this context, make sure
608
586
                # this packer has cleaned up. Packer() doesn't set its new_pack
611
589
                if packer.new_pack is not None:
612
590
                    packer.new_pack.abort()
613
591
                raise
614
 
            if result is None:
615
 
                return
616
592
            for pack in packs:
617
593
                self._remove_pack_from_memory(pack)
618
594
        # record the newly available packs and stop advertising the old
790
766
            if basis_tree is not None:
791
767
                basis_tree.unlock()
792
768
 
793
 
    def deserialise_inventory(self, revision_id, bytes):
794
 
        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
795
 
            (revision_id,))
796
 
 
797
 
    def _iter_inventories(self, revision_ids, ordering):
 
769
    def _iter_inventories(self, revision_ids):
798
770
        """Iterate over many inventory objects."""
799
 
        if ordering is None:
800
 
            ordering = 'unordered'
801
771
        keys = [(revision_id,) for revision_id in revision_ids]
802
 
        stream = self.inventories.get_record_stream(keys, ordering, True)
 
772
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
803
773
        texts = {}
804
774
        for record in stream:
805
775
            if record.storage_kind != 'absent':
809
779
        for key in keys:
810
780
            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)
811
781
 
812
 
    def _iter_inventory_xmls(self, revision_ids, ordering):
813
 
        # Without a native 'xml' inventory, this method doesn't make sense.
814
 
        # However older working trees, and older bundles want it - so we supply
815
 
        # it allowing get_inventory_xml to work. Bundles currently use the
816
 
        # serializer directly; this also isn't ideal, but there isn't an xml
817
 
        # iteration interface offered at all for repositories. We could make
818
 
        # _iter_inventory_xmls be part of the contract, even if kept private.
819
 
        inv_to_str = self._serializer.write_inventory_to_string
820
 
        for inv in self.iter_inventories(revision_ids, ordering=ordering):
821
 
            yield inv_to_str(inv), inv.revision_id
 
782
    def _iter_inventory_xmls(self, revision_ids):
 
783
        # Without a native 'xml' inventory, this method doesn't make sense, so
 
784
        # make it raise to trap naughty direct users.
 
785
        raise NotImplementedError(self._iter_inventory_xmls)
822
786
 
823
787
    def _find_present_inventory_keys(self, revision_keys):
824
788
        parent_map = self.inventories.get_parent_map(revision_keys)
915
879
 
916
880
    def _get_source(self, to_format):
917
881
        """Return a source for streaming from this repository."""
918
 
        if self._format._serializer == to_format._serializer:
 
882
        if isinstance(to_format, remote.RemoteRepositoryFormat):
 
883
            # Can't just check attributes on to_format with the current code,
 
884
            # work around this:
 
885
            to_format._ensure_real()
 
886
            to_format = to_format._custom_format
 
887
        if to_format.__class__ is self._format.__class__:
919
888
            # We must be exactly the same format, otherwise stuff like the chk
920
 
            # page layout might be different.
921
 
            # Actually, this test is just slightly looser than exact so that
922
 
            # CHK2 <-> 2a transfers will work.
 
889
            # page layout might be different
923
890
            return GroupCHKStreamSource(self, to_format)
924
891
        return super(CHKInventoryRepository, self)._get_source(to_format)
925
892
 
1107
1074
        return ("Development repository format - rich roots, group compression"
1108
1075
            " and chk inventories")
1109
1076
 
 
1077
    def check_conversion_target(self, target_format):
 
1078
        if not target_format.rich_root_data:
 
1079
            raise errors.BadConversionTarget(
 
1080
                'Does not support rich root data.', target_format)
 
1081
        if (self.supports_tree_reference and 
 
1082
            not getattr(target_format, 'supports_tree_reference', False)):
 
1083
            raise errors.BadConversionTarget(
 
1084
                'Does not support nested trees', target_format)
 
1085
 
 
1086
 
1110
1087
 
1111
1088
class RepositoryFormatCHK2(RepositoryFormatCHK1):
1112
1089
    """A CHK repository that uses the bencode revision serializer."""
1129
1106
 
1130
1107
class RepositoryFormat2a(RepositoryFormatCHK2):
1131
1108
    """A CHK repository that uses the bencode revision serializer.
1132
 
 
 
1109
    
1133
1110
    This is the same as RepositoryFormatCHK2 but with a public name.
1134
1111
    """
1135
1112