~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Andrew Bennetts
  • Date: 2011-02-25 08:45:27 UTC
  • mto: This revision was merged to the branch mainline in revision 5695.
  • Revision ID: andrew.bennetts@canonical.com-20110225084527-0ucp7p00d00hoqon
Add another test.

  49   49   """)
  50   50   from bzrlib import (
  51   51       bzrdir,
       52 +     btree_index,
  52   53       errors,
  53   54       lockable_files,
  54   55       lockdir,

  56   57       )
  57   58   
  58   59   from bzrlib.decorators import needs_write_lock, only_raises
  59      - from bzrlib.btree_index import (
  60      -     BTreeGraphIndex,
  61      -     BTreeBuilder,
  62      -     )
  63   60   from bzrlib.index import (
  64   61       GraphIndex,
  65   62       InMemoryGraphIndex,
  66   63       )
       64 + from bzrlib.lock import LogicalLockResult
  67   65   from bzrlib.repofmt.knitrepo import KnitRepository
  68   66   from bzrlib.repository import (
  69   67       CommitBuilder,
  70   68       MetaDirRepositoryFormat,
  71   69       RepositoryFormat,
       70 +     RepositoryWriteLockResult,
  72   71       RootCommitBuilder,
  73   72       StreamSource,
  74   73       )

 229  228           unlimited_cache = False
 230  229           if index_type == 'chk':
 231  230               unlimited_cache = True
 232      -         setattr(self, index_type + '_index',
 233      -             self.index_class(self.index_transport,
 234      -                 self.index_name(index_type, self.name),
 235      -                 self.index_sizes[self.index_offset(index_type)],
 236      -                 unlimited_cache=unlimited_cache))
      231 +         index = self.index_class(self.index_transport,
      232 +                     self.index_name(index_type, self.name),
      233 +                     self.index_sizes[self.index_offset(index_type)],
      234 +                     unlimited_cache=unlimited_cache)
      235 +         if index_type == 'chk':
      236 +             index._leaf_factory = btree_index._gcchk_factory
      237 +         setattr(self, index_type + '_index', index)
 237  238   
 238  239   
 239  240   class ExistingPack(Pack):

 721  722           :return: A Pack object, or None if nothing was copied.
 722  723           """
 723  724           # open a pack - using the same name as the last temporary file
 724      -         # - which has already been flushed, so its safe.
      725 +         # - which has already been flushed, so it's safe.
 725  726           # XXX: - duplicate code warning with start_write_group; fix before
 726  727           #      considering 'done'.
 727  728           if self._pack_collection._new_pack is not None:

1291 1292           # reinserted, and if d3 has incorrect parents it will also be
1292 1293           # reinserted. If we insert d3 first, d2 is present (as it was bulk
1293 1294           # copied), so we will try to delta, but d2 is not currently able to be
1294      -         # extracted because it's basis d1 is not present. Topologically sorting
     1295 +         # extracted because its basis d1 is not present. Topologically sorting
1295 1296           # addresses this. The following generates a sort for all the texts that
1296 1297           # are being inserted without having to reference the entire text key
1297 1298           # space (we only topo sort the revisions, which is smaller).

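The comment above describes why insertion order matters: a text can only be delta-compressed against a basis that is already present, so bases must be inserted before the texts that delta against them. A toy, self-contained illustration of that ordering (plain Python, not bzrlib code; d1/d2/d3 mirror the names in the comment):

    def topo_order(basis_graph):
        # Emit each node only after all of its delta bases.
        order, seen = [], set()
        def visit(node):
            if node in seen:
                return
            seen.add(node)
            for basis in basis_graph.get(node, ()):
                visit(basis)
            order.append(node)
        for node in sorted(basis_graph):
            visit(node)
        return order

    # d2 deltas against d1, and d3 against d2: inserting in this order
    # guarantees every basis is present before the delta that needs it.
    print(topo_order({'d1': [], 'd2': ['d1'], 'd3': ['d2']}))
    # -> ['d1', 'd2', 'd3']
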
1572 1573           mutter('Packing repository %s, which has %d pack files, '
1573 1574               'containing %d revisions with hint %r.', self, total_packs,
1574 1575               total_revisions, hint)
     1576 +         while True:
     1577 +             try:
     1578 +                 self._try_pack_operations(hint)
     1579 +             except RetryPackOperations:
     1580 +                 continue
     1581 +             break
     1582 + 
     1583 +         if clean_obsolete_packs:
     1584 +             self._clear_obsolete_packs()
     1585 + 
     1586 +     def _try_pack_operations(self, hint):
     1587 +         """Calculate the pack operations based on the hint (if any), and
     1588 +         execute them.
     1589 +         """
1575 1590           # determine which packs need changing
1576 1591           pack_operations = [[0, []]]
1577 1592           for pack in self.all_packs():

1580 1595                   # or this pack was included in the hint.
1581 1596                   pack_operations[-1][0] += pack.get_revision_count()
1582 1597                   pack_operations[-1][1].append(pack)
1583      -         self._execute_pack_operations(pack_operations, OptimisingPacker)
1584      - 
1585      -         if clean_obsolete_packs:
1586      -             self._clear_obsolete_packs()
     1598 +         self._execute_pack_operations(pack_operations, OptimisingPacker,
     1599 +             reload_func=self._restart_pack_operations)
1587 1600   
1588 1601       def plan_autopack_combinations(self, existing_packs, pack_distribution):
1589 1602           """Plan a pack operation.

1599 1612           pack_operations = [[0, []]]
1600 1613           # plan out what packs to keep, and what to reorganise
1601 1614           while len(existing_packs):
1602      -             # take the largest pack, and if its less than the head of the
     1615 +             # take the largest pack, and if it's less than the head of the
1603 1616               # distribution chart we will include its contents in the new pack
1604      -             # for that position. If its larger, we remove its size from the
     1617 +             # for that position. If it's larger, we remove its size from the
1605 1618               # distribution chart
1606 1619               next_pack_rev_count, next_pack = existing_packs.pop(0)
1607 1620               if next_pack_rev_count >= pack_distribution[0]:

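The new while/try/except loop in pack() retries the whole operation whenever a concurrent writer changes the pack files mid-pack. A minimal, self-contained sketch of that control flow (the names here are illustrative stand-ins, not bzrlib API):

    class RetryPackOperationsDemo(Exception):
        # Stand-in for the RetryPackOperations signal added at the end
        # of this file.
        pass

    def pack_with_retry(try_pack_operations):
        while True:
            try:
                try_pack_operations()
            except RetryPackOperationsDemo:
                continue  # pack names changed under us: re-plan and retry
            break         # reached only after an attempt succeeds

    attempts = []
    def flaky_operation():
        attempts.append(1)
        if len(attempts) == 1:
            raise RetryPackOperationsDemo()

    pack_with_retry(flaky_operation)
    assert len(attempts) == 2  # one failed attempt, then a clean retry

Because _restart_pack_operations only raises the retry signal after reload_pack_names() reports a real change (see the hunk at new lines 2059-2065 below), the loop cannot spin forever on an unchanged repository.
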
1642 1655   
1643 1656           :return: True if the disk names had not been previously read.
1644 1657           """
1645      -         # NB: if you see an assertion error here, its probably access against
     1658 +         # NB: if you see an assertion error here, it's probably access against
1646 1659           # an unlocked repo. Naughty.
1647 1660           if not self.repo.is_locked():
1648 1661               raise errors.ObjectNotLocked(self.repo)

1678 1691               txt_index = self._make_index(name, '.tix')
1679 1692               sig_index = self._make_index(name, '.six')
1680 1693               if self.chk_index is not None:
1681      -                 chk_index = self._make_index(name, '.cix', unlimited_cache=True)
     1694 +                 chk_index = self._make_index(name, '.cix', is_chk=True)
1682 1695               else:
1683 1696                   chk_index = None
1684 1697               result = ExistingPack(self._pack_transport, name, rev_index,

1704 1717               sig_index = self._make_index(name, '.six', resume=True)
1705 1718               if self.chk_index is not None:
1706 1719                   chk_index = self._make_index(name, '.cix', resume=True,
1707      -                                              unlimited_cache=True)
     1720 +                                              is_chk=True)
1708 1721               else:
1709 1722                   chk_index = None
1710 1723               result = self.resumed_pack_factory(name, rev_index, inv_index,

1740 1753           return self._index_class(self.transport, 'pack-names', None
1741 1754                   ).iter_all_entries()
1742 1755   
1743      -     def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
     1756 +     def _make_index(self, name, suffix, resume=False, is_chk=False):
1744 1757           size_offset = self._suffix_offsets[suffix]
1745 1758           index_name = name + suffix
1746 1759           if resume:

1749 1762           else:
1750 1763               transport = self._index_transport
1751 1764               index_size = self._names[name][size_offset]
1752      -         return self._index_class(transport, index_name, index_size,
1753      -                                  unlimited_cache=unlimited_cache)
     1765 +         index = self._index_class(transport, index_name, index_size,
     1766 +                                   unlimited_cache=is_chk)
     1767 +         if is_chk and self._index_class is btree_index.BTreeGraphIndex:
     1768 +             index._leaf_factory = btree_index._gcchk_factory
     1769 +         return index
1754 1770   
1755 1771       def _max_pack_count(self, total_revisions):
1756 1772           """Return the maximum number of packs to use for total revisions.

1942 1958                       # disk index because the set values are the same, unless
1943 1959                       # the only index shows up as deleted by the set difference
1944 1960                       # - which it may. Until there is a specific test for this,
1945      -                     # assume its broken. RBC 20071017.
     1961 +                     # assume it's broken. RBC 20071017.
1946 1962                       self._remove_pack_from_memory(self.get_pack_by_name(name))
1947 1963                       self._names[name] = sizes
1948 1964                       self.get_pack_by_name(name)

2013 2029           """
2014 2030           # The ensure_loaded call is to handle the case where the first call
2015 2031           # made involving the collection was to reload_pack_names, where we
2016      -         # don't have a view of disk contents. Its a bit of a bandaid, and
2017      -         # causes two reads of pack-names, but its a rare corner case not struck
2018      -         # with regular push/pull etc.
     2032 +         # don't have a view of disk contents. It's a bit of a bandaid, and
     2033 +         # causes two reads of pack-names, but it's a rare corner case not
     2034 +         # struck with regular push/pull etc.
2019 2035           first_read = self.ensure_loaded()
2020 2036           if first_read:
2021 2037               return True

2040 2056               raise
2041 2057           raise errors.RetryAutopack(self.repo, False, sys.exc_info())
2042 2058   
     2059 +     def _restart_pack_operations(self):
     2060 +         """Reload the pack names list, and restart the autopack code."""
     2061 +         if not self.reload_pack_names():
     2062 +             # Re-raise the original exception, because something went missing
     2063 +             # and a restart didn't find it
     2064 +             raise
     2065 +         raise RetryPackOperations(self.repo, False, sys.exc_info())
     2066 + 
2043 2067       def _clear_obsolete_packs(self, preserve=None):
2044 2068           """Delete everything from the obsolete-packs directory.
2045 2069   

2340 2364           return self._write_lock_count
2341 2365   
2342 2366       def lock_write(self, token=None):
     2367 +         """Lock the repository for writes.
     2368 + 
     2369 +         :return: A bzrlib.repository.RepositoryWriteLockResult.
     2370 +         """
2343 2371           locked = self.is_locked()
2344 2372           if not self._write_lock_count and locked:
2345 2373               raise errors.ReadOnlyError(self)

2354 2382                   # Writes don't affect fallback repos
2355 2383                   repo.lock_read()
2356 2384               self._refresh_data()
     2385 +         return RepositoryWriteLockResult(self.unlock, None)
2357 2386   
2358 2387       def lock_read(self):
     2388 +         """Lock the repository for reads.
     2389 + 
     2390 +         :return: A bzrlib.lock.LogicalLockResult.
     2391 +         """
2359 2392           locked = self.is_locked()
2360 2393           if self._write_lock_count:
2361 2394               self._write_lock_count += 1

2368 2401               for repo in self._fallback_repositories:
2369 2402                   repo.lock_read()
2370 2403               self._refresh_data()
     2404 +         return LogicalLockResult(self.unlock)
2371 2405   
2372 2406       def leave_lock_in_place(self):
2373 2407           # not supported - raise an error

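Both lock methods now hand the caller a result object whose unlock attribute releases the lock, so acquire and release can be paired without keeping a separate reference to the repository just to unlock it. A sketch of the calling pattern, assuming only what the added lines show (that the returned objects carry a working unlock callable):

    def with_read_lock(repo, operation):
        # lock_read() returns a bzrlib.lock.LogicalLockResult; its
        # unlock attribute is the repository's own unlock method.
        lock = repo.lock_read()
        try:
            return operation(repo)
        finally:
            lock.unlock()
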
2817 2851       _commit_builder_class = PackCommitBuilder
2818 2852       supports_external_lookups = True
2819 2853       # What index classes to use
2820      -     index_builder_class = BTreeBuilder
2821      -     index_class = BTreeGraphIndex
     2854 +     index_builder_class = btree_index.BTreeBuilder
     2855 +     index_class = btree_index.BTreeGraphIndex
2822 2856   
2823 2857       @property
2824 2858       def _serializer(self):

2853 2887       supports_tree_reference = False # no subtrees
2854 2888       supports_external_lookups = True
2855 2889       # What index classes to use
2856      -     index_builder_class = BTreeBuilder
2857      -     index_class = BTreeGraphIndex
     2890 +     index_builder_class = btree_index.BTreeBuilder
     2891 +     index_class = btree_index.BTreeGraphIndex
2858 2892   
2859 2893       @property
2860 2894       def _serializer(self):

2880 2914   class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
2881 2915       """A subtrees development repository.
2882 2916   
2883      -     This format should be retained until the second release after bzr 1.7.
     2917 +     This format should be retained in 2.3, to provide an upgrade path from this
     2918 +     to RepositoryFormat2aSubtree.  It can be removed in later releases.
2884 2919   
2885 2920       1.6.1-subtree[as it might have been] with B+Tree indices.
2886      - 
2887      -     This is [now] retained until we have a CHK based subtree format in
2888      -     development.
2889 2921       """
2890 2922   
2891 2923       repository_class = KnitPackRepository

2895 2927       supports_tree_reference = True
2896 2928       supports_external_lookups = True
2897 2929       # What index classes to use
2898      -     index_builder_class = BTreeBuilder
2899      -     index_class = BTreeGraphIndex
     2930 +     index_builder_class = btree_index.BTreeBuilder
     2931 +     index_class = btree_index.BTreeGraphIndex
2900 2932   
2901 2933       @property
2902 2934       def _serializer(self):

2904 2936   
2905 2937       def _get_matching_bzrdir(self):
2906 2938           return bzrdir.format_registry.make_bzrdir(
2907      -             'development-subtree')
     2939 +             'development5-subtree')
2908 2940   
2909 2941       def _ignore_setting_bzrdir(self, format):
2910 2942           pass

2921 2953           return ("Development repository format, currently the same as "
2922 2954               "1.6.1-subtree with B+Tree indices.\n")
2923 2955   
     2956 + 
     2957 + class RetryPackOperations(errors.RetryWithNewPacks):
     2958 +     """Raised when we are packing and we find a missing file.
     2959 + 
     2960 +     Meant as a signaling exception, to tell the RepositoryPackCollection.pack
     2961 +     code it should try again.
     2962 +     """
     2963 + 
     2964 +     internal_error = True
     2965 + 
     2966 +     _fmt = ("Pack files have changed, reload and try pack again."
     2967 +             " context: %(context)s %(orig_error)s")
     2968 + 
     2969 + 
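
RetryPackOperations follows the usual bzrlib error convention: _fmt is a template expanded from attributes on the exception instance (here context and orig_error, which the RetryWithNewPacks base class is expected to supply from its constructor arguments). A toy model of that expansion, not the real bzrlib.errors plumbing:

    class RetryDemo(Exception):
        _fmt = ("Pack files have changed, reload and try pack again."
                " context: %(context)s %(orig_error)s")

        def __init__(self, context, orig_error):
            self.context = context
            self.orig_error = orig_error
            # Expand the template from the instance's attribute dict.
            Exception.__init__(self, self._fmt % self.__dict__)

    print(RetryDemo('repo-at-/srv/repo', 'NoSuchFile: 0123abcd.pack'))

internal_error = True marks this as an error that should never reach the user: per its docstring, it is a signal meant to be caught by the retry loop in RepositoryPackCollection.pack.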