~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Ian Clatworthy
  • Date: 2010-02-19 03:02:07 UTC
  • mto: (4797.23.1 integration-2.1)
  • mto: This revision was merged to the branch mainline in revision 5055.
  • Revision ID: ian.clatworthy@canonical.com-20100219030207-zpbzx021zavx4sqt
  • Commit message: What's New in 2.1 - a summary of changes since 2.0

=== modified file 'bzrlib/repofmt/pack_repo.py'
@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
+# Copyright (C) 2007-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
 
 from bzrlib import (
     chk_map,
+    cleanup,
     debug,
     graph,
     osutils,
@@ -54,7 +55,7 @@
     revision as _mod_revision,
     )
 
-from bzrlib.decorators import needs_write_lock
+from bzrlib.decorators import needs_write_lock, only_raises
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -73,6 +74,7 @@
     )
 from bzrlib.trace import (
     mutter,
+    note,
     warning,
     )
 
@@ -224,10 +226,14 @@
         return self.index_name('text', name)
 
     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))
 
 
 class ExistingPack(Pack):
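The motivation for `unlimited_cache`: chk index keys are content hashes, so lookups hit leaf pages in essentially random order, and a small bounded page cache is evicted and re-read constantly. A minimal sketch of the two policies, assuming a cache keyed by page offset (`LRUPageCache` and `UnlimitedPageCache` are illustrative stand-ins, not the real `BTreeGraphIndex` internals):

    from collections import OrderedDict

    class LRUPageCache(object):
        """Bounded policy (the default): old pages are evicted."""

        def __init__(self, max_pages):
            self._max_pages = max_pages
            self._pages = OrderedDict()

        def add(self, offset, page):
            self._pages.pop(offset, None)
            self._pages[offset] = page           # most recently used at the end
            if len(self._pages) > self._max_pages:
                self._pages.popitem(last=False)  # evict least recently used

    class UnlimitedPageCache(dict):
        """unlimited_cache=True: keep every page read for the index's lifetime."""

        def add(self, offset, page):
            self[offset] = page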
@@ -422,6 +428,8 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
+        # no name until we finish writing the content
+        self.name = None
 
     def abort(self):
         """Cancel creating this pack."""
@@ -448,6 +456,14 @@
             self.signature_index.key_count() or
             (self.chk_index is not None and self.chk_index.key_count()))
 
+    def finish_content(self):
+        if self.name is not None:
+            return
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
+
     def finish(self, suspend=False):
         """Finish the new pack.
 
@@ -459,10 +475,7 @@
          - stores the index size tuple for the pack in the index_sizes
            attribute.
         """
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
+        self.finish_content()
         if not suspend:
             self._check_references()
         # write indices
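Pulling the flush-and-name steps out of `finish()` into `finish_content()` gives the pack one idempotent place where its content is sealed and its name (the hash of its contents) is fixed; together with `self.name = None` in the constructor above, `name` doubles as the "content finished" flag. The guard in miniature (a sketch; `_compute_name` stands in for hashing the pack content):

    class Finishable(object):
        def __init__(self):
            self.name = None                 # no name until content is finished

        def finish_content(self):
            if self.name is not None:
                return                       # already sealed; repeat calls are no-ops
            self.name = self._compute_name()

        def _compute_name(self):
            return 'hexdigest-of-content'    # stand-in for self._hash.hexdigest()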
@@ -634,11 +647,10 @@
         del self.combined_index._indices[:]
         self.add_callback = None
 
-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
         self.combined_index._indices.remove(index)
@@ -1105,7 +1117,7 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the the node retrieved by the
+            (key, eol_flag, references) for the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1411,6 +1423,9 @@
         # resumed packs
         self._resumed_packs = []
 
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
+
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
 
@@ -1530,10 +1545,11 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
         return result
 
     def _flush_new_pack(self):
@@ -1567,7 +1583,9 @@
         # determine which packs need changing
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if not hint or pack.name in hint:
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
                 pack_operations[-1][0] += pack.get_revision_count()
                 pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
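Besides documenting intent, the new test fixes an edge case: with the old `if not hint`, an empty hint list was indistinguishable from no hint at all, so packing with `hint=[]` would repack everything. `if hint is None` keeps "no hint" and "empty hint" distinct. The difference, reduced to the predicate itself (a runnable sketch; 'a1b2c3' is a hypothetical pack name):

    hint = []
    assert (not hint or 'a1b2c3' in hint) is True        # old test: repacks anyway
    assert (hint is None or 'a1b2c3' in hint) is False   # new test: respects []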
@@ -1665,7 +1683,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix')
+                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1690,7 +1708,8 @@
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True)
+                chk_index = self._make_index(name, '.cix', resume=True,
+                                             unlimited_cache=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1726,7 +1745,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1735,7 +1754,8 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size)
+        return self._index_class(transport, index_name, index_size,
+                                 unlimited_cache=unlimited_cache)
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1769,8 +1789,13 @@
         :param return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
@@ -1778,8 +1803,12 @@
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1811,14 +1840,22 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
+                except KeyError:
+                    if ignore_missing:
+                        continue
+                    raise
 
     def reset(self):
         """Clear all cached data."""
@@ -1857,6 +1894,7 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
+        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
@@ -1875,7 +1913,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1921,7 +1959,7 @@
                 added.append(name)
         return removed, added, modified
 
-    def _save_pack_names(self, clear_obsolete_packs=False):
+    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
@@ -1931,12 +1969,16 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1944,14 +1986,25 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = None
+                if obsolete_packs:
+                    to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        if obsolete_packs:
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written to
+            #       disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
@@ -1972,8 +2025,12 @@
         if first_read:
             return True
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at then start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -1988,15 +2045,28 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
+        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if ext == '.pack':
+                found.append(name)
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
+        return found
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
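`_clear_obsolete_packs` now does double duty: it reports every pack identifier it saw, so callers can tell which packs another process already obsoleted, and it skips anything in `preserve`, so a concurrent repack cannot delete packs we are about to obsolete ourselves. The scan-and-filter logic in isolation (a runnable sketch using `os.path.splitext` as a stand-in for `osutils.splitext`; the real method deletes over a transport rather than returning lists):

    import os.path

    def clear_obsolete(filenames, preserve):
        found, deleted = [], []
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            deleted.append(filename)
        return found, deleted

    found, deleted = clear_obsolete(
        ['a1.pack', 'a1.rix', 'b2.pack'], preserve=set(['b2']))
    # found == ['a1', 'b2']; 'b2.pack' survives, everything else is deleted.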
@@ -2029,24 +2099,21 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone.  If they're not there we shouldn't fail in this
-                # case.  -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone.  But they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                try:
-                    self._remove_pack_indices(resumed_pack)
-                except KeyError:
-                    pass
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]
 
     def _remove_resumed_pack_indices(self):
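Both try/finally pyramids collapse into bzrlib's `cleanup.OperationWithCleanups`: cleanups registered up front run even when the main callable raises, and the main callable's exception is the one that propagates rather than being masked by a failing cleanup. Together with the new `ignore_missing=True` mode of `_remove_pack_indices` (added earlier in this diff), this replaces the ad-hoc `except KeyError: pass`. The pattern in isolation (a runnable sketch; the Demo classes are hypothetical stand-ins for the pack machinery):

    from bzrlib import cleanup

    class DemoPack(object):
        def abort(self):
            raise RuntimeError('abort failed')

    class DemoCollection(object):
        _new_pack = DemoPack()
        def _remove_pack_indices(self, pack, ignore_missing=False):
            pass

    collection = DemoCollection()
    operation = cleanup.OperationWithCleanups(collection._new_pack.abort)
    operation.add_cleanup(setattr, collection, '_new_pack', None)
    operation.add_cleanup(collection._remove_pack_indices, collection._new_pack,
                          ignore_missing=True)
    try:
        operation.run_simple()
    except RuntimeError:
        assert collection._new_pack is None  # cleanups ran despite the failure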
@@ -2054,6 +2121,16 @@
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
 
+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.
+
+        :returns: list of strs, summarising any problems found.  If the list is
+            empty no problems were found.
+        """
+        # The base implementation does no checks.  GCRepositoryPackCollection
+        # overrides this.
+        return []
+
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
@@ -2068,14 +2145,19 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                  % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+        if problems:
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        should_autopack = False
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            should_autopack = True
+            any_new_content = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2086,13 +2168,16 @@
             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            should_autopack = True
+            any_new_content = True
         del self._resumed_packs[:]
-        if should_autopack:
-            if not self.autopack():
+        if any_new_content:
+            result = self.autopack()
+            if not result:
                 # when autopack takes no steps, the names list is still
                 # unsaved.
                 return self._save_pack_names()
+            return result
+        return []
 
     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2200,67 +2285,15 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
 
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-                return
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                    " 'bzr upgrade --1.6.1-rich-root'"
-                    % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)
 
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
 
-    def _find_inconsistent_revision_parents(self):
-        """Find revisions with incorrectly cached parents.
-
-        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
-            parents-in-revision).
-        """
-        if not self.is_locked():
-            raise errors.ObjectNotLocked(self)
-        pb = ui.ui_factory.nested_progress_bar()
-        result = []
-        try:
-            revision_nodes = self._pack_collection.revision_index \
-                .combined_index.iter_all_entries()
-            index_positions = []
-            # Get the cached index values for all revisions, and also the
-            # location in each index of the revision text so we can perform
-            # linear IO.
-            for index, key, value, refs in revision_nodes:
-                node = (index, key, value, refs)
-                index_memo = self.revisions._index._node_to_position(node)
-                if index_memo[0] != index:
-                    raise AssertionError('%r != %r' % (index_memo[0], index))
-                index_positions.append((index_memo, key[0],
-                                       tuple(parent[0] for parent in refs[0])))
-                pb.update("Reading revision index", 0, 0)
-            index_positions.sort()
-            batch_size = 1000
-            pb.update("Checking cached revision graph", 0,
-                      len(index_positions))
-            for offset in xrange(0, len(index_positions), 1000):
-                pb.update("Checking cached revision graph", offset)
-                to_query = index_positions[offset:offset + batch_size]
-                if not to_query:
-                    break
-                rev_ids = [item[1] for item in to_query]
-                revs = self.get_revisions(rev_ids)
-                for revision, item in zip(revs, to_query):
-                    index_parents = item[2]
-                    rev_parents = tuple(revision.parent_ids)
-                    if index_parents != rev_parents:
-                        result.append((revision.revision_id, index_parents,
-                                       rev_parents))
-        finally:
-            pb.finished()
-        return result
-
     def _get_source(self, to_format):
         if to_format.network_name() == self._format.network_name():
             return KnitPackStreamSource(self, to_format)
@@ -2278,12 +2311,13 @@
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()
+        return hint
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None
         return tokens
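Two related fixes here: `_commit_write_group` now returns the pack collection's result (the "hint" of newly written pack names produced by the `_commit_write_group` changes above) instead of discarding it, and the key-dependency map is cleared through its new `clear()` method only after the commit has succeeded, rather than by reaching into its `refs` dict beforehand. A sketch of the hint round-trip (the flow is inferred from this file; `repo` is a hypothetical KnitPackRepository):

    hint = repo._commit_write_group()      # e.g. ['a1b2c3d4e5']
    # ... later, repacking can be restricted to just those packs:
    repo._pack_collection.pack(hint=hint)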
@@ -2318,6 +2352,9 @@
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
@@ -2330,6 +2367,9 @@
         else:
             self.control_files.lock_read()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
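Both lock paths now remember the previous lock mode in `_prev_lock` and, when the `relock` debug flag is set, note every time an already-locked repository is locked again in the same mode; the flag is normally enabled from the command line, e.g. `bzr -Drelock <command>`. The gate itself, in isolation (a runnable sketch):

    from bzrlib import debug
    from bzrlib.trace import note

    debug.debug_flags.add('relock')        # what -Drelock does, in effect
    if 'relock' in debug.debug_flags:
        note('%r was read locked again', object())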
@@ -2363,6 +2403,7 @@
         packer = ReconcilePacker(collection, packs, extension, revs)
         return packer.pack(pb)
 
+    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
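The `only_raises` decorator (imported near the top of this diff) declares that `unlock` may only raise `LockNotHeld` or `LockBroken`; assuming its usual logged-and-suppressed behaviour, any other exception escaping the method is recorded and swallowed, so a failure during unlocking cannot mask the error that triggered the unlock in the first place. A sketch of the effect:

    from bzrlib import errors
    from bzrlib.decorators import only_raises

    class Demo(object):
        @only_raises(errors.LockNotHeld, errors.LockBroken)
        def unlock(self):
            raise RuntimeError('cleanup failed')

    Demo().unlock()    # logged and suppressed; callers see no RuntimeError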
@@ -2565,9 +2606,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"
 
-    def check_conversion_target(self, target_format):
-        pass
-
 
 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2599,14 +2637,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2645,11 +2675,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2696,9 +2721,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"
 
-    def check_conversion_target(self, target_format):
-        pass
-
 
 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2731,11 +2753,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2782,11 +2799,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2830,9 +2842,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"
 
-    def check_conversion_target(self, target_format):
-        pass
-
 
 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2862,11 +2871,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2908,14 +2912,6 @@
 
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "