~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Gary van der Merwe
  • Date: 2010-08-02 19:56:52 UTC
  • mfrom: (5050.3.18 2.2)
  • mto: (5050.3.19 2.2)
  • mto: This revision was merged to the branch mainline in revision 5371.
  • Revision ID: garyvdm@gmail.com-20100802195652-o1ppjemhwrr98i61
Merge lp:bzr/2.2.

@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
+# Copyright (C) 2007-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@

 from bzrlib import (
     chk_map,
+    cleanup,
     debug,
     graph,
     osutils,
@@ -54,7 +55,7 @@
     revision as _mod_revision,
     )

-from bzrlib.decorators import needs_write_lock
+from bzrlib.decorators import needs_write_lock, only_raises
 from bzrlib.btree_index import (
     BTreeGraphIndex,
     BTreeBuilder,
@@ -63,16 +64,19 @@
     GraphIndex,
     InMemoryGraphIndex,
     )
+from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
     CommitBuilder,
     MetaDirRepositoryFormat,
     RepositoryFormat,
+    RepositoryWriteLockResult,
     RootCommitBuilder,
     StreamSource,
     )
 from bzrlib.trace import (
     mutter,
+    note,
     warning,
     )

@@ -224,10 +228,14 @@
         return self.index_name('text', name)

     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))


 class ExistingPack(Pack):
@@ -422,6 +430,8 @@
         self._writer.begin()
         # what state is the pack in? (open, finished, aborted)
         self._state = 'open'
+        # no name until we finish writing the content
+        self.name = None

     def abort(self):
         """Cancel creating this pack."""
@@ -448,6 +458,14 @@
             self.signature_index.key_count() or
             (self.chk_index is not None and self.chk_index.key_count()))

+    def finish_content(self):
+        if self.name is not None:
+            return
+        self._writer.end()
+        if self._buffer[1]:
+            self._write_data('', flush=True)
+        self.name = self._hash.hexdigest()
+
     def finish(self, suspend=False):
         """Finish the new pack.

@@ -459,10 +477,7 @@
          - stores the index size tuple for the pack in the index_sizes
           attribute.
         """
-        self._writer.end()
-        if self._buffer[1]:
-            self._write_data('', flush=True)
-        self.name = self._hash.hexdigest()
+        self.finish_content()
         if not suspend:
             self._check_references()
         # write indices
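
The write-out logic that finish() used to do inline now lives in finish_content(), guarded by self.name so a second call is a no-op. A minimal sketch of that idempotent-finish pattern, with illustrative names (Writer and its helpers are not bzrlib API):

    import hashlib

    class Writer(object):
        # Illustrative only: the idempotent-finish pattern used above.

        def __init__(self):
            self.name = None           # no name until content is finished
            self._hash = hashlib.md5()
            self._buffer = []

        def write(self, data):
            self._buffer.append(data)
            self._hash.update(data)

        def finish_content(self):
            if self.name is not None:  # already finished: calling again is harmless
                return
            self._buffer = []          # flush any buffered data (stand-in)
            self.name = self._hash.hexdigest()

        def finish(self):
            self.finish_content()
            # ... write indices etc. (elided)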
@@ -574,26 +589,6 @@
                                              flush_func=flush_func)
         self.add_callback = None

-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # there is one.
-        self.add_callback = None
-
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.

@@ -606,7 +601,7 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)

     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -632,16 +627,18 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None

-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.

         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
@@ -1105,7 +1102,7 @@
             iterator is a tuple with:
             index, readv_vector, node_vector. readv_vector is a list ready to
             hand to the transport readv method, and node_vector is a list of
-            (key, eol_flag, references) for the the node retrieved by the
+            (key, eol_flag, references) for the node retrieved by the
             matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1403,14 +1400,26 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []

+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
+
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.

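The wiring is symmetric: every CombinedGraphIndex receives the set of all its peers, so a hit in one index can steer the others toward the same pack names. The same pattern in isolation (Peer is an illustrative stand-in, not bzrlib API):

    # Generic form of the wiring above: each peer learns about all the others.
    class Peer(object):
        def set_sibling_indices(self, siblings):
            self.siblings = siblings

    peers = [Peer(), Peer(), Peer()]
    for peer in peers:
        peer.set_sibling_indices(set(peers).difference([peer]))
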
@@ -1530,10 +1539,11 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
         return result

     def _flush_new_pack(self):
@@ -1552,7 +1562,7 @@
         """Is the collection already packed?"""
         return not (self.repo._format.pack_compresses or (len(self._names) > 1))

-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
@@ -1567,11 +1577,16 @@
         # determine which packs need changing
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if not hint or pack.name in hint:
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
                 pack_operations[-1][0] += pack.get_revision_count()
                 pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)

+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
+
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.

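With the new flag a full pack can also empty the obsolete_packs directory. A hedged usage sketch (repo stands for an already-open pack repository; the repository-level pack() later in this diff forwards the same flag, and @needs_write_lock handles the locking):

    # Pack everything down to a single pack, then delete the packs that
    # just became obsolete.
    repo.pack(clean_obsolete_packs=True)

    # Or repack only the packs named by a hint (pack names are hex digests;
    # the value below is a placeholder).
    repo.pack(hint=['a1b2c3'], clean_obsolete_packs=False)
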
@@ -1665,7 +1680,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix')
+                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1690,7 +1705,8 @@
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True)
+                chk_index = self._make_index(name, '.cix', resume=True,
+                                             unlimited_cache=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1726,7 +1742,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()

-    def _make_index(self, name, suffix, resume=False):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1735,7 +1751,8 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size)
+        return self._index_class(transport, index_name, index_size,
+                                 unlimited_cache=unlimited_cache)

     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1769,8 +1786,13 @@
         :param return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
@@ -1778,8 +1800,12 @@
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))

     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1811,14 +1837,22 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)

-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
+                except KeyError:
+                    if ignore_missing:
+                        continue
+                    raise

     def reset(self):
         """Clear all cached data."""
@@ -1857,6 +1891,7 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
+        orig_disk_nodes = set(disk_nodes)

         # do a two-way diff against our original content
         current_nodes = set()
@@ -1875,7 +1910,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)

-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes

     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1921,7 +1956,7 @@
                 added.append(name)
         return removed, added, modified

-    def _save_pack_names(self, clear_obsolete_packs=False):
+    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
         """Save the list of packs.

         This will take out the mutex around the pack names list for the
@@ -1931,12 +1966,16 @@

         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1944,14 +1983,25 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = None
+                if obsolete_packs:
+                    to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        if obsolete_packs:
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written to
+            #       disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]

     def reload_pack_names(self):
@@ -1972,8 +2022,12 @@
         if first_read:
             return True
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at the start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -1988,15 +2042,28 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())

-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
+        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if ext == '.pack':
+                found.append(name)
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
+        return found

     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
@@ -2029,24 +2096,21 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone.  If they're not there we shouldn't fail in this
-                # case.  -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone.  If they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                try:
-                    self._remove_pack_indices(resumed_pack)
-                except KeyError:
-                    pass
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]

     def _remove_resumed_pack_indices(self):
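
The try/finally nests are replaced by bzrlib.cleanup.OperationWithCleanups: the wrapped callable runs first, then every registered cleanup runs, and a cleanup failure is logged rather than masking an exception from the operation itself. A minimal sketch of the pattern (the demo functions are illustrative):

    from bzrlib import cleanup

    def abort_demo():
        print 'aborting'

    def cleanup_demo(label):
        print 'cleaning up', label

    operation = cleanup.OperationWithCleanups(abort_demo)
    operation.add_cleanup(cleanup_demo, 'indices')
    operation.add_cleanup(cleanup_demo, 'state')
    # run_simple() calls abort_demo() and then runs both cleanups, even if
    # abort_demo() raised; the original exception still propagates.
    operation.run_simple()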
@@ -2054,6 +2118,16 @@
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]

+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.
+
+        :returns: list of strs, summarising any problems found.  If the list is
+            empty no problems were found.
+        """
+        # The base implementation does no checks.  GCRepositoryPackCollection
+        # overrides this.
+        return []
+
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
@@ -2068,14 +2142,19 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                  % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+        if problems:
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        should_autopack = False
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            should_autopack = True
+            any_new_content = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2086,13 +2165,16 @@
             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            should_autopack = True
+            any_new_content = True
         del self._resumed_packs[:]
-        if should_autopack:
-            if not self.autopack():
+        if any_new_content:
+            result = self.autopack()
+            if not result:
                 # when autopack takes no steps, the names list is still
                 # unsaved.
                 return self._save_pack_names()
+            return result
+        return []

     def _suspend_write_group(self):
         tokens = [pack.name for pack in self._resumed_packs]
@@ -2200,19 +2282,13 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False

-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-                return
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                    " 'bzr upgrade --1.6.1-rich-root'"
-                    % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)

     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()

     def _get_source(self, to_format):
@@ -2232,13 +2308,14 @@
         self._pack_collection._start_write_group()

     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()
+        return hint

     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None
         return tokens

@@ -2265,6 +2342,10 @@
         return self._write_lock_count

     def lock_write(self, token=None):
+        """Lock the repository for writes.
+
+        :return: A bzrlib.repository.RepositoryWriteLockResult.
+        """
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)
@@ -2272,21 +2353,33 @@
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)

     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
+        return LogicalLockResult(self.unlock)

     def leave_lock_in_place(self):
         # not supported - raise an error
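
Both lock methods now hand back a result object whose unlock attribute releases the lock, so callers can pair acquire and release through the result alone; the -Drelock debug flag additionally notes repeated locking. A hedged usage sketch (repo is an open pack repository):

    lock_result = repo.lock_write()   # a RepositoryWriteLockResult
    try:
        pass  # ... modify the repository ...
    finally:
        lock_result.unlock()          # equivalent to repo.unlock()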
@@ -2297,13 +2390,13 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)

     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.

         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)

     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2317,6 +2410,7 @@
         packer = ReconcilePacker(collection, packs, extension, revs)
         return packer.pack(pb)

+    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()
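
only_raises (imported at the top of this diff) declares that unlock may only raise the listed lock errors: any other exception escaping the decorated function is logged quietly and suppressed, so a failing unlock in a finally block cannot mask an error that is already propagating. A minimal sketch of applying it (Demo is illustrative):

    from bzrlib import errors
    from bzrlib.decorators import only_raises

    class Demo(object):

        @only_raises(errors.LockNotHeld, errors.LockBroken)
        def unlock(self):
            # A LockNotHeld/LockBroken raised here propagates normally;
            # anything else is logged quietly and swallowed.
            raise RuntimeError('this will be suppressed, not raised')

    Demo().unlock()   # returns None; the RuntimeError is only logged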
@@ -2464,7 +2558,9 @@
         utf8_files = [('format', self.get_format_string())]

         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository

     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2519,9 +2615,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"

-    def check_conversion_target(self, target_format):
-        pass
-

 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.
@@ -2536,6 +2629,7 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
@@ -2553,14 +2647,6 @@

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2599,11 +2685,6 @@

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2650,9 +2731,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"

-    def check_conversion_target(self, target_format):
-        pass
-

 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.
@@ -2685,11 +2763,6 @@

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2736,11 +2809,6 @@

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2784,9 +2852,6 @@
         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"

-    def check_conversion_target(self, target_format):
-        pass
-

 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.
@@ -2816,11 +2881,6 @@

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2843,6 +2903,7 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
@@ -2862,14 +2923,6 @@

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "