~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/repofmt/pack_repo.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-12-18 09:05:13 UTC
  • mfrom: (4505.6.30 lp-login-oauth-2)
  • Revision ID: pqm@pqm.ubuntu.com-20091218090513-kzwkjw7rdf7bahqi
(jml) Add an lp-mirror command to request that Launchpad mirror a branch
now. Add an API for interacting with launchpadlib using Bazaar.

--- bzrlib/repofmt/pack_repo.py (previous)
+++ bzrlib/repofmt/pack_repo.py (this revision)
@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
+# Copyright (C) 2007-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
 
 from bzrlib import (
     chk_map,
+    cleanup,
     debug,
     graph,
     osutils,
@@ -48,6 +49,7 @@
 """)
 from bzrlib import (
     bzrdir,
+    btree_index,
     errors,
     lockable_files,
     lockdir,
@@ -55,19 +57,17 @@
     )
 
 from bzrlib.decorators import needs_write_lock, only_raises
-from bzrlib.btree_index import (
-    BTreeGraphIndex,
-    BTreeBuilder,
-    )
 from bzrlib.index import (
     GraphIndex,
     InMemoryGraphIndex,
     )
+from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
     CommitBuilder,
     MetaDirRepositoryFormat,
     RepositoryFormat,
+    RepositoryWriteLockResult,
     RootCommitBuilder,
     StreamSource,
     )
@@ -228,11 +228,13 @@
         unlimited_cache = False
         if index_type == 'chk':
             unlimited_cache = True
-        setattr(self, index_type + '_index',
-            self.index_class(self.index_transport,
-                self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+        index = self.index_class(self.index_transport,
+                    self.index_name(index_type, self.name),
+                    self.index_sizes[self.index_offset(index_type)],
+                    unlimited_cache=unlimited_cache)
+        if index_type == 'chk':
+            index._leaf_factory = btree_index._gcchk_factory
+        setattr(self, index_type + '_index', index)
 
 
 class ExistingPack(Pack):
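
The rewritten hunk above binds the freshly built index to a local name before
publishing it as an attribute, which is what makes the chk-specific
customisation possible. A minimal sketch of that construct-customise-publish
shape, using stand-in classes rather than the real bzrlib ones (_StubIndex and
_StubPack are hypothetical):

    class _StubIndex(object):
        # Stand-in for self.index_class; not a bzrlib API.
        def __init__(self, name, unlimited_cache=False):
            self.name = name
            self.unlimited_cache = unlimited_cache
            self._leaf_factory = 'default-leaf'

    class _StubPack(object):
        def _make_index(self, index_type):
            # chk indices are keyed by content hashes, so they get an
            # unbounded cache, mirroring the unlimited_cache logic above.
            unlimited_cache = (index_type == 'chk')
            index = _StubIndex(index_type, unlimited_cache=unlimited_cache)
            if index_type == 'chk':
                # Customise before publishing, the way the real code swaps
                # in btree_index._gcchk_factory.
                index._leaf_factory = 'gc-chk-leaf'
            setattr(self, index_type + '_index', index)

    pack = _StubPack()
    pack._make_index('chk')
    assert pack.chk_index.unlimited_cache
    assert pack.chk_index._leaf_factory == 'gc-chk-leaf'
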
@@ -586,26 +588,6 @@
                                              flush_func=flush_func)
         self.add_callback = None
 
-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # there is one.
-        self.add_callback = None
-
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
 
@@ -618,7 +600,7 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)
 
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -644,16 +626,18 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
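
remove_index now locates the entry's position once and deletes from _indices
and the new _index_names at that position, rather than calling remove(), so
the two lists stay aligned entry-for-entry. The same bookkeeping on plain
lists (not the real CombinedGraphIndex):

    indices = ['rev-ix-a', 'rev-ix-b', 'rev-ix-c']
    index_names = ['pack-a', 'pack-b', 'pack-c']

    def remove_index(index):
        # Delete by position so both parallel lists stay in step.
        pos = indices.index(index)
        del indices[pos]
        del index_names[pos]

    remove_index('rev-ix-b')
    assert indices == ['rev-ix-a', 'rev-ix-c']
    assert index_names == ['pack-a', 'pack-c']
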
@@ -1415,14 +1399,26 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
+
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
 
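Each aggregate's CombinedGraphIndex is handed the set of all the others, so a
hit in one can reorder the pack search in its siblings. A sketch of the wiring
with a stub class (the stub's set_sibling_indices merely records the set; the
real method feeds search-order hints):

    class _StubCombinedIndex(object):
        def __init__(self, name):
            self.name = name
            self.siblings = None
        def set_sibling_indices(self, siblings):
            self.siblings = siblings

    all_combined = [_StubCombinedIndex(name) for name in
                    ('revision', 'inventory', 'text', 'signature')]
    for combined_idx in all_combined:
        # every index learns about all of the others, but never itself
        combined_idx.set_sibling_indices(
            set(all_combined).difference([combined_idx]))
    assert all(len(c.siblings) == 3 for c in all_combined)
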
@@ -1542,10 +1538,11 @@
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
         return result
 
     def _flush_new_pack(self):
@@ -1564,7 +1561,7 @@
         """Is the collection already packed?"""
         return not (self.repo._format.pack_compresses or (len(self._names) > 1))
 
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
@@ -1586,6 +1583,9 @@
                 pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
 
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
+
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
 
@@ -1679,7 +1679,7 @@
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+                chk_index = self._make_index(name, '.cix', is_chk=True)
             else:
                 chk_index = None
             result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1705,7 +1705,7 @@
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
                 chk_index = self._make_index(name, '.cix', resume=True,
-                                             unlimited_cache=True)
+                                             is_chk=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1741,7 +1741,7 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
 
-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False, is_chk=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
         if resume:
@@ -1750,8 +1750,11 @@
         else:
             transport = self._index_transport
             index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        index = self._index_class(transport, index_name, index_size,
+                                  unlimited_cache=is_chk)
+        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
+            index._leaf_factory = btree_index._gcchk_factory
+        return index
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1785,8 +1788,13 @@
         :param return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
@@ -1794,8 +1802,12 @@
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1827,14 +1839,22 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
+                except KeyError:
+                    if ignore_missing:
+                        continue
+                    raise
 
     def reset(self):
         """Clear all cached data."""
@@ -1873,6 +1893,7 @@
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
+        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
@@ -1891,7 +1912,7 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1937,7 +1958,7 @@
                 added.append(name)
         return removed, added, modified
 
-    def _save_pack_names(self, clear_obsolete_packs=False):
+    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
@@ -1947,12 +1968,16 @@
 
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
@@ -1960,14 +1985,25 @@
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = None
+                if obsolete_packs:
+                    to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        if obsolete_packs:
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written to
+            #       disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]
 
     def reload_pack_names(self):
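
With the new obsolete_packs parameter, _save_pack_names retires the packs
itself, skipping any whose names _clear_obsolete_packs reported as already
sitting in obsolete_packs/. Just that filtering step, isolated (class and
names hypothetical):

    class _PackStub(object):
        def __init__(self, name):
            self.name = name

    def packs_still_to_obsolete(obsolete_packs, already_obsolete):
        # already_obsolete: names found (and preserved) in obsolete_packs/
        return [o for o in obsolete_packs if o.name not in already_obsolete]

    remaining = packs_still_to_obsolete(
        [_PackStub('aaaa'), _PackStub('bbbb')], set(['bbbb']))
    assert [p.name for p in remaining] == ['aaaa']
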
@@ -1988,8 +2024,12 @@
         if first_read:
             return True
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at the start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -2004,15 +2044,28 @@
             raise
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
+        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if ext == '.pack':
+                found.append(name)
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
+        return found
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
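
_clear_obsolete_packs now reports the pack stems it found and can be told to
preserve some of them; _save_pack_names uses that to avoid deleting packs it
is just about to move into obsolete_packs/ itself. The preserve logic over a
plain listing (filenames hypothetical):

    import os.path

    def clear_obsolete(filenames, preserve=None):
        if preserve is None:
            preserve = set()
        found, deleted = [], []
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            deleted.append(filename)  # the real code deletes via a transport
        return found, deleted

    found, deleted = clear_obsolete(
        ['old.pack', 'old.rix', 'keep.pack'], preserve=set(['keep']))
    assert found == ['old', 'keep']
    assert 'keep.pack' not in deleted
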
@@ -2045,24 +2098,21 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone.  If they're not there we shouldn't fail in this
-                # case.  -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone.  But if they're not there we shouldn't fail in
+            # this case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                try:
-                    self._remove_pack_indices(resumed_pack)
-                except KeyError:
-                    pass
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]
 
     def _remove_resumed_pack_indices(self):
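
The nested try/finally blocks become cleanup.OperationWithCleanups: the
wrapped callable runs first, and every registered cleanup still runs even if
the callable raises. A sketch assuming only the add_cleanup/run_simple API
visible in this hunk:

    from bzrlib import cleanup

    def abort_new_pack(new_pack, remove_pack_indices):
        operation = cleanup.OperationWithCleanups(new_pack.abort)
        # the cleanup runs whether or not new_pack.abort() raises
        operation.add_cleanup(remove_pack_indices, new_pack,
                              ignore_missing=True)
        operation.run_simple()
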
@@ -2294,6 +2344,10 @@
         return self._write_lock_count
 
     def lock_write(self, token=None):
+        """Lock the repository for writes.
+
+        :return: A bzrlib.repository.RepositoryWriteLockResult.
+        """
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)
@@ -2308,8 +2362,13 @@
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)
 
     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
@@ -2322,6 +2381,7 @@
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
+        return LogicalLockResult(self.unlock)
 
     def leave_lock_in_place(self):
         # not supported - raise an error
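
lock_write and lock_read now hand back result objects wrapping the unlock
callable, giving callers something concrete to pair with the acquisition. A
usage sketch (repo stands for an open KnitPackRepository; only the .unlock
attribute these constructors receive is assumed):

    lock_result = repo.lock_write()
    try:
        pass  # mutate the repository under the write lock
    finally:
        lock_result.unlock()
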
@@ -2332,13 +2392,13 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
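
Together with the pack-collection change further up, this exposes the new
keyword on the user-facing Repository.pack. A usage sketch (repo again stands
for a write-lockable pack repository):

    # Repack into a single pack and empty obsolete_packs/ afterwards.
    repo.pack(hint=None, clean_obsolete_packs=True)
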
@@ -2500,7 +2560,9 @@
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository
 
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2569,6 +2631,7 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
     supports_tree_reference = True
     @property
     def _serializer(self):
@@ -2768,8 +2831,8 @@
     _commit_builder_class = PackCommitBuilder
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2804,8 +2867,8 @@
     supports_tree_reference = False # no subtrees
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):
@@ -2842,11 +2905,12 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
+    experimental = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
 
     @property
     def _serializer(self):