-# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
+# Copyright (C) 2007-2010 Canonical Ltd
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by

     revision as _mod_revision,
-from bzrlib.decorators import needs_write_lock
+from bzrlib.decorators import needs_write_lock, only_raises
 from bzrlib.btree_index import (
     InMemoryGraphIndex,
+from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
     MetaDirRepositoryFormat,
+    RepositoryWriteLockResult,
 from bzrlib.trace import (

         return self.index_name('text', name)

     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))

 class ExistingPack(Pack):
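
The hunk above threads an unlimited_cache flag through _replace_index_with_readonly so that only the chk index is reopened read-only with an unbounded cache. A minimal standalone sketch of the same swap-by-setattr pattern (DummyIndex and DummyPack are illustrative stand-ins, not bzrlib classes):

# Sketch only: swap an attribute like 'chk_index' for a freshly constructed
# read-only index object, passing unlimited_cache just for the chk type.
class DummyIndex(object):

    def __init__(self, name, size, unlimited_cache=False):
        self.name = name
        self.size = size
        self.unlimited_cache = unlimited_cache


class DummyPack(object):

    index_class = DummyIndex

    def __init__(self):
        self.index_sizes = {'text': 10, 'chk': 99}
        self.text_index = None
        self.chk_index = None

    def _replace_index_with_readonly(self, index_type):
        # only the chk index gets the unbounded cache; everything else keeps
        # the default bounded cache
        unlimited_cache = (index_type == 'chk')
        setattr(self, index_type + '_index',
                self.index_class(index_type, self.index_sizes[index_type],
                                 unlimited_cache=unlimited_cache))


pack = DummyPack()
pack._replace_index_with_readonly('chk')
assert pack.chk_index.unlimited_cache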

             flush_func=flush_func)
         self.add_callback = None

-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        self.add_callback = None

     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.

         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)

     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.

         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None

-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.

         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.

         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
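
With remove_index losing its pack argument, the aggregate now locates the index's position once and deletes the matching entries from both _indices and _index_names, so the two lists cannot drift apart. A simplified standalone sketch of that bookkeeping (SimpleAggregate is an illustrative stand-in, not the bzrlib AggregateIndex):

class SimpleAggregate(object):

    def __init__(self):
        self.indices = []       # stand-in for combined_index._indices
        self.index_names = []   # stand-in for combined_index._index_names

    def add_index(self, index, name):
        # newest index goes to the front so it is searched first
        self.indices.insert(0, index)
        self.index_names.insert(0, name)

    def remove_index(self, index):
        # find the position once, then delete from both lists so they
        # always stay in step
        pos = self.indices.index(index)
        del self.indices[pos]
        del self.index_names[pos]


agg = SimpleAggregate()
agg.add_index({'a': 1}, 'pack-a')
agg.add_index({'b': 2}, 'pack-b')
agg.remove_index({'a': 1})
assert agg.index_names == ['pack-b']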

         iterator is a tuple with:
         index, readv_vector, node_vector. readv_vector is a list ready to
         hand to the transport readv method, and node_vector is a list of
-        (key, eol_flag, references) for the the node retrieved by the
+        (key, eol_flag, references) for the node retrieved by the
         matching readv_vector.

         # group by pack so we do one readv per pack
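
The docstring above promises one readv batch per pack. A hedged standalone sketch of that grouping step (group_requests_by_pack and its inputs are illustrative names, not bzrlib internals):

from collections import defaultdict

def group_requests_by_pack(key_to_pack, requests):
    """requests: iterable of (key, offset, length) tuples."""
    per_pack = defaultdict(lambda: ([], []))  # pack -> (readv_vector, node_vector)
    for key, offset, length in requests:
        readv_vector, node_vector = per_pack[key_to_pack[key]]
        readv_vector.append((offset, length))
        node_vector.append(key)
    # one (pack, readv_vector, node_vector) triple per pack file
    return [(pack, rv, nv) for pack, (rv, nv) in per_pack.items()]

batches = group_requests_by_pack(
    {'rev-1': 'pack-a', 'rev-2': 'pack-a', 'rev-3': 'pack-b'},
    [('rev-1', 0, 100), ('rev-2', 100, 50), ('rev-3', 0, 80)])
assert len(batches) == 2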

         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)

             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []

+        return '%s(%r)' % (self.__class__.__name__, self.repo)

     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.

                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+            obsolete_packs=to_be_obsoleted)

     def _flush_new_pack(self):

         """Is the collection already packed?"""
         return not (self.repo._format.pack_compresses or (len(self._names) > 1))

-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)

             pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()

     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.

         txt_index = self._make_index(name, '.tix')
         sig_index = self._make_index(name, '.six')
         if self.chk_index is not None:
-            chk_index = self._make_index(name, '.cix')
+            chk_index = self._make_index(name, '.cix', unlimited_cache=True)

             chk_index = None
         result = ExistingPack(self._pack_transport, name, rev_index,

         txt_index = self._make_index(name, '.tix', resume=True)
         sig_index = self._make_index(name, '.six', resume=True)
         if self.chk_index is not None:
-            chk_index = self._make_index(name, '.cix', resume=True)
+            chk_index = self._make_index(name, '.cix', resume=True,
+                unlimited_cache=True)

             chk_index = None
         result = self.resumed_pack_factory(name, rev_index, inv_index,

         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()

-    def _make_index(self, name, suffix, resume=False):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix

         transport = self._index_transport
         index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size)
+        return self._index_class(transport, index_name, index_size,
+                                 unlimited_cache=unlimited_cache)

     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.

         :param return: None.

         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"

     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.

         self._remove_pack_indices(pack)
         self.packs.remove(pack)

-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                aggregate_index.remove_index(pack_index)

     def reset(self):
         """Clear all cached data."""

         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
+        orig_disk_nodes = set(disk_nodes)

         # do a two-way diff against our original content
         current_nodes = set()

         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes

     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.

             added.append(name)
         return removed, added, modified

-    def _save_pack_names(self, clear_obsolete_packs=False):
+    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
         """Save the list of packs.

         This will take out the mutex around the pack names list for the

         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.

+        already_obsolete = []
         self.lock_names()

             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one

                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)

             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+            # TODO: We could add one more condition here. "if o.name not in
+            # orig_disk_nodes and o != the new_pack we haven't written to
+            # disk yet. However, the new pack object is not easily
+            # accessible here (it would have to be passed through the
+            # autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]
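
In the new flow the caller tells _save_pack_names which packs are about to become obsolete: the pack-names list is written, the obsolete directory is cleared while preserving those packs, and only packs that were not already obsolete get moved aside. A rough standalone sketch of that sequencing, using plain pack names and an in-memory state dict instead of transports (every name here is illustrative):

def clear_obsolete_dir(state, preserve):
    # report what was already in the obsolete area, deleting everything
    # except the names we were told to preserve
    found = []
    for name in list(state['obsolete']):
        found.append(name)
        if name in preserve:
            continue
        state['obsolete'].discard(name)
    return found


def save_pack_names(state, clear_obsolete_packs=False, obsolete_packs=None):
    already_obsolete = []
    if clear_obsolete_packs:
        to_preserve = set(obsolete_packs or [])
        already_obsolete = clear_obsolete_dir(state, to_preserve)
    if obsolete_packs:
        # only obsolete the packs that are not already in the obsolete area
        remaining = [name for name in obsolete_packs
                     if name not in already_obsolete]
        state['obsolete'].update(remaining)
        state['active'] -= set(remaining)
    return state


state = {'active': set(['p1', 'p2', 'p3']), 'obsolete': set(['old1'])}
save_pack_names(state, clear_obsolete_packs=True, obsolete_packs=['p1'])
assert state == {'active': set(['p2', 'p3']), 'obsolete': set(['p1'])}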

     def reload_pack_names(self):

         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at then start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:

             raise errors.RetryAutopack(self.repo, False, sys.exc_info())

-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.

         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if name in preserve:

                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"

     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.

         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-                self._new_pack.abort()
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone. If they're not there we shouldn't fail in this
-                # case. -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone. But they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-                resumed_pack.abort()
-                # See comment in previous finally block.
-                self._remove_pack_indices(resumed_pack)
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]

     def _remove_resumed_pack_indices(self):

             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
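
The abort path now uses bzrlib.cleanup.OperationWithCleanups instead of nested try/finally blocks: the primary call runs first, then every registered cleanup runs even if one of them fails. A rough standalone approximation of that behaviour (the real helper in bzrlib.cleanup reports cleanup errors rather than discarding them):

class OperationWithCleanups(object):

    def __init__(self, func):
        self.func = func
        self.cleanups = []

    def add_cleanup(self, func, *args, **kwargs):
        self.cleanups.append((func, args, kwargs))

    def run_simple(self):
        try:
            return self.func()
        finally:
            for func, args, kwargs in self.cleanups:
                try:
                    func(*args, **kwargs)
                except Exception:
                    # a failing cleanup must not stop the remaining cleanups
                    pass


log = []
op = OperationWithCleanups(lambda: log.append('abort'))
op.add_cleanup(log.append, 'cleanup-1')
op.add_cleanup(log.append, 'cleanup-2')
op.run_simple()
assert log == ['abort', 'cleanup-1', 'cleanup-2']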

+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.
+        :returns: list of strs, summarising any problems found. If the list is
+            empty no problems were found.
+        # The base implementation does no checks. GCRepositoryPackCollection

     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (

             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        should_autopack = False
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            should_autopack = True
+            any_new_content = True

             self._new_pack.abort()
             self._new_pack = None

             self._remove_pack_from_memory(resumed_pack)
             resumed_pack.finish()
             self.allocate(resumed_pack)
-            should_autopack = True
+            any_new_content = True
         del self._resumed_packs[:]
-            if not self.autopack():
+            result = self.autopack()
                 # when autopack takes no steps, the names list is still
                 return self._save_pack_names()

     def _suspend_write_group(self):
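
_commit_write_group now asks _check_new_inventories for a list of problem strings and refuses to commit when any come back. The validate-then-raise shape as a small self-contained sketch (CheckFailed and the function names are illustrative, not bzrlib's):

class CheckFailed(Exception):
    pass


def check_new_inventories():
    # the base implementation reports no problems; subclasses return a list
    # of human-readable problem strings
    return []


def commit_write_group():
    problems = check_new_inventories()
    if problems:
        problems_summary = '\n'.join(problems)
        raise CheckFailed("Cannot add revision(s) to repository: "
                          + problems_summary)
    # ... finish the new pack, allocate it, and autopack if new content landed
    return 'committed'


assert commit_write_group() == 'committed'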

         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False

-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                " 'bzr upgrade --1.6.1-rich-root'"
-                % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)

     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()

     def _get_source(self, to_format):

         self._pack_collection._start_write_group()

     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()

     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None

         return self._write_lock_count

     def lock_write(self, token=None):
+        """Lock the repository for writes.
+        :return: A bzrlib.repository.RepositoryWriteLockResult.
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)

         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
+        if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+            note('%r was write locked again', self)
+        self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)

     def lock_read(self):
+        """Lock the repository for reads.
+        :return: A bzrlib.lock.LogicalLockResult.
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1

             self.control_files.lock_read()
+        if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+            note('%r was read locked again', self)
+        self._prev_lock = 'r'
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
+        return LogicalLockResult(self.unlock)

     def leave_lock_in_place(self):
         # not supported - raise an error

         raise NotImplementedError(self.dont_leave_lock_in_place)
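
lock_write and lock_read now hand back result objects whose unlock attribute releases the lock, which is what RepositoryWriteLockResult and LogicalLockResult provide. A self-contained sketch of that pattern (this LogicalLockResult is a simplified stand-in for the bzrlib.lock class, and DummyRepo is illustrative):

class LogicalLockResult(object):
    """Carries the callable that will release the lock just taken."""

    def __init__(self, unlock):
        self.unlock = unlock


class DummyRepo(object):

    def __init__(self):
        self.lock_count = 0

    def lock_read(self):
        self.lock_count += 1
        return LogicalLockResult(self.unlock)

    def unlock(self):
        self.lock_count -= 1


repo = DummyRepo()
result = repo.lock_read()
try:
    pass  # ... read from the repository ...
finally:
    result.unlock()
assert repo.lock_count == 0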

     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.

         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)

     @needs_write_lock
     def reconcile(self, other=None, thorough=False):

         packer = ReconcilePacker(collection, packs, extension, revs)
         return packer.pack(pb)

+    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if self._write_lock_count == 1 and self._write_group is not None:
             self.abort_write_group()

         utf8_files = [('format', self.get_format_string())]

         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)

     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().

         """See RepositoryFormat.get_format_description()."""
         return "Packs containing knits without subtree support"

-    def check_conversion_target(self, target_format):

 class RepositoryFormatKnitPack3(RepositoryFormatPack):
     """A subtrees parameterized Pack repository.

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)

     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)

     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"

         """See RepositoryFormat.get_format_description()."""
         return "Packs 5 (adds stacking support, requires bzr 1.6)"

-    def check_conversion_target(self, target_format):

 class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
     """A repository with rich roots and stacking.

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)

     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)

     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

         """See RepositoryFormat.get_format_description()."""
         return "Packs 6 (uses btree indexes, requires bzr 1.9)"

-    def check_conversion_target(self, target_format):

 class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
     """A repository with rich roots, no subtrees, stacking and btree indexes.

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)

     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"

     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use

     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)

     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "