@@ -224 +228 @@
         return self.index_name('text', name)

     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))


 class ExistingPack(Pack):
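The flag exists because chk index keys are content hashes: lookups arrive in effectively random order, so a bounded LRU node cache would thrash. A minimal, self-contained sketch of how an index class might honour the flag; the LRUCache stand-in and IndexSketch class are illustrative, not bzrlib's actual implementation (bzrlib's real cache lives in bzrlib.lru_cache):

from collections import OrderedDict


class LRUCache(OrderedDict):
    # Tiny stand-in for an LRU cache such as bzrlib.lru_cache.LRUCache.

    def __init__(self, max_cache=4096):
        OrderedDict.__init__(self)
        self._max_cache = max_cache

    def __setitem__(self, key, value):
        OrderedDict.__setitem__(self, key, value)
        if len(self) > self._max_cache:
            self.popitem(last=False)  # evict the oldest entry


class IndexSketch(object):
    # Hypothetical index honouring the unlimited_cache flag threaded above.

    def __init__(self, transport, name, size, unlimited_cache=False):
        if unlimited_cache:
            # chk keys are hashes: no locality, so keep every node read.
            self._node_cache = {}
        else:
            self._node_cache = LRUCache(max_cache=4096)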
@@ -574 +589 @@
                                              flush_func=flush_func)
         self.add_callback = None

-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # any.
-        self.add_callback = None
-
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.

@@ -606 +601 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)

     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.

@@ -632 +627 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None

-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.

         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
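remove_index switches from remove() to positional deletion because CombinedGraphIndex now carries a name list parallel to _indices (used for the search-order hints wired up further down); deleting by position is what keeps the two lists aligned. A standalone sketch of that invariant, with illustrative names:

class CombinedIndexSketch(object):
    # Sketch: _indices and _index_names must stay aligned by position.

    def __init__(self):
        self._indices = []
        self._index_names = []

    def insert_index(self, pos, index, name=None):
        self._indices.insert(pos, index)
        self._index_names.insert(pos, name)

    def remove_index(self, index):
        # Find the position once, then delete from both lists.
        pos = self._indices.index(index)
        del self._indices[pos]
        del self._index_names[pos]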
@@ -1105 +1102 @@
         iterator is a tuple with:
         index, readv_vector, node_vector. readv_vector is a list ready to
         hand to the transport readv method, and node_vector is a list of
-        (key, eol_flag, references) for the the node retrieved by the
+        (key, eol_flag, references) for the node retrieved by the
         matching readv_vector.
         """
         # group by pack so we do one readv per pack
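The comment above is the key constraint: requests are bucketed by their owning index before any I/O is issued, so each pack file is touched with a single readv() call. A hedged sketch of just the grouping step (names are illustrative):

from collections import defaultdict


def group_by_index(requests):
    # requests: iterable of (index, readv_slice, node) tuples.
    # Returns {index: [(readv_slice, node), ...]} so the caller can make
    # one readv() per pack instead of one read per node.
    grouped = defaultdict(list)
    for index, readv_slice, node in requests:
        grouped[index].append((readv_slice, node))
    return grouped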
@@ -1403 +1400 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []

+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
+
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
@@ -1530 +1539 @@
         self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
...
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
...
     def _flush_new_pack(self):
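The rewrite changes when packs are physically retired: the new pack-names file must be on disk before the replaced packs are moved aside, so no reader ever holds a name that points at a missing file. A sketch of the resulting ordering (callable names are illustrative):

def retire_packs(pack_operations, save_pack_names):
    # Collect every pack replaced by this round of operations...
    to_be_obsoleted = []
    for _, packs in pack_operations:
        to_be_obsoleted.extend(packs)
    # ...and hand them to _save_pack_names, which renames them only
    # after the new pack-names index has been written out.
    return save_pack_names(clear_obsolete_packs=True,
                           obsolete_packs=to_be_obsoleted)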
@@ -1567 +1577 @@
         # determine which packs need changing
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if not hint or pack.name in hint:
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
                 pack_operations[-1][0] += pack.get_revision_count()
                 pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
...
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()

     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
@@ -1665 +1680 @@
         txt_index = self._make_index(name, '.tix')
         sig_index = self._make_index(name, '.six')
         if self.chk_index is not None:
-            chk_index = self._make_index(name, '.cix')
+            chk_index = self._make_index(name, '.cix', unlimited_cache=True)
         else:
             chk_index = None
         result = ExistingPack(self._pack_transport, name, rev_index,

@@ -1690 +1705 @@
         txt_index = self._make_index(name, '.tix', resume=True)
         sig_index = self._make_index(name, '.six', resume=True)
         if self.chk_index is not None:
-            chk_index = self._make_index(name, '.cix', resume=True)
+            chk_index = self._make_index(name, '.cix', resume=True,
+                                         unlimited_cache=True)
         else:
             chk_index = None
         result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1726 +1742 @@
         return self._index_class(self.transport, 'pack-names', None
             ).iter_all_entries()

-    def _make_index(self, name, suffix, resume=False):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
...
         transport = self._index_transport
         index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size)
+        return self._index_class(transport, index_name, index_size,
+                                 unlimited_cache=unlimited_cache)

     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1769 +1786 @@
         :param return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
...
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))

     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1811 +1837 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)

-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
...
     def reset(self):
         """Clear all cached data."""
@@ -1875 +1910 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
...
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes

     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1932 +1967 @@
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
...
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
...
+                to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        if obsolete_packs:
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written
+            #       to disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]

     def reload_pack_names(self):
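Filtering against already_obsolete avoids a rename race: if a concurrent process has already moved a pack into obsolete_packs, renaming it again would fail. The post-write step reduces to this (illustrative form):

def obsolete_after_save(obsolete_packs, already_obsolete, obsolete_fn):
    # already_obsolete comes from _clear_obsolete_packs(to_preserve).
    remaining = [o for o in obsolete_packs
                 if o.name not in already_obsolete]
    obsolete_fn(remaining)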
@@ -1974 +2024 @@
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at the start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
...
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
@@ -1991 +2045 @@
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
...
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
...
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
...
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
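Filling in the elided body by assumption: the method now records every '.pack' name it sees, skips anything in preserve, and returns the names found. A self-contained sketch of that behaviour using plain os calls in place of bzr transports:

import os


def clear_obsolete_dir(dirpath, preserve=None):
    if preserve is None:
        preserve = set()
    found = []
    for filename in os.listdir(dirpath):
        name, ext = os.path.splitext(filename)
        if ext == '.pack':
            found.append(name)
        if name in preserve:
            continue  # a newer writer still wants this file kept
        try:
            os.unlink(os.path.join(dirpath, filename))
        except OSError:
            pass  # best effort; the real code logs a warning instead
    return found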
@@ -2029 +2096 @@
             # FIXME: just drop the transient index.
             # forget what names there are
             if self._new_pack is not None:
-                try:
-                    self._new_pack.abort()
-                finally:
-                    # XXX: If we aborted while in the middle of finishing the write
-                    # group, _remove_pack_indices can fail because the indexes are
-                    # already gone. If they're not there we shouldn't fail in this
-                    # case. -- mbp 20081113
-                    self._remove_pack_indices(self._new_pack)
-                    self._new_pack = None
+                operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+                operation.add_cleanup(setattr, self, '_new_pack', None)
+                # If we aborted while in the middle of finishing the write
+                # group, _remove_pack_indices could fail because the indexes
+                # are already gone. But if they're not there we shouldn't fail
+                # in this case, so we pass ignore_missing=True.
+                operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                    ignore_missing=True)
+                operation.run_simple()
             for resumed_pack in self._resumed_packs:
-                try:
-                    resumed_pack.abort()
-                finally:
-                    # See comment in previous finally block.
-                    try:
-                        self._remove_pack_indices(resumed_pack)
-                    except KeyError:
-                        pass
+                operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+                # See comment in previous finally block.
+                operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                    ignore_missing=True)
+                operation.run_simple()
             del self._resumed_packs[:]

     def _remove_resumed_pack_indices(self):
         for resumed_pack in self._resumed_packs:
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
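OperationWithCleanups (from bzrlib.cleanup) replaces the nested try/finally blocks: the primary callable runs first, then every registered cleanup, and a failing cleanup cannot mask the primary exception. A simplified model of the contract assumed here:

import logging


class OperationWithCleanupsSketch(object):

    def __init__(self, func):
        self._func = func
        self._cleanups = []

    def add_cleanup(self, func, *args, **kwargs):
        self._cleanups.append((func, args, kwargs))

    def run_simple(self, *args):
        try:
            return self._func(*args)
        finally:
            for func, cargs, ckwargs in self._cleanups:
                try:
                    func(*cargs, **ckwargs)
                except Exception:
                    # Report, never mask the original failure.
                    logging.exception('cleanup failed')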
@@ -2057 +2121 @@
+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.
+
+        :returns: list of strs, summarising any problems found. If the list is
+            empty no problems were found.
+        """
+        # The base implementation does no checks. GCRepositoryPackCollection
+        # overrides this.
+        return []
+
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
...
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+        if problems:
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        should_autopack = False
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            should_autopack = True
+            any_new_content = True
         else:
             self._new_pack.abort()
             self._new_pack = None
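How the hook is meant to be used: a subclass (the comment names GCRepositoryPackCollection) returns problem strings instead of raising, so _commit_write_group can aggregate them into one BzrCheckError. A hedged sketch; _find_missing_inventories is a hypothetical helper:

class CheckedCollectionSketch(object):

    def _check_new_inventories(self):
        problems = []
        missing = self._find_missing_inventories()  # hypothetical helper
        if missing:
            problems.append(
                'missing inventories: %r' % (sorted(missing),))
        return problems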
@@ -2200 +2282 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False

-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-                return
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                    " 'bzr upgrade --1.6.1-rich-root'"
-                    % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)

     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
@@ -2218 +2294 @@
-    def _find_inconsistent_revision_parents(self):
-        """Find revisions with incorrectly cached parents.
-
-        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
-            parents-in-revision).
-        """
-        if not self.is_locked():
-            raise errors.ObjectNotLocked(self)
-        pb = ui.ui_factory.nested_progress_bar()
...
-            revision_nodes = self._pack_collection.revision_index \
-                .combined_index.iter_all_entries()
-            index_positions = []
-            # Get the cached index values for all revisions, and also the
-            # location in each index of the revision text so we can perform
-            # linear IO.
-            for index, key, value, refs in revision_nodes:
-                node = (index, key, value, refs)
-                index_memo = self.revisions._index._node_to_position(node)
-                if index_memo[0] != index:
-                    raise AssertionError('%r != %r' % (index_memo[0], index))
-                index_positions.append((index_memo, key[0],
-                    tuple(parent[0] for parent in refs[0])))
-                pb.update("Reading revision index", 0, 0)
-            index_positions.sort()
...
-            pb.update("Checking cached revision graph", 0,
-                      len(index_positions))
-            for offset in xrange(0, len(index_positions), 1000):
-                pb.update("Checking cached revision graph", offset)
-                to_query = index_positions[offset:offset + batch_size]
...
-                rev_ids = [item[1] for item in to_query]
-                revs = self.get_revisions(rev_ids)
-                for revision, item in zip(revs, to_query):
-                    index_parents = item[2]
-                    rev_parents = tuple(revision.parent_ids)
-                    if index_parents != rev_parents:
-                        result.append((revision.revision_id, index_parents,
...
     def _get_source(self, to_format):
         if to_format.network_name() == self._format.network_name():
             return KnitPackStreamSource(self, to_format)
@@ -2278 +2308 @@
         self._pack_collection._start_write_group()

     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()
+        return hint

     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None
@@ -2318 +2353 @@
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
...
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)

     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
...
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
         for repo in self._fallback_repositories:
             repo.lock_read()
         self._refresh_data()
+        return LogicalLockResult(self.unlock)

     def leave_lock_in_place(self):
         # not supported - raise an error
...
         raise NotImplementedError(self.dont_leave_lock_in_place)

     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.

         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)

     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
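Both lock paths now share the same diagnostic: remember the kind of the previous lock and, under the 'relock' debug flag, note any repeat acquisition of the same kind (useful for spotting redundant lock round-trips). The diff inlines it; as a standalone sketch:

def track_relock(repo, kind, debug_flags, note):
    # kind is 'r' or 'w'; note is a callable like bzrlib.trace.note.
    if 'relock' in debug_flags and getattr(repo, '_prev_lock', None) == kind:
        note('%r was %s locked again', repo,
             {'r': 'read', 'w': 'write'}[kind])
    repo._prev_lock = kind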
@@ -2510 +2558 @@
         utf8_files = [('format', self.get_format_string())]

         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository

     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2600 +2648 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"

@@ -2646 +2686 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"

@@ -2732 +2764 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

@@ -2783 +2810 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

@@ -2863 +2882 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"

@@ -2909 +2924 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "