from bzrlib import (
    revision as _mod_revision,
from bzrlib.decorators import needs_write_lock, only_raises
from bzrlib.decorators import needs_write_lock
from bzrlib.btree_index import (
from bzrlib.index import (
    InMemoryGraphIndex,
from bzrlib.lock import LogicalLockResult
from bzrlib.repofmt.knitrepo import KnitRepository
from bzrlib.repository import (
    MetaDirRepositoryFormat,
    RepositoryWriteLockResult,
import bzrlib.revision as _mod_revision
from bzrlib.trace import (
        return self.index_name('text', name)

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        index = self.index_class(self.index_transport,
            self.index_name(index_type, self.name),
            self.index_sizes[self.index_offset(index_type)],
            unlimited_cache=unlimited_cache)
        if index_type == 'chk':
            index._leaf_factory = btree_index._gcchk_factory
        setattr(self, index_type + '_index', index)
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)]))
class ExistingPack(Pack):

            flush_func=flush_func)
        self.add_callback = None

    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually, rather incremental add and
        removal of indices. It has been added during refactoring of existing

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None

    def remove_index(self, index):
    def remove_index(self, index, pack):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        self.combined_index._indices.remove(index)
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
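# --- Illustrative sketch (not part of bzrlib) -------------------------------
# The newer remove_index() above deletes by position so that the parallel
# lists _indices and _index_names stay aligned; the older variant used
# list.remove(), which is only safe when a single list is involved.  A
# minimal standalone model of the positional approach (names are ours):
def remove_entry(indices, index_names, index):
    """Drop `index` from both parallel lists, keeping them aligned."""
    pos = indices.index(index)      # raises ValueError if the index is absent
    del indices[pos]
    del index_names[pos]
# e.g. remove_entry(['a', 'b', 'c'], ['n0', 'n1', 'n2'], 'b')
# leaves ['a', 'c'] and ['n0', 'n2'].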
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
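# --- Illustrative sketch (not part of bzrlib) -------------------------------
# Why the comment above cares about ordering: a delta can only be applied
# once its basis text is available, so inserting texts in topological order
# of their revisions guarantees every basis is present first.  A tiny
# standalone topological sort over a {revision: parents} map, assuming an
# acyclic graph:
def topo_sort(parent_map):
    """Return revisions so that every parent precedes its children."""
    order = []
    visited = set()
    def visit(rev):
        if rev in visited:
            return
        visited.add(rev)
        for parent in parent_map.get(rev, ()):
            visit(parent)
        order.append(rev)
    for rev in parent_map:
        visit(rev)
    return order
# topo_sort({'d3': ['d2'], 'd2': ['d1'], 'd1': []}) -> ['d1', 'd2', 'd3']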
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        # resumed packs
        self._resumed_packs = []

        return '%s(%r)' % (self.__class__.__name__, self.repo)
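# --- Illustrative sketch (not part of bzrlib) -------------------------------
# The loop above wires every CombinedGraphIndex up with its siblings, so a
# hit in one index can hint which pack the others should probe first.  A
# stripped-down model of that wiring, using a hypothetical class:
class CombinedIndex(object):
    def __init__(self, name):
        self.name = name
        self.siblings = set()

    def set_sibling_indices(self, siblings):
        self.siblings = set(siblings)

combined = [CombinedIndex(n) for n in ('rev', 'inv', 'text', 'sig')]
for idx in combined:
    idx.set_sibling_indices(set(combined).difference([idx]))
# Each index now knows about the other three and can share search hints.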
    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
        self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):

            self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)

    def _flush_new_pack(self):
        if self._new_pack is not None:

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
        return len(self._names) < 2
    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            # This is arguably wrong because we might not be optimal, but for
            # now let's leave it in. (e.g. reconcile -> one pack. But not
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
            'containing %d revisions into 1 packs.', self, total_packs,
        # determine which packs need changing
        pack_distribution = [1]
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
            pack_operations[-1][0] += pack.get_revision_count()
            pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)
        if clean_obsolete_packs:
            self._clear_obsolete_packs()
    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
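# --- Illustrative sketch (not part of bzrlib) -------------------------------
# The loop above walks packs from largest to smallest: a pack that already
# fills the current slot of the distribution chart is kept and the slot is
# consumed, while a smaller pack is folded into the pack being assembled for
# that slot.  A simplified standalone version of that greedy idea (function
# and variable names are ours, not bzrlib's):
def plan_combinations(existing_packs, distribution):
    """existing_packs: list of (rev_count, name) pairs, largest first.
    distribution: desired revision counts per pack, largest first.
    Returns a list of [rev_count, [names]] combine operations."""
    operations = [[0, []]]
    existing = list(existing_packs)
    slots = list(distribution)
    while existing and slots:
        rev_count, name = existing.pop(0)
        if rev_count >= slots[0]:
            slots.pop(0)            # big enough already: keep it, use the slot
        else:
            operations[-1][0] += rev_count
            operations[-1][1].append(name)
    return operations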
        txt_index = self._make_index(name, '.tix', resume=True)
        sig_index = self._make_index(name, '.six', resume=True)
        if self.chk_index is not None:
            chk_index = self._make_index(name, '.cix', resume=True,
            chk_index = self._make_index(name, '.cix', resume=True)
            chk_index = None
        result = self.resumed_pack_factory(name, rev_index, inv_index,

        transport = self._index_transport
        index_size = self._names[name][size_offset]
        index = self._index_class(transport, index_name, index_size,
                                  unlimited_cache=is_chk)
        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
            index._leaf_factory = btree_index._gcchk_factory
        return self._index_class(transport, index_name, index_size)

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.
        :return: None.
        """
        for pack in packs:
                pack.pack_transport.rename(pack.file_name(),
                    '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
            pack.pack_transport.rename(pack.file_name(),
                '../obsolete_packs/' + pack.file_name())
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                    self._index_transport.rename(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                self._index_transport.rename(pack.name + suffix,
                    '../obsolete_packs/' + pack.name + suffix)

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                aggregate_index.remove_index(pack_index)

    def _remove_pack_indices(self, pack):
        """Remove the indices for pack from the aggregated indices."""
        self.revision_index.remove_index(pack.revision_index, pack)
        self.inventory_index.remove_index(pack.inventory_index, pack)
        self.text_index.remove_index(pack.text_index, pack)
        self.signature_index.remove_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.remove_index(pack.chk_index, pack)
    def reset(self):
        """Clear all cached data."""

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            # move the baseline forward
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
                self._clear_obsolete_packs()
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]
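# --- Illustrative sketch (not part of bzrlib) -------------------------------
# _save_pack_names() above works from a comparison between the pack-names
# baseline taken at load time, what is now on disk, and what is in memory:
# names added on disk by another process are kept, names added here are
# written out, and names that disappeared from disk are dropped.  A toy
# model of that set arithmetic (the real _diff_pack_names works on
# (key, value) index nodes carrying sizes, not bare names):
def diff_pack_names(baseline, on_disk, in_memory):
    """Return (merged, deleted_by_others, new_here) as sets of names."""
    baseline = set(baseline)
    on_disk = set(on_disk)
    in_memory = set(in_memory)
    deleted_by_others = baseline - on_disk
    new_here = in_memory - baseline
    merged = on_disk | new_here
    return merged, deleted_by_others, new_here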
    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()

        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        disk_nodes, _, _ = self._diff_pack_names()
        self._packs_at_load = disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:

        raise errors.RetryAutopack(self.repo, False, sys.exc_info())
    def _clear_obsolete_packs(self, preserve=None):
    def _clear_obsolete_packs(self):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if name in preserve:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
            warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.

        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. But if they're not there we shouldn't fail in this
            # case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
                self._new_pack.abort()
                # XXX: If we aborted while in the middle of finishing the write
                # group, _remove_pack_indices can fail because the indexes are
                # already gone. If they're not there we shouldn't fail in this
                # case. -- mbp 20081113
                self._remove_pack_indices(self._new_pack)
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
                resumed_pack.abort()
                # See comment in previous finally block.
                self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]
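# --- Illustrative sketch (not part of bzrlib) -------------------------------
# The newer abort path above builds an OperationWithCleanups so that index
# removal always runs even if pack.abort() raises, replacing the older
# try/finally.  The same guarantee expressed with a plain try/finally, using
# hypothetical names:
def abort_pack(pack, remove_indices, ignore_missing=True):
    """Abort `pack`, then always try to drop its in-memory indices."""
    try:
        pack.abort()
    finally:
        try:
            remove_indices(pack)
        except KeyError:
            # The indices were already gone; only ignore that if asked to.
            if not ignore_missing:
                raise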
    def _remove_resumed_pack_indices(self):

            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        should_autopack = False
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
            should_autopack = True
            self._new_pack.abort()
            self._new_pack = None
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _warn_if_deprecated(self, branch=None):
    def _warn_if_deprecated(self):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            super(KnitPackRepository, self)._warn_if_deprecated(branch)
            from bzrlib import repository
            if repository._deprecation_warning_done:
            repository._deprecation_warning_done = True
            warning("Format %s for %s is deprecated - please use"
                    " 'bzr upgrade --1.6.1-rich-root'"
                    % (self._format, self.bzrdir.transport.base))

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self.revisions._index._key_dependencies.refs.clear()
        self._pack_collection._abort_write_group()

    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)
    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the
            # location in each index of the revision text so we can perform
            for index, key, value, refs in revision_nodes:
                node = (index, key, value, refs)
                index_memo = self.revisions._index._node_to_position(node)
                if index_memo[0] != index:
                    raise AssertionError('%r != %r' % (index_memo[0], index))
                index_positions.append((index_memo, key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index", 0, 0)
            index_positions.sort()
            pb.update("Checking cached revision graph", 0,
                len(index_positions))
            for offset in xrange(0, len(index_positions), 1000):
                pb.update("Checking cached revision graph", offset)
                to_query = index_positions[offset:offset + batch_size]
                rev_ids = [item[1] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[2]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)
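# --- Illustrative sketch (not part of bzrlib) -------------------------------
# _find_inconsistent_revision_parents() above cross-checks the parent
# pointers cached in the revision index against the parents recorded in the
# revision texts themselves, reading in sorted batches so the I/O stays
# sequential.  The core comparison, reduced to plain data:
def find_inconsistent_parents(indexed_parents, real_parents):
    """Yield (rev_id, cached, actual) where the two parent tuples disagree.

    indexed_parents / real_parents: dicts mapping rev_id -> tuple of parents.
    """
    for rev_id in sorted(indexed_parents):
        cached = tuple(indexed_parents[rev_id])
        actual = tuple(real_parents.get(rev_id, ()))
        if cached != actual:
            yield (rev_id, cached, actual)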
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        self.revisions._index._key_dependencies.refs.clear()
        return self._pack_collection._commit_write_group()

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self.revisions._index._key_dependencies.refs.clear()
        self._write_group = None

        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if 'relock' in debug.debug_flags and self._prev_lock == 'w':
            note('%r was write locked again', self)
        self._prev_lock = 'w'
        for repo in self._fallback_repositories:
            # Writes don't affect fallback repos
            repo.lock_read()
        self._refresh_data()
        return RepositoryWriteLockResult(self.unlock, None)
    def lock_read(self):
        """Lock the repository for reads.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
            self.control_files.lock_read()
        if 'relock' in debug.debug_flags and self._prev_lock == 'r':
            note('%r was read locked again', self)
        self._prev_lock = 'r'
        for repo in self._fallback_repositories:
            repo.lock_read()
        self._refresh_data()
        return LogicalLockResult(self.unlock)
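# --- Illustrative sketch (not part of bzrlib) -------------------------------
# Both lock methods above hand back a result object whose `unlock` is the
# matching release call, and write locks are counted so nested lock_write()
# calls only take the underlying lock once.  A minimal model of that
# counting, with invented names:
class LockCounter(object):
    def __init__(self, take_lock, release_lock):
        self._take = take_lock
        self._release = release_lock
        self._count = 0

    def lock_write(self):
        if self._count == 0:
            self._take()          # only the first caller touches the real lock
        self._count += 1
        return self.unlock        # hand back the matching release callable

    def unlock(self):
        self._count -= 1
        if self._count == 0:
            self._release()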
    def leave_lock_in_place(self):
        # not supported - raise an error

        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self, hint=None, clean_obsolete_packs=False):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
        self._pack_collection.pack()

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
class KnitPackStreamSource(StreamSource):
    """A StreamSource used to transfer data between same-format KnitPack repos.

    This source assumes:
        1) Same serialization format for all objects
        2) Same root information
        3) XML format inventories
        4) Atomic inserts (so we can stream inventory texts before text

    def __init__(self, from_repository, to_format):
        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
        self._text_keys = None
        self._text_fetch_order = 'unordered'

    def _get_filtered_inv_stream(self, revision_ids):
        from_repo = self.from_repository
        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
        parent_keys = [(p,) for p in parent_ids]
        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
        parent_text_keys = set(find_text_keys(
            from_repo._inventory_xml_lines_for_keys(parent_keys)))
        content_text_keys = set()
        knit = KnitVersionedFiles(None, None)
        factory = KnitPlainFactory()
        def find_text_keys_from_content(record):
            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
                raise ValueError("Unknown content storage kind for"
                    " inventory text: %s" % (record.storage_kind,))
            # It's a knit record, it has a _raw_record field (even if it was
            # reconstituted from a network stream).
            raw_data = record._raw_record
            # read the entire thing
            revision_id = record.key[-1]
            content, _ = knit._parse_record(revision_id, raw_data)
            if record.storage_kind == 'knit-delta-gz':
                line_iterator = factory.get_linedelta_content(content)
            elif record.storage_kind == 'knit-ft-gz':
                line_iterator = factory.get_fulltext_content(content)
            content_text_keys.update(find_text_keys(
                [(line, revision_id) for line in line_iterator]))
        revision_keys = [(r,) for r in revision_ids]
        def _filtered_inv_stream():
            source_vf = from_repo.inventories
            stream = source_vf.get_record_stream(revision_keys,
            for record in stream:
                if record.storage_kind == 'absent':
                    raise errors.NoSuchRevision(from_repo, record.key)
                find_text_keys_from_content(record)
            self._text_keys = content_text_keys - parent_text_keys
        return ('inventories', _filtered_inv_stream())

    def _get_text_stream(self):
        # Note: We know we don't have to handle adding root keys, because both
        # the source and target have the identical network name.
        text_stream = self.from_repository.texts.get_record_stream(
            self._text_keys, self._text_fetch_order, False)
        return ('texts', text_stream)

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_filtered_inv_stream(revision_ids)
        yield self._get_text_stream()
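# --- Illustrative sketch (not part of bzrlib) -------------------------------
# get_stream() above fixes the transfer order: revision texts first, then
# the filtered inventory stream (which, as a side effect, records which
# file-text keys the target is missing), and finally those texts.  The shape
# of that pipeline, with hypothetical helper callables:
def stream_everything(fetch_revisions, inventories_for, texts_for, revision_ids):
    """Yield (stream_name, records) pairs in dependency order."""
    for stream_info in fetch_revisions(revision_ids):
        yield stream_info
    # inventories_for() is assumed to return the stream plus the set of
    # text keys referenced only by the new inventories.
    inv_stream, text_keys = inventories_for(revision_ids)
    yield ('inventories', inv_stream)
    yield ('texts', texts_for(text_keys))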
class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return self.open(a_bzrdir=a_bzrdir, _found=True)

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development5-subtree')
            'development-subtree')

    def _ignore_setting_bzrdir(self, format):

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "