=== modified file 'bzrlib/repofmt/pack_repo.py'
@@ -57 +59 @@
 from bzrlib.decorators import needs_write_lock, only_raises
-from bzrlib.btree_index import (
@@ -62 +60 @@
 from bzrlib.index import (
@@ -64 +62 @@
     InMemoryGraphIndex,
@@ -66 +64 @@
+from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
@@ -69 +68 @@
     MetaDirRepositoryFormat,
+    RepositoryWriteLockResult,
@@ -228 +228 @@
         unlimited_cache = False
         if index_type == 'chk':
             unlimited_cache = True
-        setattr(self, index_type + '_index',
-            self.index_class(self.index_transport,
-                self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)],
-                unlimited_cache=unlimited_cache))
+        index = self.index_class(self.index_transport,
+                    self.index_name(index_type, self.name),
+                    self.index_sizes[self.index_offset(index_type)],
+                    unlimited_cache=unlimited_cache)
+        if index_type == 'chk':
+            index._leaf_factory = btree_index._gcchk_factory
+        setattr(self, index_type + '_index', index)
@@ -238 +240 @@
 class ExistingPack(Pack):
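Note: the rewritten method above builds the index object first, swaps in the group-compress chk leaf parser where needed, and only then publishes it on the pack. A minimal sketch of that build-tweak-publish pattern, using invented stand-in classes rather than bzrlib's real index API:

```python
# Illustrative only: FakeIndex/FakePack stand in for bzrlib's index and
# Pack classes; the leaf-factory strings stand in for parser classes.

class FakeIndex(object):
    def __init__(self, name, unlimited_cache=False):
        self.name = name
        self.unlimited_cache = unlimited_cache
        self._leaf_factory = 'default-leaf-parser'

class FakePack(object):
    def replace_index_with_readonly(self, index_type):
        # mirror the hunk above: only chk indices get the unbounded cache
        unlimited_cache = (index_type == 'chk')
        index = FakeIndex(index_type, unlimited_cache=unlimited_cache)
        if index_type == 'chk':
            index._leaf_factory = 'gc-chk-leaf-parser'
        # publish only the fully configured object
        setattr(self, index_type + '_index', index)

pack = FakePack()
pack.replace_index_with_readonly('chk')
pack.replace_index_with_readonly('text')
assert pack.chk_index._leaf_factory == 'gc-chk-leaf-parser'
assert pack.text_index._leaf_factory == 'default-leaf-parser'
```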
@@ -586 +588 @@
                                              flush_func=flush_func)
         self.add_callback = None
-
-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # there is one.
-        self.add_callback = None
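Note: with `replace_indices` gone, callers go through `add_index`/`remove_index` only, so the `index_to_pack` map and the combined index list can no longer be rebuilt out of step with each other. A toy sketch of that incremental discipline (all names invented):

```python
class ToyAggregateIndex(object):
    """Keeps a pack map and a search list that must stay in sync."""

    def __init__(self):
        self.index_to_pack = {}   # index -> (transport, name)
        self._indices = []        # search order, newest first

    def add_index(self, index, access_tuple):
        self.index_to_pack[index] = access_tuple
        self._indices.insert(0, index)

    def remove_index(self, index):
        del self.index_to_pack[index]
        self._indices.remove(index)

agg = ToyAggregateIndex()
agg.add_index('rev-ix-1', ('transport', 'pack-1.rix'))
agg.add_index('rev-ix-2', ('transport', 'pack-2.rix'))
agg.remove_index('rev-ix-1')
# Both structures changed together; no wholesale swap was ever needed.
assert list(agg.index_to_pack) == ['rev-ix-2']
assert agg._indices == ['rev-ix-2']
```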
@@ -609 +591 @@
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
@@ -618 +600 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)
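Note: passing `pack.name` to `insert_index` suggests the combined index now tracks a name per index in a list parallel to `_indices` (the `_index_names` deletions later in this diff point the same way). A sketch of those assumed semantics; `ToyCombinedIndex` is illustrative, not the real `CombinedGraphIndex`:

```python
class ToyCombinedIndex(object):
    def __init__(self):
        self._indices = []
        self._index_names = []

    def insert_index(self, pos, index, name=None):
        # keep the two parallel lists the same length at all times
        self._indices.insert(pos, index)
        self._index_names.insert(pos, name)

combined = ToyCombinedIndex()
combined.insert_index(0, 'ix-a', 'pack-a')
combined.insert_index(0, 'ix-b', 'pack-b')   # front: searched first
assert combined._indices == ['ix-b', 'ix-a']
assert combined._index_names == ['pack-b', 'pack-a']
```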
@@ -623 +605 @@
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -644 +626 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None
@@ -649 +632 @@
-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.
@@ -652 +635 @@
         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
@@ -655 +637 @@
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
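Note: the new body deletes by position precisely so that `_indices` and `_index_names` shrink together; `list.remove` alone could only fix one of the two. The same idea with plain lists:

```python
indices = ['ix-b', 'ix-a']
index_names = ['pack-b', 'pack-a']

pos = indices.index('ix-a')   # locate once ...
del indices[pos]              # ... then delete from both parallel lists
del index_names[pos]

assert indices == ['ix-b']
assert index_names == ['pack-b']
```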
@@ -1415 +1399 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
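Note: the sibling wiring added above is a plain set difference computed per combined index — each one is told about all the others so pack-name search hints can be shared. The computation in isolation:

```python
# Strings stand in for the CombinedGraphIndex objects.
all_combined = ['rev', 'inv', 'text', 'sig', 'chk']

siblings = {}
for combined_idx in all_combined:
    siblings[combined_idx] = set(all_combined).difference([combined_idx])

assert siblings['rev'] == set(['inv', 'text', 'sig', 'chk'])
assert all(combined_idx not in siblings[combined_idx]
           for combined_idx in all_combined)
```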
@@ -1568 +1561 @@
         """Is the collection already packed?"""
         return not (self.repo._format.pack_compresses or (len(self._names) > 1))
@@ -1571 +1564 @@
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
@@ -1590 +1583 @@
         pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()

     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
@@ -1683 +1679 @@
         txt_index = self._make_index(name, '.tix')
         sig_index = self._make_index(name, '.six')
         if self.chk_index is not None:
-            chk_index = self._make_index(name, '.cix', unlimited_cache=True)
+            chk_index = self._make_index(name, '.cix', is_chk=True)
         else:
             chk_index = None
         result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1709 +1705 @@
         sig_index = self._make_index(name, '.six', resume=True)
         if self.chk_index is not None:
             chk_index = self._make_index(name, '.cix', resume=True,
-                                         unlimited_cache=True)
+                                         is_chk=True)
         else:
             chk_index = None
         result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1745 +1741 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()
@@ -1748 +1744 @@
-    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
+    def _make_index(self, name, suffix, resume=False, is_chk=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
@@ -1755 +1751 @@
         transport = self._index_transport
         index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size,
-                                 unlimited_cache=unlimited_cache)
+        index = self._index_class(transport, index_name, index_size,
+                                  unlimited_cache=is_chk)
+        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
+            index._leaf_factory = btree_index._gcchk_factory
+        return index

     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1840 +1839 @@
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
@@ -1843 +1842 @@
-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
+                except KeyError:
+                    if ignore_missing:
+                        continue
+                    raise

     def reset(self):
         """Clear all cached data."""
@@ -2091 +2098 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone. If they're not there we shouldn't fail in this
-                # case. -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone. But if they're not there we shouldn't fail in
+            # this case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                try:
-                    self._remove_pack_indices(resumed_pack)
-                except KeyError:
-                    pass
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]

     def _remove_resumed_pack_indices(self):
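Note: the abort path now delegates its cleanups to `bzrlib.cleanup.OperationWithCleanups`. The stand-in below shows only the behaviour the new code relies on — the main callable runs first, and every registered cleanup still runs afterwards even if it raises; the real class is considerably more careful about exceptions raised by the cleanups themselves:

```python
class ToyOperation(object):
    """Simplified stand-in for cleanup.OperationWithCleanups."""

    def __init__(self, func):
        self.func = func
        self.cleanups = []

    def add_cleanup(self, func, *args, **kwargs):
        self.cleanups.append((func, args, kwargs))

    def run_simple(self):
        try:
            return self.func()
        finally:
            # cleanups run in registration order, abort or no abort
            for func, args, kwargs in self.cleanups:
                func(*args, **kwargs)

log = []
operation = ToyOperation(lambda: log.append('abort-new-pack'))
operation.add_cleanup(log.append, 'clear-_new_pack')
operation.add_cleanup(log.append, 'remove-pack-indices')
operation.run_simple()
assert log == ['abort-new-pack', 'clear-_new_pack', 'remove-pack-indices']
```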
@@ -2340 +2344 @@
         return self._write_lock_count

     def lock_write(self, token=None):
+        """Lock the repository for writes.
+
+        :return: A bzrlib.repository.RepositoryWriteLockResult.
+        """
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)
@@ -2354 +2362 @@
             # Writes don't affect fallback repos
             repo.lock_read()
         self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)
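Note: `lock_write` now hands back a result object carrying the `unlock` callable (and `lock_read`, per its new docstring below, a `LogicalLockResult`), so a call site can hold one value that both proves the lock was taken and releases it. A toy repository showing the calling convention:

```python
class ToyLockResult(object):
    def __init__(self, unlock):
        # mirrors RepositoryWriteLockResult(self.unlock, None) above
        self.unlock = unlock

class ToyRepo(object):
    def __init__(self):
        self.lock_count = 0

    def lock_write(self, token=None):
        self.lock_count += 1
        return ToyLockResult(self._unlock)

    def _unlock(self):
        self.lock_count -= 1

repo = ToyRepo()
result = repo.lock_write()
try:
    pass   # ... mutate the repository under the lock ...
finally:
    result.unlock()
assert repo.lock_count == 0
```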
@@ -2358 +2367 @@
     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
@@ -2378 +2392 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)

     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.
@@ -2384 +2398 @@
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)

     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
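Note: the caller-facing effect of the new keyword, assuming `./myproject` is an existing branch in one of the pack formats this diff touches (`Branch.open`, `lock_write` and `unlock` are real bzrlib API; the path is illustrative):

```python
from bzrlib.branch import Branch

repo = Branch.open('myproject').repository
repo.lock_write()           # pack() is decorated with @needs_write_lock
try:
    # Also delete the superseded pack files instead of keeping them
    # around in the repository's obsolete_packs directory.
    repo.pack(clean_obsolete_packs=True)
finally:
    repo.unlock()
```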
@@ -2546 +2560 @@
         utf8_files = [('format', self.get_format_string())]

         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository

     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2814 +2831 @@
     _commit_builder_class = PackCommitBuilder
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
@@ -2821 +2838 @@
     def _serializer(self):
@@ -2850 +2867 @@
     supports_tree_reference = False # no subtrees
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
@@ -2857 +2874 @@
     def _serializer(self):
@@ -2888 +2905 @@
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use
-    index_builder_class = BTreeBuilder
-    index_class = BTreeGraphIndex
+    index_builder_class = btree_index.BTreeBuilder
+    index_class = btree_index.BTreeGraphIndex
@@ -2898 +2916 @@
     def _serializer(self):
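Note: all three format classes now spell their index classes through the module (`btree_index.BTreeGraphIndex`) rather than as bare imported names, matching the removal of the `from bzrlib.btree_index import (` block at the top of this diff. Both spellings bind the same class object at class-definition time, so the change is plausibly namespace hygiene rather than behaviour; demonstrated with a stdlib module standing in for `btree_index`:

```python
import json                    # stands in for: from bzrlib import btree_index

class QualifiedStyle(object):
    decoder_class = json.JSONDecoder    # like btree_index.BTreeGraphIndex

from json import JSONDecoder  # stands in for the removed bare-name import

class BareNameStyle(object):
    decoder_class = JSONDecoder         # like the old: BTreeGraphIndex

# Identical object either way; only the importing namespace differs.
assert QualifiedStyle.decoder_class is BareNameStyle.decoder_class
```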