# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Repository formats using CHK inventories and groupcompress compression."""

import time

from bzrlib import (
    bzrdir,
    chk_map,
    chk_serializer,
    debug,
    errors,
    index as _mod_index,
    inventory,
    knit,
    osutils,
    pack,
    revision as _mod_revision,
    trace,
    ui,
    )
from bzrlib.btree_index import (
    BTreeGraphIndex,
    BTreeBuilder,
    )
from bzrlib.groupcompress import (
    _GCGraphIndex,
    GroupCompressVersionedFiles,
    )
from bzrlib.repofmt.pack_repo import (
    NewPack,
    KnitPackRepository,
    KnitPackStreamSource,
    Pack,
    PackRootCommitBuilder,
    RepositoryPackCollection,
    RepositoryFormatPack,
    ResumedPack,
    Packer,
    )
from bzrlib.static_tuple import StaticTuple


class GCPack(NewPack):

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """
        # replaced from NewPack to:
        #  - change inventory reference list length to 1
        #  - change texts reference lists to 1
        # TODO: patch this to be parameterised

        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=1),
            # Texts: per file graph, for all fileids - so one reference list
            # and two elements in the key tuple.
            index_builder_class(reference_lists=1, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index,
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            trace.mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them. Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # flush the buffered data once it exceeds the cache limit
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'
        # no name until we finish writing the content
        self.name = None
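
    # An illustrative, self-contained sketch of the buffered-write closure
    # built in __init__ above; 'sink', 'digest' and 'limit' are hypothetical
    # stand-ins for write_stream.write, _hash.update and _cache_limit:
    #
    #   def make_buffered_writer(sink, digest, limit):
    #       buf = [[], 0]  # (chunks, total length), shared via mutation
    #       def write(bytes, flush=False):
    #           buf[0].append(bytes)
    #           buf[1] += len(bytes)
    #           if buf[1] > limit or flush:
    #               data = ''.join(buf[0])
    #               sink(data)
    #               digest(data)
    #               buf[:] = [[], 0]
    #       return write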

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        # Groupcompress packs don't have any external references; arguably CHK
        # pages have external references, but we cannot 'cheaply' determine
        # them without actually walking all of the chk pages.


class ResumedGCPack(ResumedPack):

    def _check_references(self):
        """Make sure our external compression parents are present."""
        # See GCPack._check_references for why this is empty

    def _get_external_refs(self, index):
        # GC repositories don't have compression parents external to a given
        # pack file
        return set()


class GCCHKPacker(Packer):
    """This class understands what it takes to collect a GCCHK repo."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        super(GCCHKPacker, self).__init__(pack_collection, packs, suffix,
                                          revision_ids=revision_ids,
                                          reload_func=reload_func)
        self._pack_collection = pack_collection
        # ATM, we only support this for GCCHK repositories
        if pack_collection.chk_index is None:
            raise AssertionError('pack_collection.chk_index should not be None')
        self._gather_text_refs = False
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        self._text_refs = None
        # set by .pack() if self.revision_ids is not None
        self.revision_keys = None

    def _get_progress_stream(self, source_vf, keys, message, pb):
        def pb_stream():
            substream = source_vf.get_record_stream(keys, 'groupcompress', True)
            for idx, record in enumerate(substream):
                if pb is not None:
                    pb.update(message, idx + 1, len(keys))
                yield record
        return pb_stream()

    def _get_filtered_inv_stream(self, source_vf, keys, message, pb=None):
        """Filter the texts of inventories, to find the chk pages."""
        total_keys = len(keys)
        def _filtered_inv_stream():
            id_roots_set = set()
            p_id_roots_set = set()
            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
            for idx, record in enumerate(stream):
                # Inventories should always be with revisions; assume success.
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                             record.key)
                if pb is not None:
                    pb.update('inv', idx, total_keys)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                if p_id_map is None:
                    raise AssertionError('Parent id -> file_id map not set')
                key = p_id_map.key()
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
                yield record
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            id_roots_set.clear()
            p_id_roots_set.clear()
        return _filtered_inv_stream()
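
    # Each CHKInventory exposes two chk root keys: the id_to_entry map and the
    # parent_id_basename_to_file_id map. A hedged sketch of pulling them out
    # of a deserialised inventory (variables are illustrative):
    #
    #   chk_inv = inventory.CHKInventory.deserialise(None, bytes, key)
    #   id_root = chk_inv.id_to_entry.key()
    #   pid_root = chk_inv.parent_id_basename_to_file_id.key()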

    def _get_chk_streams(self, source_vf, keys, pb=None):
        # We want to stream the keys from 'id_roots', and things they
        # reference, and then stream things from p_id_roots and things they
        # reference, and then any remaining keys that we didn't get to.

        # We also group referenced texts together, so if one root references a
        # text with prefix 'a', and another root references a node with prefix
        # 'a', we want to yield those nodes before we yield the nodes for 'b'.
        # This keeps 'similar' nodes together.

        # Note: We probably actually want multiple streams here, to help the
        #       client understand that the different levels won't compress well
        #       against each other.
        #       Test the difference between using one Group per level, and
        #       using 1 Group per prefix. (so '' (root) would get a group, then
        #       all the references to search-key 'a' would get a group, etc.)
        total_keys = len(keys)
        remaining_keys = set(keys)
        counter = [0]
        if self._gather_text_refs:
            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
            self._text_refs = set()
        def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
            cur_keys = root_keys
            while cur_keys:
                keys_by_search_prefix = {}
                remaining_keys.difference_update(cur_keys)
                next_keys = set()
                def handle_internal_node(node):
                    for prefix, value in node._items.iteritems():
                        # We don't want to request the same key twice, and we
                        # want to order it by the first time it is seen.
                        # Even further, we don't want to request a key which is
                        # not in this group of pack files (it should be in the
                        # repo, but it doesn't have to be in the group being
                        # packed.)
                        # TODO: consider how to treat externally referenced chk
                        #       pages as 'external_references' so that we
                        #       always fill them in for stacked branches
                        if value not in next_keys and value in remaining_keys:
                            keys_by_search_prefix.setdefault(prefix,
                                []).append(value)
                            next_keys.add(value)
                def handle_leaf_node(node):
                    # Store is None, because we know we have a LeafNode, and we
                    # just want its entries
                    for file_id, bytes in node.iteritems(None):
                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
                        self._text_refs.add((file_id, revision_id))
                def next_stream():
                    stream = source_vf.get_record_stream(cur_keys,
                                                         'as-requested', True)
                    for record in stream:
                        if record.storage_kind == 'absent':
                            # An absent CHK record: we assume that the missing
                            # record is in a different pack - e.g. a page not
                            # altered by the commit we're packing.
                            continue
                        bytes = record.get_bytes_as('fulltext')
                        # We don't care about search_key_func for this code,
                        # because we only care about external references.
                        node = chk_map._deserialise(bytes, record.key,
                                                    search_key_func=None)
                        common_base = node._search_prefix
                        if isinstance(node, chk_map.InternalNode):
                            handle_internal_node(node)
                        elif parse_leaf_nodes:
                            handle_leaf_node(node)
                        counter[0] += 1
                        if pb is not None:
                            pb.update('chk node', counter[0], total_keys)
                        yield record
                yield next_stream()
                # Double check that we won't be emitting any keys twice
                # If we get rid of the pre-calculation of all keys, we could
                # turn this around and do
                # next_keys.difference_update(seen_keys)
                # However, we also may have references to chk pages in another
                # pack file during autopack. We filter earlier, so we should no
                # longer need to do this.
                # next_keys = next_keys.intersection(remaining_keys)
                cur_keys = []
                for prefix in sorted(keys_by_search_prefix):
                    cur_keys.extend(keys_by_search_prefix.pop(prefix))
        for stream in _get_referenced_stream(self._chk_id_roots,
                                             self._gather_text_refs):
            yield stream
        del self._chk_id_roots
        # While it isn't really possible for chk_id_roots to not be in the
        # local group of packs, it is possible that the tree shape has not
        # changed recently, so we need to filter _chk_p_id_roots by the
        # keys that are still remaining.
        chk_p_id_roots = [key for key in self._chk_p_id_roots
                          if key in remaining_keys]
        del self._chk_p_id_roots
        for stream in _get_referenced_stream(chk_p_id_roots, False):
            yield stream
        if remaining_keys:
            trace.mutter('There were %d keys in the chk index, %d of which'
                         ' were not referenced', total_keys,
                         len(remaining_keys))
            if self.revision_ids is None:
                stream = source_vf.get_record_stream(remaining_keys,
                                                     'unordered', True)
                yield stream
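
    # The walk above is breadth-first over the chk graph, grouping the keys
    # found at each level by search prefix so that similar pages are emitted
    # together. A minimal sketch of that ordering idea, assuming a
    # hypothetical children(key) -> [(prefix, child_key)] function:
    #
    #   def prefix_grouped_walk(roots, children):
    #       cur = list(roots)
    #       while cur:
    #           by_prefix = {}
    #           for key in cur:
    #               for prefix, child in children(key):
    #                   by_prefix.setdefault(prefix, []).append(child)
    #           yield cur
    #           cur = []
    #           for prefix in sorted(by_prefix):
    #               cur.extend(by_prefix.pop(prefix))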

    def _build_vf(self, index_name, parents, delta, for_write=False):
        """Build a VersionedFiles instance on top of this group of packs."""
        index_name = index_name + '_index'
        index_to_pack = {}
        access = knit._DirectPackAccess(index_to_pack)
        if for_write:
            # Use the new pack being created.
            if self.new_pack is None:
                raise AssertionError('No new pack has been set')
            index = getattr(self.new_pack, index_name)
            index_to_pack[index] = self.new_pack.access_tuple()
            index.set_optimize(for_size=True)
            access.set_writer(self.new_pack._writer, index,
                              self.new_pack.access_tuple())
            add_callback = index.add_nodes
        else:
            indices = []
            for pack in self.packs:
                sub_index = getattr(pack, index_name)
                index_to_pack[sub_index] = pack.access_tuple()
                indices.append(sub_index)
            index = _mod_index.CombinedGraphIndex(indices)
            add_callback = None
        vf = GroupCompressVersionedFiles(
            _GCGraphIndex(index,
                          add_callback=add_callback,
                          parents=parents,
                          is_locked=self._pack_collection.repo.is_locked),
            access=access,
            delta=delta)
        return vf

    def _build_vfs(self, index_name, parents, delta):
        """Build the source and target VersionedFiles."""
        source_vf = self._build_vf(index_name, parents,
                                   delta, for_write=False)
        target_vf = self._build_vf(index_name, parents,
                                   delta, for_write=True)
        return source_vf, target_vf
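
    # _build_vfs pairs a read-only view over the packs being combined with a
    # write view over self.new_pack. A typical (hypothetical) call:
    #
    #   source_vf, target_vf = packer._build_vfs('text', True, True)
    #   # records are read from source_vf and re-inserted into target_vf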

    def _copy_stream(self, source_vf, target_vf, keys, message, vf_to_stream,
                     pb_offset):
        trace.mutter('repacking %d %s', len(keys), message)
        self.pb.update('repacking %s' % (message,), pb_offset)
        child_pb = ui.ui_factory.nested_progress_bar()
        try:
            stream = vf_to_stream(source_vf, keys, message, child_pb)
            for _ in target_vf._insert_record_stream(stream,
                                                     random_id=True,
                                                     reuse_blocks=False):
                pass
        finally:
            child_pb.finished()

    def _copy_revision_texts(self):
        source_vf, target_vf = self._build_vfs('revision', True, False)
        if not self.revision_keys:
            # We are doing a full fetch, aka 'pack'
            self.revision_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'revisions', self._get_progress_stream, 1)

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        # It is not sufficient to just use self.revision_keys, as stacked
        # repositories can have more inventories than they have revisions.
        # One alternative would be to do something with
        # get_parent_map(self.revision_keys), but that shouldn't be any faster
        # than this.
        inventory_keys = source_vf.keys()
        missing_inventories = set(self.revision_keys).difference(inventory_keys)
        if missing_inventories:
            missing_inventories = sorted(missing_inventories)
            raise ValueError('We are missing inventories for revisions: %s'
                % (missing_inventories,))
        self._copy_stream(source_vf, target_vf, inventory_keys,
                          'inventories', self._get_filtered_inv_stream, 2)

    def _copy_chk_texts(self):
        source_vf, target_vf = self._build_vfs('chk', False, False)
        # TODO: This is technically spurious... if it is a performance issue,
        #       remove it
        total_keys = source_vf.keys()
        trace.mutter('repacking chk: %d id_to_entry roots,'
                     ' %d p_id_map roots, %d total keys',
                     len(self._chk_id_roots), len(self._chk_p_id_roots),
                     len(total_keys))
        self.pb.update('repacking chk', 3)
        child_pb = ui.ui_factory.nested_progress_bar()
        try:
            for stream in self._get_chk_streams(source_vf, total_keys,
                                                pb=child_pb):
                for _ in target_vf._insert_record_stream(stream,
                                                         random_id=True,
                                                         reuse_blocks=False):
                    pass
        finally:
            child_pb.finished()

    def _copy_text_texts(self):
        source_vf, target_vf = self._build_vfs('text', True, True)
        # XXX: We don't walk the chk map to determine referenced (file_id,
        #      revision_id) keys. We don't do it yet because you really need
        #      to filter out the ones that are present in the parents of the
        #      rev just before the ones you are copying, otherwise the filter
        #      is grabbing too many keys...
        text_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, text_keys,
                          'texts', self._get_progress_stream, 4)

    def _copy_signature_texts(self):
        source_vf, target_vf = self._build_vfs('signature', False, False)
        signature_keys = source_vf.keys()
        signature_keys = signature_keys.intersection(self.revision_keys)
        self._copy_stream(source_vf, target_vf, signature_keys,
                          'signatures', self._get_progress_stream, 5)
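
    # Note: set.intersection() returns a new set rather than mutating in
    # place, which is why its result is assigned above. For example:
    #
    #   >>> keys = set([('a',), ('b',)])
    #   >>> keys.intersection([('a',)])
    #   set([('a',)])
    #   >>> keys  # unchanged unless the result is assigned
    #   set([('a',), ('b',)])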

    def _create_pack_from_packs(self):
        self.pb.update('repacking', 0, 7)
        self.new_pack = self.open_pack()
        # Is this necessary for GC?
        self.new_pack.set_write_cache_size(1024*1024)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_chk_texts()
        self._copy_text_texts()
        self._copy_signature_texts()
        self.new_pack._check_references()
        if not self._use_pack(self.new_pack):
            self.new_pack.abort()
            return None
        self.new_pack.finish_content()
        if len(self.packs) == 1:
            old_pack = self.packs[0]
            if old_pack.name == self.new_pack._hash.hexdigest():
                # The single old pack was already optimally packed.
                trace.mutter('single pack %s was already optimally packed',
                             old_pack.name)
                self.new_pack.abort()
                return None
        self.pb.update('finishing repack', 6, 7)
        self.new_pack.finish()
        self._pack_collection.allocate(self.new_pack)
        return self.new_pack


class GCCHKReconcilePacker(GCCHKPacker):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def __init__(self, *args, **kwargs):
        super(GCCHKReconcilePacker, self).__init__(*args, **kwargs)
        self._data_changed = False
        self._gather_text_refs = True

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'inventories', self._get_filtered_inv_stream, 2)
        if source_vf.keys() != self.revision_keys:
            self._data_changed = True

    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        source_vf, target_vf = self._build_vfs('text', True, True)
        trace.mutter('repacking %d texts', len(self._text_refs))
        self.pb.update("repacking texts", 4)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        # We want the one we just wrote, so base it on self.new_pack
        revision_vf = self._build_vf('revision', True, False, for_write=True)
        ancestor_keys = revision_vf.get_parent_map(revision_vf.keys())
        # Strip keys back into revision_ids.
        ancestors = dict((k[0], tuple([p[0] for p in parents]))
                         for k, parents in ancestor_keys.iteritems())
        del ancestor_keys
        # TODO: _generate_text_key_index should be much cheaper to generate from
        #       a chk repository, rather than the current implementation
        ideal_index = repo._generate_text_key_index(None, ancestors)
        file_id_parent_map = source_vf.get_parent_map(self._text_refs)
        # 2) generate a keys list that contains all the entries that can
        #    be used as-is, with corrected parents.
        ok_keys = []
        new_parent_keys = {} # (key, parent_keys)
        discarded_keys = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        for key in self._text_refs:
            try:
                ideal_parents = tuple(ideal_index[key])
            except KeyError:
                discarded_keys.append(key)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                source_parents = file_id_parent_map[key]
                if ideal_parents == source_parents:
                    # no change needed.
                    ok_keys.append(key)
                else:
                    # We need to change the parent graph, but we don't need to
                    # re-insert the text (since we don't pun the compression
                    # parent with the parents list)
                    self._data_changed = True
                    new_parent_keys[key] = ideal_parents
        # we're finished with some data.
        del ideal_index
        del file_id_parent_map
        # 3) bulk copy the data, updating records that need it
        def _update_parents_for_texts():
            stream = source_vf.get_record_stream(self._text_refs,
                                                 'groupcompress', False)
            for record in stream:
                if record.key in new_parent_keys:
                    record.parents = new_parent_keys[record.key]
                yield record
        target_vf.insert_record_stream(_update_parents_for_texts())

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        return new_pack.data_inserted() and self._data_changed


class GCRepositoryPackCollection(RepositoryPackCollection):

    pack_factory = GCPack
    resumed_pack_factory = ResumedGCPack

    def _check_new_inventories(self):
        """Detect missing inventories or chk root entries for the new
        revisions in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # Ensure that all revisions added in this write group have:
        #   - corresponding inventories,
        #   - chk root entries for those inventories,
        #   - and any present parent inventories have their chk root
        #     entries too.
        # And all this should be independent of any fallback repository.
        problems = []
        key_deps = self.repo.revisions._index._key_dependencies
        new_revisions_keys = key_deps.get_new_keys()
        no_fallback_inv_index = self.repo.inventories._index
        no_fallback_chk_bytes_index = self.repo.chk_bytes._index
        no_fallback_texts_index = self.repo.texts._index
        inv_parent_map = no_fallback_inv_index.get_parent_map(
            new_revisions_keys)
        # Are any inventories corresponding to the new revisions missing?
        corresponding_invs = set(inv_parent_map)
        missing_corresponding = set(new_revisions_keys)
        missing_corresponding.difference_update(corresponding_invs)
        if missing_corresponding:
            problems.append("inventories missing for revisions %s" %
                (sorted(missing_corresponding),))
            return problems
        # Are any chk root entries missing for any inventories? This includes
        # any present parent inventories, which may be used when calculating
        # deltas for streaming.
        all_inv_keys = set(corresponding_invs)
        for parent_inv_keys in inv_parent_map.itervalues():
            all_inv_keys.update(parent_inv_keys)
        # Filter out ghost parents.
        all_inv_keys.intersection_update(
            no_fallback_inv_index.get_parent_map(all_inv_keys))
        parent_invs_only_keys = all_inv_keys.symmetric_difference(
            corresponding_invs)
        inv_ids = [key[-1] for key in all_inv_keys]
        parent_invs_only_ids = [key[-1] for key in parent_invs_only_keys]
        root_key_info = _build_interesting_key_sets(
            self.repo, inv_ids, parent_invs_only_ids)
        expected_chk_roots = root_key_info.all_keys()
        present_chk_roots = no_fallback_chk_bytes_index.get_parent_map(
            expected_chk_roots)
        missing_chk_roots = expected_chk_roots.difference(present_chk_roots)
        if missing_chk_roots:
            problems.append("missing referenced chk root keys: %s"
                % (sorted(missing_chk_roots),))
            # Don't bother checking any further.
            return problems
        # Find all interesting chk_bytes records, and make sure they are
        # present, as well as the text keys they reference.
        chk_bytes_no_fallbacks = self.repo.chk_bytes.without_fallbacks()
        chk_bytes_no_fallbacks._search_key_func = \
            self.repo.chk_bytes._search_key_func
        chk_diff = chk_map.iter_interesting_nodes(
            chk_bytes_no_fallbacks, root_key_info.interesting_root_keys,
            root_key_info.uninteresting_root_keys)
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        text_keys = set()
        try:
            for record in _filter_text_keys(chk_diff, text_keys, bytes_to_info):
                pass
        except errors.NoSuchRevision, e:
            # XXX: It would be nice if we could give a more precise error here.
            problems.append("missing chk node(s) for id_to_entry maps")
        chk_diff = chk_map.iter_interesting_nodes(
            chk_bytes_no_fallbacks, root_key_info.interesting_pid_root_keys,
            root_key_info.uninteresting_pid_root_keys)
        try:
            for interesting_rec, interesting_map in chk_diff:
                pass
        except errors.NoSuchRevision, e:
            problems.append(
                "missing chk node(s) for parent_id_basename_to_file_id maps")
        present_text_keys = no_fallback_texts_index.get_parent_map(text_keys)
        missing_text_keys = text_keys.difference(present_text_keys)
        if missing_text_keys:
            problems.append("missing text keys: %r"
                % (sorted(missing_text_keys),))
        return problems

    def _execute_pack_operations(self, pack_operations,
                                 _packer_class=GCCHKPacker,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default:
            GCCHKPacker).
        """
        # XXX: Copied across from RepositoryPackCollection simply because we
        #      want to override the _packer_class ... :(
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = GCCHKPacker(self, packs, '.autopack',
                                 reload_func=reload_func)
            try:
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        result = self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)
        return result


class CHKInventoryRepository(KnitPackRepository):
    """subclass of KnitPackRepository that uses CHK based inventories."""

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
                 _serializer):
        """Overridden to change pack collection class."""
        KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        # and now replace everything it did :)
        index_transport = self._transport.clone('indices')
        self._pack_collection = GCRepositoryPackCollection(self,
            self._transport, index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class,
            use_chk_index=self._format.supports_chks,
            )
        self.inventories = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.inventory_index.data_access)
        self.revisions = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                parents=True, is_locked=self.is_locked,
                track_external_parent_refs=True, track_new_keys=True),
            access=self._pack_collection.revision_index.data_access,
            delta=False)
        self.signatures = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.signature_index.data_access,
            delta=False)
        self.texts = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.text_index.data_access)
        # No parents, individual CHK pages don't have specific ancestry
        self.chk_bytes = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.chk_index.combined_index,
                add_callback=self._pack_collection.chk_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.chk_index.data_access)
        search_key_name = self._format._serializer.search_key_name
        search_key_func = chk_map.search_key_registry.get(search_key_name)
        self.chk_bytes._search_key_func = search_key_func
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        serializer = self._format._serializer
        result = inventory.CHKInventory.from_inventory(self.chk_bytes, inv,
            maximum_size=serializer.maximum_size,
            search_key_name=serializer.search_key_name)
        inv_lines = result.to_lines()
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def _create_inv_from_null(self, delta, revision_id):
        """This will mutate new_inv directly.

        This is a simplified form of create_by_apply_delta which knows that all
        the old values must be None, so everything is a create.
        """
        serializer = self._format._serializer
        new_inv = inventory.CHKInventory(serializer.search_key_name)
        new_inv.revision_id = revision_id
        entry_to_bytes = new_inv._entry_to_bytes
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for old_path, new_path, file_id, entry in delta:
            if old_path is not None:
                raise ValueError('Invalid delta, somebody tried to delete %r'
                                 ' from the NULL_REVISION'
                                 % ((old_path, file_id),))
            if new_path is None:
                raise ValueError('Invalid delta, delta from NULL_REVISION has'
                                 ' no new_path %r' % (file_id,))
            if new_path == '':
                new_inv.root_id = file_id
                parent_id_basename_key = StaticTuple('', '').intern()
            else:
                utf8_entry_name = entry.name.encode('utf-8')
                parent_id_basename_key = StaticTuple(entry.parent_id,
                                                     utf8_entry_name).intern()
            new_value = entry_to_bytes(entry)
            # Populate Caches?
            # new_inv._path_to_fileid_cache[new_path] = file_id
            key = StaticTuple(file_id).intern()
            id_to_entry_dict[key] = new_value
            parent_id_basename_dict[parent_id_basename_key] = file_id

        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=serializer.maximum_size)
        return new_inv
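
    # An inventory delta here is a list of (old_path, new_path, file_id,
    # entry) tuples, with every old_path None since everything is a create.
    # A hedged sketch of the expected shape (identifiers are illustrative):
    #
    #   delta = [
    #       (None, '', 'root-id', root_entry),    # the tree root
    #       (None, 'foo', 'foo-id', foo_entry),   # a file under the root
    #       ]
    #   new_inv = repo._create_inv_from_null(delta, 'rev-id-1')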

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against.
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = None
        if basis_inv is None:
            if basis_revision_id == _mod_revision.NULL_REVISION:
                new_inv = self._create_inv_from_null(delta, new_revision_id)
                inv_lines = new_inv.to_lines()
                return self._inventory_add_lines(new_revision_id, parents,
                    inv_lines, check_content=False), new_inv
            else:
                basis_tree = self.revision_tree(basis_revision_id)
                basis_tree.lock_read()
                basis_inv = basis_tree.inventory
        try:
            result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                propagate_caches=propagate_caches)
            inv_lines = result.to_lines()
            return self._inventory_add_lines(new_revision_id, parents,
                inv_lines, check_content=False), result
        finally:
            if basis_tree is not None:
                basis_tree.unlock()
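
    # Sketch of a typical call, inside a write group (values are
    # illustrative, not part of this module):
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       basis_revision_id, delta, new_revision_id, (basis_revision_id,))
    #   repo.commit_write_group()
    #   repo.unlock()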

    def deserialise_inventory(self, revision_id, bytes):
        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
            (revision_id,))

    def _iter_inventories(self, revision_ids, ordering):
        """Iterate over many inventory objects."""
        if ordering is None:
            ordering = 'unordered'
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, ordering, True)
        texts = {}
        for record in stream:
            if record.storage_kind != 'absent':
                texts[record.key] = record.get_bytes_as('fulltext')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        # Without a native 'xml' inventory, this method doesn't make sense.
        # However older working trees, and older bundles want it - so we supply
        # it allowing get_inventory_xml to work. Bundles currently use the
        # serializer directly; this also isn't ideal, but there isn't an xml
        # iteration interface offered at all for repositories. We could make
        # _iter_inventory_xmls be part of the contract, even if kept private.
        inv_to_str = self._serializer.write_inventory_to_string
        for inv in self.iter_inventories(revision_ids, ordering=ordering):
            yield inv_to_str(inv), inv.revision_id

    def _find_present_inventory_keys(self, revision_keys):
        parent_map = self.inventories.get_parent_map(revision_keys)
        present_inventory_keys = set(k for k in parent_map)
        return present_inventory_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids
            that altered it listed explicitly.
        """
        rich_root = self.supports_rich_root()
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        file_id_revisions = {}
        pb = ui.ui_factory.nested_progress_bar()
        try:
            revision_keys = [(r,) for r in revision_ids]
            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
            # TODO: instead of using _find_present_inventory_keys, change the
            #       code paths to allow missing inventories to be tolerated.
            #       However, we only want to tolerate missing parent
            #       inventories, not missing inventories for revision_ids
            present_parent_inv_keys = self._find_present_inventory_keys(
                                        parent_keys)
            present_parent_inv_ids = set(
                [k[-1] for k in present_parent_inv_keys])
            inventories_to_read = set(revision_ids)
            inventories_to_read.update(present_parent_inv_ids)
            root_key_info = _build_interesting_key_sets(
                self, inventories_to_read, present_parent_inv_ids)
            interesting_root_keys = root_key_info.interesting_root_keys
            uninteresting_root_keys = root_key_info.uninteresting_root_keys
            chk_bytes = self.chk_bytes
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        interesting_root_keys, uninteresting_root_keys,
                        pb=pb):
                for name, bytes in items:
                    (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
                    # TODO: consider interning file_id, revision_id here, or
                    #       pushing that intern() into bytes_to_info()
                    # TODO: rich_root should always be True here, for all
                    #       repositories that support chk_bytes
                    if not rich_root and name_utf8 == '':
                        continue
                    try:
                        file_id_revisions[file_id].add(revision_id)
                    except KeyError:
                        file_id_revisions[file_id] = set([revision_id])
        finally:
            pb.finished()
        return file_id_revisions

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        # XXX: Slow version but correct: rewrite as a series of delta
        # examinations/direct tree traversal. Note that that will require care
        # as a common node is reachable both from the inventory that added it,
        # and others afterwards.
        revision_keys = self.revisions.keys()
        result = {}
        rich_roots = self.supports_rich_root()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            all_revs = self.all_revision_ids()
            total = len(all_revs)
            for pos, inv in enumerate(self.iter_inventories(all_revs)):
                pb.update("Finding text references", pos, total)
                for _, entry in inv.iter_entries():
                    if not rich_roots and entry.file_id == inv.root_id:
                        continue
                    key = (entry.file_id, entry.revision)
                    result.setdefault(key, False)
                    if entry.revision == inv.revision_id:
                        result[key] = True
            return result
        finally:
            pb.finished()

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        packer = GCCHKReconcilePacker(collection, packs, extension)
        return packer.pack(pb)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        if self._format._serializer == to_format._serializer:
            # We must be exactly the same format, otherwise stuff like the chk
            # page layout might be different.
            # Actually, this test is just slightly looser than exact so that
            # CHK2 <-> 2a transfers will work.
            return GroupCHKStreamSource(self, to_format)
        return super(CHKInventoryRepository, self)._get_source(to_format)


class GroupCHKStreamSource(KnitPackStreamSource):
    """Used when both the source and target repo are GroupCHK repos."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
        self._revision_keys = None
        self._text_keys = None
        self._text_fetch_order = 'groupcompress'
        self._chk_id_roots = None
        self._chk_p_id_roots = None

    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
        """Get a stream of inventory texts.

        When this function returns, self._chk_id_roots and self._chk_p_id_roots
        should be populated.
        """
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        def _filtered_inv_stream():
            id_roots_set = set()
            p_id_roots_set = set()
            source_vf = self.from_repository.inventories
            stream = source_vf.get_record_stream(inventory_keys,
                                                 'groupcompress', True)
            for record in stream:
                if record.storage_kind == 'absent':
                    if allow_absent:
                        continue
                    else:
                        raise errors.NoSuchRevision(self, record.key)
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                             record.key)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                if p_id_map is None:
                    raise AssertionError('Parent id -> file_id map not set')
                key = p_id_map.key()
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
                yield record
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            id_roots_set.clear()
            p_id_roots_set.clear()
        return ('inventories', _filtered_inv_stream())
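
    # Substreams are (name, record_iterator) pairs; the 'inventories' name
    # tells the stream sink which versioned files to insert the records into.
    # Consuming the returned iterator also populates self._chk_id_roots and
    # self._chk_p_id_roots as a side effect, e.g. (hypothetical):
    #
    #   name, records = source._get_inventory_stream(keys)
    #   list(records)  # now source._chk_id_roots is populated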

    def _get_filtered_chk_streams(self, excluded_revision_keys):
        self._text_keys = set()
        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
        if not excluded_revision_keys:
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
        else:
            # filter out any excluded revisions whose inventories are not
            # actually present
            # TODO: Update Repository.iter_inventories() to add
            #       ignore_missing=True
            present_keys = self.from_repository._find_present_inventory_keys(
                excluded_revision_keys)
            present_ids = [k[-1] for k in present_keys]
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
            for inv in self.from_repository.iter_inventories(present_ids):
                uninteresting_root_keys.add(inv.id_to_entry.key())
                uninteresting_pid_root_keys.add(
                    inv.parent_id_basename_to_file_id.key())
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        chk_bytes = self.from_repository.chk_bytes
        def _filter_id_to_entry():
            interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_id_roots, uninteresting_root_keys)
            for record in _filter_text_keys(interesting_nodes, self._text_keys,
                    bytes_to_info):
                if record is not None:
                    yield record
            # Consumed
            self._chk_id_roots = None
        yield 'chk_bytes', _filter_id_to_entry()
        def _get_parent_id_basename_to_file_id_pages():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_p_id_roots, uninteresting_pid_root_keys):
                if record is not None:
                    yield record
            # Consumed
            self._chk_p_id_roots = None
        yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        self.from_repository.revisions.clear_cache()
        self.from_repository.signatures.clear_cache()
        yield self._get_inventory_stream(self._revision_keys)
        self.from_repository.inventories.clear_cache()
        # TODO: The keys to exclude might be part of the search recipe
        # For now, exclude all parents that are at the edge of ancestry, for
        # which we have inventories
        from_repo = self.from_repository
        parent_keys = from_repo._find_parent_keys_of_revisions(
            self._revision_keys)
        for stream_info in self._get_filtered_chk_streams(parent_keys):
            yield stream_info
        self.from_repository.chk_bytes.clear_cache()
        yield self._get_text_stream()
        self.from_repository.texts.clear_cache()

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        missing_inventory_keys = set()
        for key in missing_keys:
            if key[0] != 'inventories':
                raise AssertionError('The only missing keys we should'
                    ' be filling in are inventory keys, not %s'
                    % (key[0],))
            missing_inventory_keys.add(key[1:])
        if self._chk_id_roots or self._chk_p_id_roots:
            raise AssertionError('Cannot call get_stream_for_missing_keys'
                ' until all of get_stream() has been consumed.')
        # Yield the inventory stream, so we can find the chk stream
        # Some of the missing_keys will be missing because they are ghosts.
        # As such, we can ignore them. The Sink is required to verify there are
        # no unavailable texts when the ghost inventories are not filled in.
        yield self._get_inventory_stream(missing_inventory_keys,
                                         allow_absent=True)
        # We use the empty set for excluded_revision_keys, to make it clear
        # that we want to transmit all referenced chk pages.
        for stream_info in self._get_filtered_chk_streams(set()):
            yield stream_info


class _InterestingKeyInfo(object):
    def __init__(self):
        self.interesting_root_keys = set()
        self.interesting_pid_root_keys = set()
        self.uninteresting_root_keys = set()
        self.uninteresting_pid_root_keys = set()

    def all_interesting(self):
        return self.interesting_root_keys.union(self.interesting_pid_root_keys)

    def all_uninteresting(self):
        return self.uninteresting_root_keys.union(
            self.uninteresting_pid_root_keys)

    def all_keys(self):
        return self.all_interesting().union(self.all_uninteresting())
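
# A small illustration of the partition _build_interesting_key_sets (below)
# performs: roots from inventories being transmitted land in the
# 'interesting' sets, roots only reachable via parent inventories land in the
# 'uninteresting' sets (keys are illustrative):
#
#   info = _InterestingKeyInfo()
#   info.interesting_root_keys.add(('sha1:abc',))
#   info.uninteresting_root_keys.add(('sha1:def',))
#   assert ('sha1:abc',) in info.all_keys()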


def _build_interesting_key_sets(repo, inventory_ids, parent_only_inv_ids):
    result = _InterestingKeyInfo()
    for inv in repo.iter_inventories(inventory_ids, 'unordered'):
        root_key = inv.id_to_entry.key()
        pid_root_key = inv.parent_id_basename_to_file_id.key()
        if inv.revision_id in parent_only_inv_ids:
            result.uninteresting_root_keys.add(root_key)
            result.uninteresting_pid_root_keys.add(pid_root_key)
        else:
            result.interesting_root_keys.add(root_key)
            result.interesting_pid_root_keys.add(pid_root_key)
    return result


def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_info):
    """Iterate the result of iter_interesting_nodes, yielding the records
    and adding to text_keys.
    """
    for record, items in interesting_nodes_iterable:
        for name, bytes in items:
            # Note: we don't care about name_utf8, because groupcompress repos
            # are always rich-root, so there are no synthesised root records to
            # ignore.
            _, file_id, revision_id = bytes_to_info(bytes)
            file_id = intern(file_id)
            revision_id = intern(revision_id)
            text_keys.add(StaticTuple(file_id, revision_id).intern())
        yield record


class RepositoryFormatCHK1(RepositoryFormatPack):
    """A hashed CHK+group compress pack repository."""

    repository_class = CHKInventoryRepository
    supports_external_lookups = True
    supports_chks = True
    # For right now, setting this to True gives us InterModel1And2 rather
    # than InterDifferingSerializer
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    _serializer = chk_serializer.chk_serializer_255_bigpage
    _commit_inv_deltas = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex
    # Note: We cannot unpack a delta that references a text we haven't
    #       seen yet. There are 2 options, work in fulltexts, or require
    #       topological sorting. Using fulltexts is more optimal for local
    #       operations, because the source can be smart about extracting
    #       multiple in-a-row (and sharing strings). Topological is better
    #       for remote, because we access less data.
    _fetch_order = 'unordered'
    _fetch_uses_deltas = False # essentially ignored by the groupcompress code.
    fast_deltas = True
    pack_compresses = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development6-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - group compression and chk inventory'
                ' (needs bzr.dev from 1.14)\n')

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format - rich roots, group compression"
                " and chk inventories")


class RepositoryFormatCHK2(RepositoryFormatCHK1):
    """A CHK repository that uses the bencode revision serializer."""

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development7-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - chk repository with bencode '
                'revision serialization (needs bzr.dev from 1.16)\n')


class RepositoryFormat2a(RepositoryFormatCHK2):
    """A CHK repository that uses the bencode revision serializer.

    This is the same as RepositoryFormatCHK2 but with a public name.
    """

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('2a')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Repository format 2a - rich roots, group compression"
                " and chk inventories")
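
# For reference: the value returned by get_format_string() is the marker that
# identifies the repository format on disk (stored in the repository's
# 'format' file). A hedged sketch of checking it:
#
#   format = RepositoryFormat2a()
#   assert format.get_format_string() == (
#       'Bazaar repository format 2a (needs bzr 1.16 or later)\n')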