# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Repository formats using CHK inventories and groupcompress compression."""

import time

from bzrlib import (
    bzrdir, chk_map, chk_serializer, debug, errors, index as _mod_index,
    inventory, knit, osutils, pack, revision as _mod_revision, trace, ui,
    )
from bzrlib.btree_index import BTreeBuilder, BTreeGraphIndex
from bzrlib.groupcompress import _GCGraphIndex, GroupCompressVersionedFiles
from bzrlib.repofmt.pack_repo import (
    KnitPackRepository, KnitPackStreamSource, NewPack, Packer,
    PackRootCommitBuilder, RepositoryFormatPack, RepositoryPackCollection,
    ResumedPack,
    )


class GCPack(NewPack):

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """
        # replaced from NewPack to:
        # - change inventory reference list length to 1
        # - change texts reference lists to 1
        # TODO: patch this to be parameterised

        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            index_builder_class(reference_lists=1),
            # Texts: per file graph, for all fileids - so one reference list
            # and two elements in the key tuple.
            index_builder_class(reference_lists=1, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            trace.mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them. Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        # robertc says - this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        # what state is the pack in? (open, finished, aborted)
        # no name until we finish writing the content

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        # Groupcompress packs don't have any external references; arguably CHK
        # pages have external references, but we cannot 'cheaply' determine
        # them without actually walking all of the chk pages.


class ResumedGCPack(ResumedPack):

    def _check_references(self):
        """Make sure our external compression parents are present."""
        # See GCPack._check_references for why this is empty

    def _get_external_refs(self, index):
        # GC repositories don't have compression parents external to a given


class GCCHKPacker(Packer):
    """This class understands what it takes to collect a GCCHK repo."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        super(GCCHKPacker, self).__init__(pack_collection, packs, suffix,
                                          revision_ids=revision_ids,
                                          reload_func=reload_func)
        self._pack_collection = pack_collection
        # ATM, we only support this for GCCHK repositories
        if pack_collection.chk_index is None:
            raise AssertionError('pack_collection.chk_index should not be None')
        self._gather_text_refs = False
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        self._text_refs = None
        # set by .pack() if self.revision_ids is not None
        self.revision_keys = None

    def _get_progress_stream(self, source_vf, keys, message, pb):
        substream = source_vf.get_record_stream(keys, 'groupcompress', True)
        for idx, record in enumerate(substream):
            pb.update(message, idx + 1, len(keys))

    def _get_filtered_inv_stream(self, source_vf, keys, message, pb=None):
        """Filter the texts of inventories, to find the chk pages."""
        total_keys = len(keys)
        def _filtered_inv_stream():
            p_id_roots_set = set()
            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
            for idx, record in enumerate(stream):
                # Inventories should always be with revisions; assume success.
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                    pb.update('inv', idx, total_keys)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                    raise AssertionError('Parent id -> file_id map not set')
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            p_id_roots_set.clear()
        return _filtered_inv_stream()

    def _get_chk_streams(self, source_vf, keys, pb=None):
        # We want to stream the keys from 'id_roots', and things they
        # reference, and then stream things from p_id_roots and things they
        # reference, and then any remaining keys that we didn't get to.
        # We also group referenced texts together, so if one root references a
        # text with prefix 'a', and another root references a node with prefix
        # 'a', we want to yield those nodes before we yield the nodes for 'b'.
        # This keeps 'similar' nodes together.
        # Note: We probably actually want multiple streams here, to help the
        #       client understand that the different levels won't compress well
        #       against each other.
        #       Test the difference between using one Group per level, and
        #       using 1 Group per prefix. (so '' (root) would get a group, then
        #       all the references to search-key 'a' would get a group, etc.)
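        # For example (illustrative only): with root pages and child pages
        # reachable under search prefixes 'a' and 'b', the intended emission
        # order is the root group first, then every 'a'-prefixed page, then
        # every 'b'-prefixed page, so pages with similar content land in the
        # same compression group.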
        total_keys = len(keys)
        remaining_keys = set(keys)
        if self._gather_text_refs:
            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
            self._text_refs = set()
        def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
                keys_by_search_prefix = {}
                remaining_keys.difference_update(cur_keys)
                def handle_internal_node(node):
                    for prefix, value in node._items.iteritems():
                        # We don't want to request the same key twice, and we
                        # want to order it by the first time it is seen.
                        # Even further, we don't want to request a key which is
                        # not in this group of pack files (it should be in the
                        # repo, but it doesn't have to be in the group being
                        # TODO: consider how to treat externally referenced chk
                        #       pages as 'external_references' so that we
                        #       always fill them in for stacked branches
                        if value not in next_keys and value in remaining_keys:
                            keys_by_search_prefix.setdefault(prefix,
                def handle_leaf_node(node):
                    # Store is None, because we know we have a LeafNode, and we
                    # just want its entries
                    for file_id, bytes in node.iteritems(None):
                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
                        self._text_refs.add((file_id, revision_id))
                    stream = source_vf.get_record_stream(cur_keys,
                        'as-requested', True)
                    for record in stream:
                        if record.storage_kind == 'absent':
                            # An absent CHK record: we assume that the missing
                            # record is in a different pack - e.g. a page not
                            # altered by the commit we're packing.
                        bytes = record.get_bytes_as('fulltext')
                        # We don't care about search_key_func for this code,
                        # because we only care about external references.
                        node = chk_map._deserialise(bytes, record.key,
                                                    search_key_func=None)
                        common_base = node._search_prefix
                        if isinstance(node, chk_map.InternalNode):
                            handle_internal_node(node)
                        elif parse_leaf_nodes:
                            handle_leaf_node(node)
                            pb.update('chk node', counter[0], total_keys)
                # Double check that we won't be emitting any keys twice
                # If we get rid of the pre-calculation of all keys, we could
                # turn this around and do
                # next_keys.difference_update(seen_keys)
                # However, we also may have references to chk pages in another
                # pack file during autopack. We filter earlier, so we should no
                # longer need to do this
                # next_keys = next_keys.intersection(remaining_keys)
                for prefix in sorted(keys_by_search_prefix):
                    cur_keys.extend(keys_by_search_prefix.pop(prefix))
        for stream in _get_referenced_stream(self._chk_id_roots,
                                             self._gather_text_refs):
        del self._chk_id_roots
        # while it isn't really possible for chk_id_roots to not be in the
        # local group of packs, it is possible that the tree shape has not
        # changed recently, so we need to filter _chk_p_id_roots by the
        chk_p_id_roots = [key for key in self._chk_p_id_roots
                          if key in remaining_keys]
        del self._chk_p_id_roots
        for stream in _get_referenced_stream(chk_p_id_roots, False):
            trace.mutter('There were %d keys in the chk index, %d of which'
                         ' were not referenced', total_keys,
            if self.revision_ids is None:
                stream = source_vf.get_record_stream(remaining_keys,

    def _build_vf(self, index_name, parents, delta, for_write=False):
        """Build a VersionedFiles instance on top of this group of packs."""
        index_name = index_name + '_index'
        access = knit._DirectPackAccess(index_to_pack)
            if self.new_pack is None:
                raise AssertionError('No new pack has been set')
            index = getattr(self.new_pack, index_name)
            index_to_pack[index] = self.new_pack.access_tuple()
            index.set_optimize(for_size=True)
            access.set_writer(self.new_pack._writer, index,
                              self.new_pack.access_tuple())
            add_callback = index.add_nodes
            for pack in self.packs:
                sub_index = getattr(pack, index_name)
                index_to_pack[sub_index] = pack.access_tuple()
                indices.append(sub_index)
            index = _mod_index.CombinedGraphIndex(indices)
        vf = GroupCompressVersionedFiles(
            add_callback=add_callback,
            is_locked=self._pack_collection.repo.is_locked),

    def _build_vfs(self, index_name, parents, delta):
        """Build the source and target VersionedFiles."""
        source_vf = self._build_vf(index_name, parents,
                                   delta, for_write=False)
        target_vf = self._build_vf(index_name, parents,
                                   delta, for_write=True)
        return source_vf, target_vf

    def _copy_stream(self, source_vf, target_vf, keys, message, vf_to_stream,
                     pb_offset):
        trace.mutter('repacking %d %s', len(keys), message)
        self.pb.update('repacking %s' % (message,), pb_offset)
        child_pb = ui.ui_factory.nested_progress_bar()
            stream = vf_to_stream(source_vf, keys, message, child_pb)
            for _ in target_vf._insert_record_stream(stream,

    def _copy_revision_texts(self):
        source_vf, target_vf = self._build_vfs('revision', True, False)
        if not self.revision_keys:
            # We are doing a full fetch, aka 'pack'
            self.revision_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'revisions', self._get_progress_stream, 1)

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        # It is not sufficient to just use self.revision_keys, as stacked
        # repositories can have more inventories than they have revisions.
        # One alternative would be to do something with
        # get_parent_map(self.revision_keys), but that shouldn't be any faster
        inventory_keys = source_vf.keys()
        missing_inventories = set(self.revision_keys).difference(inventory_keys)
        if missing_inventories:
            missing_inventories = sorted(missing_inventories)
            raise ValueError('We are missing inventories for revisions: %s'
                % (missing_inventories,))
        self._copy_stream(source_vf, target_vf, inventory_keys,
                          'inventories', self._get_filtered_inv_stream, 2)

    def _copy_chk_texts(self):
        source_vf, target_vf = self._build_vfs('chk', False, False)
        # TODO: This is technically spurious... if it is a performance issue,
        total_keys = source_vf.keys()
        trace.mutter('repacking chk: %d id_to_entry roots,'
                     ' %d p_id_map roots, %d total keys',
                     len(self._chk_id_roots), len(self._chk_p_id_roots),
        self.pb.update('repacking chk', 3)
        child_pb = ui.ui_factory.nested_progress_bar()
            for stream in self._get_chk_streams(source_vf, total_keys,
                for _ in target_vf._insert_record_stream(stream,

    def _copy_text_texts(self):
        source_vf, target_vf = self._build_vfs('text', True, True)
        # XXX: We don't walk the chk map to determine referenced (file_id,
        #      revision_id) keys. We don't do it yet because you really need
        #      to filter out the ones that are present in the parents of the
        #      rev just before the ones you are copying, otherwise the filter
        #      is grabbing too many keys...
        text_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, text_keys,
                          'texts', self._get_progress_stream, 4)

    def _copy_signature_texts(self):
        source_vf, target_vf = self._build_vfs('signature', False, False)
        signature_keys = source_vf.keys()
        signature_keys = signature_keys.intersection(self.revision_keys)
        self._copy_stream(source_vf, target_vf, signature_keys,
                          'signatures', self._get_progress_stream, 5)

    def _create_pack_from_packs(self):
        self.pb.update('repacking', 0, 7)
        self.new_pack = self.open_pack()
        # Is this necessary for GC?
        self.new_pack.set_write_cache_size(1024*1024)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_chk_texts()
        self._copy_text_texts()
        self._copy_signature_texts()
        self.new_pack._check_references()
        if not self._use_pack(self.new_pack):
            self.new_pack.abort()
        self.new_pack.finish_content()
        if len(self.packs) == 1:
            old_pack = self.packs[0]
            if old_pack.name == self.new_pack._hash.hexdigest():
                # The single old pack was already optimally packed.
                trace.mutter('single pack %s was already optimally packed',
                self.new_pack.abort()
        self.pb.update('finishing repack', 6, 7)
        self.new_pack.finish()
        self._pack_collection.allocate(self.new_pack)


class GCCHKReconcilePacker(GCCHKPacker):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    """

    def __init__(self, *args, **kwargs):
        super(GCCHKReconcilePacker, self).__init__(*args, **kwargs)
        self._data_changed = False
        self._gather_text_refs = True

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'inventories', self._get_filtered_inv_stream, 2)
        if source_vf.keys() != self.revision_keys:
            self._data_changed = True

    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        source_vf, target_vf = self._build_vfs('text', True, True)
        trace.mutter('repacking %d texts', len(self._text_refs))
        self.pb.update("repacking texts", 4)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        # We want the one we just wrote, so base it on self.new_pack
        revision_vf = self._build_vf('revision', True, False, for_write=True)
        ancestor_keys = revision_vf.get_parent_map(revision_vf.keys())
        # Strip keys back into revision_ids.
        ancestors = dict((k[0], tuple([p[0] for p in parents]))
                         for k, parents in ancestor_keys.iteritems())
        # TODO: _generate_text_key_index should be much cheaper to generate from
        #       a chk repository, rather than the current implementation
        ideal_index = repo._generate_text_key_index(None, ancestors)
        file_id_parent_map = source_vf.get_parent_map(self._text_refs)
        # 2) generate a keys list that contains all the entries that can
        #    be used as-is, with corrected parents.
        new_parent_keys = {} # (key, parent_keys)
        NULL_REVISION = _mod_revision.NULL_REVISION
        for key in self._text_refs:
                ideal_parents = tuple(ideal_index[key])
                discarded_keys.append(key)
                self._data_changed = True
                if ideal_parents == (NULL_REVISION,):
                source_parents = file_id_parent_map[key]
                if ideal_parents == source_parents:
                    # We need to change the parent graph, but we don't need to
                    # re-insert the text (since we don't pun the compression
                    # parent with the parents list)
                    self._data_changed = True
                    new_parent_keys[key] = ideal_parents
        # we're finished with some data.
        del file_id_parent_map
        # 3) bulk copy the data, updating records that need it
        def _update_parents_for_texts():
            stream = source_vf.get_record_stream(self._text_refs,
                'groupcompress', False)
            for record in stream:
                if record.key in new_parent_keys:
                    record.parents = new_parent_keys[record.key]
        target_vf.insert_record_stream(_update_parents_for_texts())

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        return new_pack.data_inserted() and self._data_changed


class GCRepositoryPackCollection(RepositoryPackCollection):

    pack_factory = GCPack
    resumed_pack_factory = ResumedGCPack

    def _execute_pack_operations(self, pack_operations,
                                 _packer_class=GCCHKPacker,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: GCCHKPacker).
        """
        # XXX: Copied across from RepositoryPackCollection simply because we
        #      want to override the _packer_class ... :(
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            packer = GCCHKPacker(self, packs, '.autopack',
                                 reload_func=reload_func)
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)


class CHKInventoryRepository(KnitPackRepository):
    """Subclass of KnitPackRepository that uses CHK based inventories."""

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
                 _serializer):
        """Overridden to change pack collection class."""
        KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        # and now replace everything it did :)
        index_transport = self._transport.clone('indices')
        self._pack_collection = GCRepositoryPackCollection(self,
            self._transport, index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            use_chk_index=self._format.supports_chks,
        self.inventories = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.inventory_index.data_access)
        self.revisions = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                parents=True, is_locked=self.is_locked,
                track_external_parent_refs=True),
            access=self._pack_collection.revision_index.data_access,
        self.signatures = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.signature_index.data_access,
        self.texts = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.text_index.data_access)
        # No parents, individual CHK pages don't have specific ancestry
        self.chk_bytes = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.chk_index.combined_index,
                add_callback=self._pack_collection.chk_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.chk_index.data_access)
        search_key_name = self._format._serializer.search_key_name
        search_key_func = chk_map.search_key_registry.get(search_key_name)
        self.chk_bytes._search_key_func = search_key_func
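        # The serializer's search_key_name selects the function (registered in
        # chk_map.search_key_registry) that maps CHK map keys to their search
        # keys; chk_bytes is given the same function so new CHK pages are laid
        # out consistently with this format.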
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        serializer = self._format._serializer
        result = inventory.CHKInventory.from_inventory(self.chk_bytes, inv,
            maximum_size=serializer.maximum_size,
            search_key_name=serializer.search_key_name)
        inv_lines = result.to_lines()
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def _create_inv_from_null(self, delta, revision_id):
        """This will mutate new_inv directly.

        This is a simplified form of create_by_apply_delta which knows that all
        the old values must be None, so everything is a create.
        """
        serializer = self._format._serializer
        new_inv = inventory.CHKInventory(serializer.search_key_name)
        new_inv.revision_id = revision_id
        entry_to_bytes = new_inv._entry_to_bytes
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for old_path, new_path, file_id, entry in delta:
            if old_path is not None:
                raise ValueError('Invalid delta, somebody tried to delete %r'
                    ' from the NULL_REVISION'
                    % ((old_path, file_id),))
                raise ValueError('Invalid delta, delta from NULL_REVISION has'
                    ' no new_path %r' % (file_id,))
                new_inv.root_id = file_id
                parent_id_basename_key = ('', '')
                utf8_entry_name = entry.name.encode('utf-8')
                parent_id_basename_key = (entry.parent_id, utf8_entry_name)
            new_value = entry_to_bytes(entry)
            # new_inv._path_to_fileid_cache[new_path] = file_id
            id_to_entry_dict[(file_id,)] = new_value
            parent_id_basename_dict[parent_id_basename_key] = file_id
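        # The two plain dicts built above are turned into CHK maps below:
        #   id_to_entry:                   (file_id,) -> serialised entry bytes
        #   parent_id_basename_to_file_id: ('', '') for the root, otherwise
        #       (parent_id, basename_utf8), each mapping to the entry's file_id.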
        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=serializer.maximum_size)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
        :param delta: The inventory delta (see Inventory.apply_delta for
        :param new_revision_id: The revision id that the inventory is being
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
        :param basis_inv: The basis inventory if it is already known,
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        if basis_inv is None:
            if basis_revision_id == _mod_revision.NULL_REVISION:
                new_inv = self._create_inv_from_null(delta, new_revision_id)
                inv_lines = new_inv.to_lines()
                return self._inventory_add_lines(new_revision_id, parents,
                    inv_lines, check_content=False), new_inv
                basis_tree = self.revision_tree(basis_revision_id)
                basis_tree.lock_read()
                basis_inv = basis_tree.inventory
            result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                propagate_caches=propagate_caches)
            inv_lines = result.to_lines()
            return self._inventory_add_lines(new_revision_id, parents,
                inv_lines, check_content=False), result
            if basis_tree is not None:

    def deserialise_inventory(self, revision_id, bytes):
        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,

    def _iter_inventories(self, revision_ids, ordering):
        """Iterate over many inventory objects."""
            ordering = 'unordered'
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, ordering, True)
        for record in stream:
            if record.storage_kind != 'absent':
                texts[record.key] = record.get_bytes_as('fulltext')
                raise errors.NoSuchRevision(self, record.key)
            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        # Without a native 'xml' inventory, this method doesn't make sense.
        # However, older working trees and older bundles want it, so we supply
        # it, allowing get_inventory_xml to work. Bundles currently use the
        # serializer directly; this also isn't ideal, but there isn't an xml
        # iteration interface offered at all for repositories. We could make
        # _iter_inventory_xmls be part of the contract, even if kept private.
        inv_to_str = self._serializer.write_inventory_to_string
        for inv in self.iter_inventories(revision_ids, ordering=ordering):
            yield inv_to_str(inv), inv.revision_id

    def _find_present_inventory_keys(self, revision_keys):
        parent_map = self.inventories.get_parent_map(revision_keys)
        present_inventory_keys = set(k for k in parent_map)
        return present_inventory_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        rich_root = self.supports_rich_root()
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        file_id_revisions = {}
        pb = ui.ui_factory.nested_progress_bar()
            revision_keys = [(r,) for r in revision_ids]
            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
            # TODO: instead of using _find_present_inventory_keys, change the
            #       code paths to allow missing inventories to be tolerated.
            #       However, we only want to tolerate missing parent
            #       inventories, not missing inventories for revision_ids
            present_parent_inv_keys = self._find_present_inventory_keys(
            present_parent_inv_ids = set(
                [k[-1] for k in present_parent_inv_keys])
            uninteresting_root_keys = set()
            interesting_root_keys = set()
            inventories_to_read = set(revision_ids)
            inventories_to_read.update(present_parent_inv_ids)
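            # Roots of the inventories we were asked about are 'interesting';
            # roots of their parent inventories that are present locally are
            # 'uninteresting', so the chk_map walk below only visits pages
            # introduced by the requested revisions.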
            for inv in self.iter_inventories(inventories_to_read):
                entry_chk_root_key = inv.id_to_entry.key()
                if inv.revision_id in present_parent_inv_ids:
                    uninteresting_root_keys.add(entry_chk_root_key)
                    interesting_root_keys.add(entry_chk_root_key)
            chk_bytes = self.chk_bytes
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        interesting_root_keys, uninteresting_root_keys,
                for name, bytes in items:
                    (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
                    if not rich_root and name_utf8 == '':
                        file_id_revisions[file_id].add(revision_id)
                        file_id_revisions[file_id] = set([revision_id])
        return file_id_revisions

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        # XXX: Slow version but correct: rewrite as a series of delta
        # examinations/direct tree traversal. Note that that will require care
        # as a common node is reachable both from the inventory that added it,
        # and others afterwards.
        revision_keys = self.revisions.keys()
        rich_roots = self.supports_rich_root()
        pb = ui.ui_factory.nested_progress_bar()
            all_revs = self.all_revision_ids()
            total = len(all_revs)
            for pos, inv in enumerate(self.iter_inventories(all_revs)):
                pb.update("Finding text references", pos, total)
                for _, entry in inv.iter_entries():
                    if not rich_roots and entry.file_id == inv.root_id:
                    key = (entry.file_id, entry.revision)
                    result.setdefault(key, False)
                    if entry.revision == inv.revision_id:

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        packer = GCCHKReconcilePacker(collection, packs, extension)
        return packer.pack(pb)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        if self._format._serializer == to_format._serializer:
            # We must be exactly the same format, otherwise stuff like the chk
            # page layout might be different.
            # Actually, this test is just slightly looser than exact so that
            # CHK2 <-> 2a transfers will work.
            return GroupCHKStreamSource(self, to_format)
        return super(CHKInventoryRepository, self)._get_source(to_format)


class GroupCHKStreamSource(KnitPackStreamSource):
    """Used when both the source and target repo are GroupCHK repos."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
        self._revision_keys = None
        self._text_keys = None
        self._text_fetch_order = 'groupcompress'
        self._chk_id_roots = None
        self._chk_p_id_roots = None

    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
        """Get a stream of inventory texts.

        When this function returns, self._chk_id_roots and self._chk_p_id_roots
        """
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        def _filtered_inv_stream():
            p_id_roots_set = set()
            source_vf = self.from_repository.inventories
            stream = source_vf.get_record_stream(inventory_keys,
                                                 'groupcompress', True)
            for record in stream:
                if record.storage_kind == 'absent':
                        raise errors.NoSuchRevision(self, record.key)
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                    raise AssertionError('Parent id -> file_id map not set')
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            p_id_roots_set.clear()
        return ('inventories', _filtered_inv_stream())

    def _get_filtered_chk_streams(self, excluded_revision_keys):
        self._text_keys = set()
        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
        if not excluded_revision_keys:
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
            # filter out any excluded revisions whose inventories are not
            # TODO: Update Repository.iter_inventories() to add
            #       ignore_missing=True
            present_keys = self.from_repository._find_present_inventory_keys(
                excluded_revision_keys)
            present_ids = [k[-1] for k in present_keys]
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
            for inv in self.from_repository.iter_inventories(present_ids):
                uninteresting_root_keys.add(inv.id_to_entry.key())
                uninteresting_pid_root_keys.add(
                    inv.parent_id_basename_to_file_id.key())
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        chk_bytes = self.from_repository.chk_bytes
        def _filter_id_to_entry():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_id_roots, uninteresting_root_keys):
                for name, bytes in items:
                    # Note: we don't care about name_utf8, because we are always
                    _, file_id, revision_id = bytes_to_info(bytes)
                    self._text_keys.add((file_id, revision_id))
                if record is not None:
            self._chk_id_roots = None
        yield 'chk_bytes', _filter_id_to_entry()
        def _get_parent_id_basename_to_file_id_pages():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_p_id_roots, uninteresting_pid_root_keys):
                if record is not None:
            self._chk_p_id_roots = None
        yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

    def get_stream(self, search):
        revision_ids = search.get_keys()
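        # Stream order: revision texts first, then the inventories for those
        # revisions, then the chk pages they reference (excluding pages
        # already reachable from parent inventories at the edge of the
        # ancestry), then the file texts; the text keys are collected while
        # the chk pages are streamed.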
        for stream_info in self._fetch_revision_texts(revision_ids):
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_inventory_stream(self._revision_keys)
        # TODO: The keys to exclude might be part of the search recipe
        # For now, exclude all parents that are at the edge of ancestry, for
        # which we have inventories
        from_repo = self.from_repository
        parent_keys = from_repo._find_parent_keys_of_revisions(
            self._revision_keys)
        for stream_info in self._get_filtered_chk_streams(parent_keys):
        yield self._get_text_stream()

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        missing_inventory_keys = set()
        for key in missing_keys:
            if key[0] != 'inventories':
                raise AssertionError('The only missing keys we should'
                    ' be filling in are inventory keys, not %s'
            missing_inventory_keys.add(key[1:])
        if self._chk_id_roots or self._chk_p_id_roots:
            raise AssertionError('Cannot call get_stream_for_missing_keys'
                ' until all of get_stream() has been consumed.')
        # Yield the inventory stream, so we can find the chk stream
        # Some of the missing_keys will be missing because they are ghosts.
        # As such, we can ignore them. The Sink is required to verify there are
        # no unavailable texts when the ghost inventories are not filled in.
        yield self._get_inventory_stream(missing_inventory_keys,
        # We use the empty set for excluded_revision_keys, to make it clear
        # that we want to transmit all referenced chk pages.
        for stream_info in self._get_filtered_chk_streams(set()):


class RepositoryFormatCHK1(RepositoryFormatPack):
    """A hashed CHK+group compress pack repository."""

    repository_class = CHKInventoryRepository
    supports_external_lookups = True
    supports_chks = True
    # For right now, setting this to True gives us InterModel1And2 rather
    # than InterDifferingSerializer
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    _serializer = chk_serializer.chk_serializer_255_bigpage
    _commit_inv_deltas = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex
    # Note: We cannot unpack a delta that references a text we haven't
    #       seen yet. There are 2 options, work in fulltexts, or require
    #       topological sorting. Using fulltexts is more optimal for local
    #       operations, because the source can be smart about extracting
    #       multiple in-a-row (and sharing strings). Topological is better
    #       for remote, because we access less data.
    _fetch_order = 'unordered'
    _fetch_uses_deltas = False # essentially ignored by the groupcompress code.

    pack_compresses = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development6-rich-root')

    def _ignore_setting_bzrdir(self, format):

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - group compression and chk inventory'
                ' (needs bzr.dev from 1.14)\n')

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format - rich roots, group compression"
                " and chk inventories")


class RepositoryFormatCHK2(RepositoryFormatCHK1):
    """A CHK repository that uses the bencode revision serializer."""

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development7-rich-root')

    def _ignore_setting_bzrdir(self, format):

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - chk repository with bencode '
                'revision serialization (needs bzr.dev from 1.16)\n')


class RepositoryFormat2a(RepositoryFormatCHK2):
    """A CHK repository that uses the bencode revision serializer.

    This is the same as RepositoryFormatCHK2 but with a public name.
    """

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('2a')

    def _ignore_setting_bzrdir(self, format):

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')