        yield self._get_filtered_inv_stream(revision_ids)
        yield self._get_text_stream()


class KnitReconcilePacker(Packer):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def _extra_init(self):
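        # Tracks whether reconcile actually changed any data; _use_pack below
        # checks it to decide whether the repacked pack should be kept.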
        self._data_changed = False

    def _process_inventory_lines(self, inv_lines):
        """Generate a text key reference map for reconciling with."""
        repo = self._pack_collection.repo
        refs = repo._serializer._find_text_key_references(inv_lines)
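        # refs maps text keys -- (file_id, revision_id) tuples -- to whether
        # the inventory of that revision actually referenced the key.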
        self._text_refs = refs
        # during reconcile we:
        #  - convert unreferenced texts to full texts
        #  - correct texts which reference a text not copied to be full texts
        #  - copy all others as-is but with corrected parents.
        #  - so at this point we don't know enough to decide what becomes a full
        #    text.
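        # Leaving _text_filter as None makes the base copying code process
        # every text rather than a filtered subset.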
        self._text_filter = None

    def _copy_text_texts(self):
        """Generate the texts we should have and then copy them."""
        self.pb.update("Copying content texts", 3)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
            _1, key, _2, refs in
            self.new_pack.revision_index.iter_all_entries()])
        ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
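        # ideal_index maps each text key to the parent keys it should have,
        # e.g. (hypothetical keys) {('f-id', 'rev2'): [('f-id', 'rev1')]}.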
        # 2) generate a text_nodes list that contains all the deltas that can
        #    be used as-is, with corrected parents.
        ok_nodes = []
        bad_texts = []
        discarded_nodes = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        text_index_map, text_nodes = self._get_text_nodes()
        for node in text_nodes:
            # node layout: 0 - index, 1 - key, 2 - value, 3 - refs
            try:
                ideal_parents = tuple(ideal_index[node[1]])
            except KeyError:
                discarded_nodes.append(node)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
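                    # a lone NULL_REVISION parent means "no parents";
                    # normalise so the comparisons below see empty refs.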
                    ideal_parents = ()
                if ideal_parents == node[3][0]:
                    # no change needed.
                    ok_nodes.append(node)
                elif ideal_parents[0:1] == node[3][0][0:1]:
                    # the left most parent is the same, or there are no parents
                    # today. Either way, we can preserve the representation as
                    # long as we change the refs to be inserted.
                    self._data_changed = True
                    ok_nodes.append((node[0], node[1], node[2],
                        (ideal_parents, node[3][1])))
                else:
                    # Reinsert this text completely
                    bad_texts.append((node[1], ideal_parents))
                    self._data_changed = True
        # we're finished with some data.
        del ideal_index
        del text_nodes
        # 3) bulk copy the ok data
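        # _least_readv_node_readv groups the ok nodes into requests ordered
        # to read the source packs with as few readv calls as possible.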
        total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        # 4) adhoc copy all the other texts.
        # We have to topologically insert all texts otherwise we can fail to
        # reconcile when parts of a single delta chain are preserved intact,
        # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
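        # For example (hypothetical graph), tsort.topo_sort({'A': (),
        # 'B': ('A',), 'C': ('B',)}) -> ['A', 'B', 'C']: parents first.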
        topo_order = tsort.topo_sort(ancestors)
        rev_order = dict(zip(topo_order, range(len(topo_order))))
        bad_texts.sort(key=lambda key: rev_order.get(key[0][1], 0))
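        # key[0] is the text key (file_id, revision_id); ranking by the
        # revision's topological order inserts delta bases before dependents.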
        transaction = repo.get_transaction()
        file_id_index = GraphIndexPrefixAdapter(
            self.new_pack.text_index,
            ('blank', ), 1,
            add_nodes_callback=self.new_pack.text_index.add_nodes)
        data_access = knit._DirectPackAccess(
            {self.new_pack.text_index: self.new_pack.access_tuple()})
        data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
            self.new_pack.access_tuple())
        output_texts = KnitVersionedFiles(
            _KnitGraphIndex(self.new_pack.text_index,
                add_callback=self.new_pack.text_index.add_nodes,
                deltas=True, parents=True, is_locked=repo.is_locked),
            data_access=data_access, max_delta_chain=200)
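        # output_texts writes straight into the new pack, delta-compressing
        # each text against what is already present (chains capped at 200).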
        for key, parent_keys in bad_texts:
            # We delta against data already written to the new pack, so flush
            # it before reading. A possible improvement would be to catch
            # errors on short reads and only flush then.
            self.new_pack.flush()
            parents = []
            for parent_key in parent_keys:
                if parent_key[0] != key[0]:
                    # Graph parents must match the fileid
                    raise errors.BzrError('Mismatched key parent %r:%r' %
                        (key, parent_keys))
                parents.append(parent_key[1])
            text_lines = osutils.split_lines(repo.texts.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext'))
            output_texts.add_lines(key, parent_keys, text_lines,
                random_id=True, check_content=False)
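            # random_id=True skips the duplicate-key check (the key is known
            # to be absent) and check_content=False trusts the split lines.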
        # 5) check that nothing inserted has a reference outside the keyspace.
        missing_text_keys = self.new_pack.text_index._external_references()
        if missing_text_keys:
            raise errors.BzrCheckError('Reference to missing compression parents %r'
                % (missing_text_keys,))
        self._log_copied_texts()

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        # XXX: we might be better checking this at the copy time.
        original_inventory_keys = set()
        inv_index = self._pack_collection.inventory_index.combined_index
        for entry in inv_index.iter_all_entries():
            original_inventory_keys.add(entry[1])
        new_inventory_keys = set()
        for entry in new_pack.inventory_index.iter_all_entries():
            new_inventory_keys.add(entry[1])
        if new_inventory_keys != original_inventory_keys:
            self._data_changed = True
        return new_pack.data_inserted() and self._data_changed