def annotate(knit, revision_id):
    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate_flat(revision_id))
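# A minimal usage sketch (hypothetical key; 'knit' is any KnitVersionedFiles
# whose index knows that key):
#
#   for origin_key, line in annotate(knit, ('rev-1',)):
#       sys.stdout.write('%s | %s' % (origin_key, line))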


class _KnitAnnotator(annotate.Annotator):
    """Build up the annotations for a text."""

    def __init__(self, vf):
        annotate.Annotator.__init__(self, vf)

        # TODO: handle Nodes which cannot be extracted
        # self._ghosts = set()

        # Map from (key, parent_key) => matching_blocks, should be 'use once'
        self._matching_blocks = {}

        # KnitContent objects
        self._content_objects = {}
        # The number of children that depend on this fulltext content object
        self._num_compression_children = {}
        # Delta records that need their compression parent before they can be
        # expanded
        self._pending_deltas = {}
        # Fulltext records that are waiting for their parents fulltexts before
        # they can be yielded for annotation
        self._pending_annotation = {}

        self._all_build_details = {}
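        # For orientation, the shapes these maps take (as filled in by the
        # methods below, not an API contract):
        #   _matching_blocks[(key, parent_key)]
        #       -> [(parent_idx, text_idx, length), ...]
        #   _pending_deltas[parent_key]
        #       -> [(key, parent_keys, record, record_details), ...]
        #   _pending_annotation[parent_key]
        #       -> [(key, parent_keys), ...]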

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.
        The data you need for creating a full text may be different than the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate
        the fulltext.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        pending = set([key])
        records = []
        ann_keys = set()
        self._num_needed_children[key] = 1
        while pending:
            # get all pending nodes
            this_iteration = pending
            build_details = self._vf._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._vf._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parent_keys,
                 record_details) = details
                self._parent_map[key] = parent_keys
                self._heads_provider = None
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update([p for p in parent_keys
                                if p not in self._all_build_details])
                for parent_key in parent_keys:
                    if parent_key in self._num_needed_children:
                        self._num_needed_children[parent_key] += 1
                    else:
                        self._num_needed_children[parent_key] = 1
                if compression_parent:
                    if compression_parent in self._num_compression_children:
                        self._num_compression_children[compression_parent] += 1
                    else:
                        self._num_compression_children[compression_parent] = 1

            missing_versions = this_iteration.difference(build_details.keys())
            if missing_versions:
                for key in missing_versions:
                    if key in self._parent_map and key in self._text_cache:
                        # We already have this text ready, we just need to
                        # yield it later so we get it annotated
                        ann_keys.add(key)
                        parent_keys = self._parent_map[key]
                        for parent_key in parent_keys:
                            if parent_key in self._num_needed_children:
                                self._num_needed_children[parent_key] += 1
                            else:
                                self._num_needed_children[parent_key] = 1
                        pending.update([p for p in parent_keys
                                        if p not in self._all_build_details])
                    else:
                        raise errors.RevisionNotPresent(key, self._vf)
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records, ann_keys
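    # Shape of the return value, for orientation (index_memo stays opaque):
    #   records  -> [(key, index_memo), ...], reversed so compression bases
    #               tend to be read before the deltas built on them
    #   ann_keys -> keys whose text is already in self._text_cache and only
    #               still needs to be yielded for annotation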

    def _get_needed_texts(self, key, pb=None):
        # if True or len(self._vf._fallback_vfs) > 0:
        if len(self._vf._fallback_vfs) > 0:
            # If we have fallbacks, go to the generic path
            for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
                yield v
            return
        while True:
            try:
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                        self._extract_texts(records)):
                    if pb is not None:
                        pb.update('annotating', idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text) # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks, e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()
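    # The retry loop above exists because a concurrent repack can invalidate
    # the pack files being read; reload_or_raise() re-reads the pack list (or
    # re-raises when reloading cannot help), and the build graph is then
    # recomputed with fresh build details.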

    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines,
                                                        lines))
        self._matching_blocks[(key, compression_parent)] = blocks

    def _expand_record(self, key, parent_keys, compression_parent, record,
                       record_details):
        delta = None
        if compression_parent:
            if compression_parent not in self._content_objects:
                # Waiting for the parent
                self._pending_deltas.setdefault(compression_parent, []).append(
                    (key, parent_keys, record, record_details))
                return None
            # We have the basis parent, so expand the delta
            num = self._num_compression_children[compression_parent]
            num -= 1
            if num == 0:
                base_content = self._content_objects.pop(compression_parent)
                self._num_compression_children.pop(compression_parent)
            else:
                self._num_compression_children[compression_parent] = num
                base_content = self._content_objects[compression_parent]
            # It is tempting to want to copy_base_content=False for the last
            # child object. However, whenever noeol=False,
            # self._text_cache[parent_key] is content._lines. So mutating it
            # gives very bad results.
            # The alternative is to copy the lines into text cache, but then
            # we are copying anyway, so just do it here.
            content, delta = self._vf._factory.parse_record(
                key, record, record_details, base_content,
                copy_base_content=True)
        else:
            # Fulltext record
            content, _ = self._vf._factory.parse_record(
                key, record, record_details, None)
        if self._num_compression_children.get(key, 0) > 0:
            self._content_objects[key] = content
        lines = content.text()
        self._text_cache[key] = lines
        if delta is not None:
            self._cache_delta_blocks(key, compression_parent, delta, lines)
        return lines
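    # Bookkeeping note: if a basis text has three delta children, each child
    # is parsed against a copy of the basis content (copy_base_content=True);
    # the counter above just drops the cached basis object once the last
    # child has consumed it.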

    def _get_parent_annotations_and_matches(self, key, text, parent_key):
        """Get the list of annotations for the parent, and the matching lines.

        :param text: The opaque value given by _get_needed_texts
        :param parent_key: The key for the parent text
        :return: (parent_annotations, matching_blocks)
            parent_annotations is a list as long as the number of lines in
                parent
            matching_blocks is a list of (parent_idx, text_idx, len) tuples
                indicating which lines match between the two texts
        """
        block_key = (key, parent_key)
        if block_key in self._matching_blocks:
            blocks = self._matching_blocks.pop(block_key)
            parent_annotations = self._annotations_cache[parent_key]
            return parent_annotations, blocks
        return annotate.Annotator._get_parent_annotations_and_matches(self,
            key, text, parent_key)

    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotation ready
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return

    def _check_ready_for_annotations(self, key, parent_keys):
        """Return True if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them.
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True
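    # E.g. a text with parent_keys (A, B) where only A is annotated so far is
    # queued under B; _process_pending(B) re-runs this check once B's
    # annotation is cached.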

    def _extract_texts(self, records):
        """Extract the various texts needed based on records"""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        # Basic algorithm is:
        # 1) As 'records' are read, see if we can expand these records into
        #    Content objects (and thus lines)
        # 2) If a given line-delta is waiting on its compression parent, it
        #    gets queued up into self._pending_deltas, otherwise we expand
        #    it, and put it into self._text_cache and self._content_objects
        # 3) If we expanded the text, we will then check to see if all
        #    parents have also been processed. If so, this text gets yielded,
        #    else this record gets set aside into pending_annotation
        # 4) Further, if we expanded the text in (2), we will then check to
        #    see if there are any children in self._pending_deltas waiting to
        #    also be processed. If so, we go back to (2) for those
        # 5) Further again, if we yielded the text, we can then check if that
        #    'unlocks' any of the texts in pending_annotations, which should
        #    then get yielded as well
        # Note that both steps 4 and 5 are 'recursive' in that unlocking one
        # compression child could unlock yet another, and yielding a fulltext
        # will also 'unlock' the children that are waiting on that annotation.
        # (Though also, unlocking 1 parent's fulltext, does not unlock a child
        # if other parents are also waiting.)
        # We want to yield content before expanding child content objects, so
        # that we know when we can re-use the content lines, and the
        # annotation code can know when it can stop caching fulltexts, as
        # well.
        for (key, record, digest) in self._vf._read_records_iter(records):
            # Children that are missing their compression parent are queued
            # up in self._pending_deltas by _expand_record
            details = self._all_build_details[key]
            (_, compression_parent, parent_keys, record_details) = details
            lines = self._expand_record(key, parent_keys, compression_parent,
                                        record, record_details)
            if lines is None:
                # Pending delta should be queued up
                continue
            # At this point, we may be able to yield this content, if all
            # parents are also finished
            yield_this_text = self._check_ready_for_annotations(key,
                                                                parent_keys)
            if yield_this_text:
                # All parents present
                yield key, lines, len(lines)
            to_process = self._process_pending(key)
            while to_process:
                this_process = to_process
                to_process = []
                for key in this_process:
                    lines = self._text_cache[key]
                    yield key, lines, len(lines)
                    to_process.extend(self._process_pending(key))
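    # A small trace of the loop above (hypothetical keys): given A (a
    # fulltext) and B (a line-delta whose compression parent is A), reading
    # B first parks it in _pending_deltas[A]; reading A expands and yields
    # A, then _process_pending(A) expands B, and B is yielded once
    # _check_ready_for_annotations() sees all of B's parents annotated.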


try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data