    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate_flat(revision_id))


class _KnitAnnotator(annotate.Annotator):
    """Build up the annotations for a text."""

    def __init__(self, vf):
        annotate.Annotator.__init__(self, vf)

        # TODO: handle Nodes which cannot be extracted
        # self._ghosts = set()

        # Map from (key, parent_key) => matching_blocks, should be 'use once'
        self._matching_blocks = {}

        # KnitContent objects
        self._content_objects = {}
        # The number of children that depend on this fulltext content object
        self._num_compression_children = {}
        # Delta records that need their compression parent before they can be
        # expanded
        self._pending_deltas = {}
        # Fulltext records that are waiting for their parents fulltexts before
        # they can be yielded for annotation
        self._pending_annotation = {}

        self._all_build_details = {}
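
    # How these caches interact: _get_build_graph fills _all_build_details
    # and the compression-child refcounts, _expand_record consumes
    # _pending_deltas and fills _content_objects and _matching_blocks, and
    # _extract_texts drains _pending_annotation as parent texts become
    # annotated.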

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate it. (At a minimum, you need both parents to
        create an annotation, but only need 1 parent to generate the
        fulltext.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        pending = set([key])
        records = []
        ann_keys = set()
        self._num_needed_children[key] = 1
        while pending:
            # get all pending nodes
            this_iteration = pending
            build_details = self._vf._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._vf._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parent_keys,
                 record_details) = details
                self._parent_map[key] = parent_keys
                self._heads_provider = None
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update([p for p in parent_keys
                                if p not in self._all_build_details])
                for parent_key in parent_keys:
                    if parent_key in self._num_needed_children:
                        self._num_needed_children[parent_key] += 1
                    else:
                        self._num_needed_children[parent_key] = 1
                if compression_parent:
                    if compression_parent in self._num_compression_children:
                        self._num_compression_children[compression_parent] += 1
                    else:
                        self._num_compression_children[compression_parent] = 1

            missing_versions = this_iteration.difference(build_details.keys())
            if missing_versions:
                for key in missing_versions:
                    if key in self._parent_map and key in self._text_cache:
                        # We already have this text ready, we just need to
                        # yield it later so we get it annotated
                        ann_keys.add(key)
                        parent_keys = self._parent_map[key]
                        for parent_key in parent_keys:
                            if parent_key in self._num_needed_children:
                                self._num_needed_children[parent_key] += 1
                            else:
                                self._num_needed_children[parent_key] = 1
                        pending.update([p for p in parent_keys
                                        if p not in self._all_build_details])
                    else:
                        raise errors.RevisionNotPresent(key, self._vf)
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records, ann_keys
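
    # Shape of the result (hypothetical keys and memos): annotating rev3,
    # whose parent is rev2, whose parent is rev1, yields records like
    #   [(('f-id', 'rev1'), memo1), (('f-id', 'rev2'), memo2),
    #    (('f-id', 'rev3'), memo3)]
    # with parents first, because the list is reversed just before returning.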

    def _get_needed_texts(self, key, pb=None):
        # if True or len(self._vf._fallback_vfs) > 0:
        if len(self._vf._fallback_vfs) > 0:
            # If we have fallbacks, go to the generic path
            for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
                yield v
            return
        while True:
            try:
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                                                self._extract_texts(records)):
                    if pb is not None:
                        pb.update('annotating', idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text) # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks, e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()
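
    # reload_or_raise either reloads the pack list, letting the while loop
    # retry with a clean _all_build_details, or re-raises the original error
    # if no retry is possible.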

    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines,
                                                        lines))
        self._matching_blocks[(key, compression_parent)] = blocks
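
    # These blocks are 'use once': _get_parent_annotations_and_matches pops
    # them from _matching_blocks the first time the (key, parent_key) pair
    # is looked up.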

    def _expand_record(self, key, parent_keys, compression_parent, record,
                       record_details):
        delta = None
        if compression_parent:
            if compression_parent not in self._content_objects:
                # Waiting for the parent
                self._pending_deltas.setdefault(compression_parent, []).append(
                    (key, parent_keys, record, record_details))
                return None
            # We have the basis parent, so expand the delta
            num = self._num_compression_children[compression_parent]
            num -= 1
            if num == 0:
                base_content = self._content_objects.pop(compression_parent)
                self._num_compression_children.pop(compression_parent)
            else:
                self._num_compression_children[compression_parent] = num
                base_content = self._content_objects[compression_parent]
            # It is tempting to want to copy_base_content=False for the last
            # child object. However, whenever noeol=False,
            # self._text_cache[parent_key] is content._lines. So mutating it
            # gives very bad results.
            # The alternative is to copy the lines into text cache, but then we
            # are copying anyway, so just do it here.
            content, delta = self._vf._factory.parse_record(
                key, record, record_details, base_content,
                copy_base_content=True)
        else:
            # Fulltext record
            content, _ = self._vf._factory.parse_record(
                key, record, record_details, None)
        if self._num_compression_children.get(key, 0) > 0:
            self._content_objects[key] = content
        lines = content.text()
        self._text_cache[key] = lines
        if delta is not None:
            self._cache_delta_blocks(key, compression_parent, delta, lines)
        return lines
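
    # A None return from _expand_record means the record was a delta whose
    # compression parent has not been expanded yet; it is queued in
    # _pending_deltas and revisited from _process_pending.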

    def _get_parent_annotations_and_matches(self, key, text, parent_key):
        """Get the list of annotations for the parent, and the matching lines.

        :param text: The opaque value given by _get_needed_texts
        :param parent_key: The key for the parent text
        :return: (parent_annotations, matching_blocks)
            parent_annotations is a list as long as the number of lines in
                parent
            matching_blocks is a list of (parent_idx, text_idx, len) tuples
                indicating which lines match between the two texts
        """
        block_key = (key, parent_key)
        if block_key in self._matching_blocks:
            blocks = self._matching_blocks.pop(block_key)
            parent_annotations = self._annotations_cache[parent_key]
            return parent_annotations, blocks
        return annotate.Annotator._get_parent_annotations_and_matches(self,
            key, text, parent_key)
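
    # For example, a matching block of (0, 0, 2) says that the first two
    # lines of the parent text and of the child text are identical.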

    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotation ready
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return

    def _check_ready_for_annotations(self, key, parent_keys):
        """return true if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them.
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True
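
    # A text with multiple parents is queued under whichever parent is found
    # missing first; when that parent is annotated, _process_pending calls
    # this again, so the text may be re-queued under the next missing parent.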

    def _extract_texts(self, records):
        """Extract the various texts needed based on records"""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        # Basic algorithm:
        #   1) As 'records' are read, see if we can expand these records into
        #      Content objects (and thus lines)
        #   2) If a given line-delta is waiting on its compression parent, it
        #      gets queued up into self._pending_deltas, otherwise we expand
        #      it, and put it into self._text_cache and self._content_objects
        #   3) If we expanded the text, we will then check to see if all
        #      parents have also been processed. If so, this text gets yielded,
        #      else this record gets set aside into pending_annotation
        #   4) Further, if we expanded the text in (2), we will then check to
        #      see if there are any children in self._pending_deltas waiting to
        #      also be processed. If so, we go back to (2) for those
        #   5) Further again, if we yielded the text, we can then check if that
        #      'unlocks' any of the texts in pending_annotations, which should
        #      then get yielded as well
        # Note that both steps 4 and 5 are 'recursive' in that unlocking one
        # compression child could unlock yet another, and yielding a fulltext
        # will also 'unlock' the children that are waiting on that annotation.
        # (Though also, unlocking 1 parent's fulltext, does not unlock a child
        # if other parents are also waiting.)
        # We want to yield content before expanding child content objects, so
        # that we know when we can re-use the content lines, and the annotation
        # code can know when it can stop caching fulltexts, as well.

        # Children that are missing their compression parent
        for (key, record, digest) in self._vf._read_records_iter(records):
            details = self._all_build_details[key]
            (_, compression_parent, parent_keys, record_details) = details
            lines = self._expand_record(key, parent_keys, compression_parent,
                                        record, record_details)
            if lines is None:
                # Pending delta should be queued up
                continue
            # At this point, we may be able to yield this content, if all
            # parents are also finished
            yield_this_text = self._check_ready_for_annotations(key,
                                                                parent_keys)
            if yield_this_text:
                # All parents present
                yield key, lines, len(lines)
            to_process = self._process_pending(key)
            while to_process:
                this_process = to_process
                to_process = []
                for key in this_process:
                    lines = self._text_cache[key]
                    yield key, lines, len(lines)
                    to_process.extend(self._process_pending(key))
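

# Example use (a sketch; assumes `knit` is a knit-backed VersionedFiles that
# contains the key ('file-id', 'rev-id')):
#
#   annotator = _KnitAnnotator(knit)
#   for origin_key, line in annotator.annotate_flat(('file-id', 'rev-id')):
#       print '%s: %s' % (origin_key, line),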

try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._knit_load_data_py import _load_data_py as _load_data