class CachingParentsProvider(object):
    """A parents provider which will cache the revision => parents as a dict.

    This is useful for providers which have an expensive lookup.

    Either a ParentsProvider or a get_parent_map-like callback may be
    supplied. If it provides extra un-asked-for parents, they will be cached,
    but filtered out of get_parent_map.

    The cache is enabled by default, but may be disabled and re-enabled.
    """
    def __init__(self, parent_provider=None, get_parent_map=None):
        """Constructor.

        :param parent_provider: The ParentsProvider to use. It or
            get_parent_map must be supplied.
        :param get_parent_map: The get_parent_map callback to use. It or
            parent_provider must be supplied.
        """
        self._real_provider = parent_provider
        if get_parent_map is None:
            self._get_parent_map = self._real_provider.get_parent_map
        else:
            self._get_parent_map = get_parent_map
        self._cache = None
        self.enable_cache(True)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._real_provider)

    def enable_cache(self, cache_misses=True):
        """Enable the cache."""
        if self._cache is not None:
            raise AssertionError('Cache enabled when already enabled.')
        self._cache = {}
        self._cache_misses = cache_misses
        self.missing_keys = set()

    def disable_cache(self):
        """Disable and clear the cache."""
        self._cache = None
        self._cache_misses = None
        self.missing_keys = set()

    def get_cached_map(self):
        """Return any cached get_parent_map values."""
        if self._cache is None:
            return None
        return dict(self._cache)

    def get_cached_parent_map(self, keys):
        """Return items from the cache.

        This returns the same info as get_parent_map, but explicitly does not
        invoke the supplied ParentsProvider to search for uncached values.
        """
        cache = self._cache
        if cache is None:
            return {}
        return dict([(key, cache[key]) for key in keys if key in cache])

    def get_parent_map(self, keys):
        """See StackedParentsProvider.get_parent_map."""
        cache = self._cache
        if cache is None:
            cache = self._get_parent_map(keys)
        else:
            needed_revisions = set(key for key in keys if key not in cache)
            # Do not ask for negatively cached keys
            needed_revisions.difference_update(self.missing_keys)
            if needed_revisions:
                parent_map = self._get_parent_map(needed_revisions)
                cache.update(parent_map)
                if self._cache_misses:
                    for key in needed_revisions:
                        if key not in parent_map:
                            self.note_missing_key(key)
        result = {}
        for key in keys:
            value = cache.get(key)
            if value is not None:
                result[key] = value
        return result

    def note_missing_key(self, key):
        """Note that key is a missing key."""
        if self._cache_misses:
            self.missing_keys.add(key)
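

# Illustrative usage sketch (not part of the original module), assuming the
# DictParentsProvider class defined elsewhere in this module: repeated
# lookups only hit the real provider once, and misses are remembered in
# missing_keys when cache_misses is True.
#
#   >>> real = DictParentsProvider({'rev1': (), 'rev2': ('rev1',)})
#   >>> caching = CachingParentsProvider(real)
#   >>> caching.get_parent_map(['rev2', 'ghost'])  # asks the real provider
#   {'rev2': ('rev1',)}
#   >>> sorted(caching.missing_keys)               # 'ghost' negatively cached
#   ['ghost']
#   >>> caching.get_parent_map(['rev2'])           # served from self._cache
#   {'rev2': ('rev1',)}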


class CallableToParentsProviderAdapter(object):
    """A parents provider that adapts any callable to the parents provider API.

    i.e. it accepts calls to self.get_parent_map and relays them to the
    callable it was constructed with.
    """

    def __init__(self, a_callable):
        self.callable = a_callable

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.callable)

    def get_parent_map(self, keys):
        return self.callable(keys)
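

# Illustrative sketch (not part of the original source): any callable with a
# get_parent_map-like signature can be adapted, e.g. a closure over a plain
# dict.
#
#   >>> ancestry = {'A': (), 'B': ('A',)}
#   >>> adapter = CallableToParentsProviderAdapter(
#   ...     lambda keys: dict((k, ancestry[k]) for k in keys if k in ancestry))
#   >>> adapter.get_parent_map(['B', 'ghost'])
#   {'B': ('A',)}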


class Graph(object):

    def find_difference(self, left_revision, right_revision):
        """Determine the graph difference between two revisions"""
        border, common, searchers = self._find_border_ancestors(
            [left_revision, right_revision])
        self._search_for_extra_common(common, searchers)
        left = searchers[0].seen
        right = searchers[1].seen
        return (left.difference(right), right.difference(left))

    def find_descendants(self, old_key, new_key):
        """Find descendants of old_key that are ancestors of new_key."""
        child_map = self.get_child_map(self._find_descendant_ancestors(
            old_key, new_key))
        graph = Graph(DictParentsProvider(child_map))
        searcher = graph._make_breadth_first_searcher([old_key])
        list(searcher)
        return searcher.seen

    def _find_descendant_ancestors(self, old_key, new_key):
        """Find ancestors of new_key that may be descendants of old_key."""
        stop = self._make_breadth_first_searcher([old_key])
        descendants = self._make_breadth_first_searcher([new_key])
        for revisions in descendants:
            old_stop = stop.seen.intersection(revisions)
            descendants.stop_searching_any(old_stop)
            seen_stop = descendants.find_seen_ancestors(stop.step())
            descendants.stop_searching_any(seen_stop)
        return descendants.seen.difference(stop.seen)
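
    # Illustrative sketch (not part of the original source), assuming the
    # DictParentsProvider defined elsewhere in this module: in the ancestry
    # A <- B <- C, every key between A and C is a descendant of A that is
    # also an ancestor of C.
    #
    #   >>> g = Graph(DictParentsProvider({'A': (), 'B': ('A',), 'C': ('B',)}))
    #   >>> sorted(g.find_descendants('A', 'C'))
    #   ['A', 'B', 'C']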

    def get_child_map(self, keys):
        """Get a mapping from parents to children of the specified keys.

        This is simply the inversion of get_parent_map. Only supplied keys
        will be discovered as children.

        :return: a dict of key:child_list for keys.
        """
        parent_map = self._parents_provider.get_parent_map(keys)
        parent_child = {}
        for child, parents in sorted(parent_map.items()):
            for parent in parents:
                parent_child.setdefault(parent, []).append(child)
        return parent_child
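
    # Illustrative sketch (not part of the original source): two children of
    # the same parent invert to a single parent->children entry.
    #
    #   >>> g = Graph(DictParentsProvider({'B': ('A',), 'C': ('A',)}))
    #   >>> g.get_child_map(['B', 'C'])
    #   {'A': ['B', 'C']}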

    def find_distance_to_null(self, target_revision_id, known_revision_ids):
        """Find the left-hand distance to the NULL_REVISION.

        (This can also be considered the revno of a branch at
        target_revision_id.)

        :param target_revision_id: A revision_id which we would like to know
            the revno for.
        :param known_revision_ids: [(revision_id, revno)] A list of known
            revno, revision_id tuples. We'll use this to seed the search.
        """
        # Map from revision_ids to a known value for their revno
        known_revnos = dict(known_revision_ids)
        cur_tip = target_revision_id
        num_steps = 0
        NULL_REVISION = revision.NULL_REVISION
        known_revnos[NULL_REVISION] = 0

        searching_known_tips = list(known_revnos.keys())

        unknown_searched = {}

        while cur_tip not in known_revnos:
            unknown_searched[cur_tip] = num_steps
            num_steps += 1
            to_search = set([cur_tip])
            to_search.update(searching_known_tips)
            parent_map = self.get_parent_map(to_search)
            parents = parent_map.get(cur_tip, None)
            if not parents: # An empty list or None is a ghost
                raise errors.GhostRevisionsHaveNoRevno(target_revision_id,
                                                       cur_tip)
            cur_tip = parents[0]
            next_known_tips = []
            for revision_id in searching_known_tips:
                parents = parent_map.get(revision_id, None)
                if not parents:
                    continue
                next = parents[0]
                next_revno = known_revnos[revision_id] - 1
                if next in unknown_searched:
                    # We have enough information to return a value right now
                    return next_revno + unknown_searched[next]
                if next in known_revnos:
                    continue
                known_revnos[next] = next_revno
                next_known_tips.append(next)
            searching_known_tips = next_known_tips

        # We reached a known revision, so just add in how many steps it took to
        # get there.
        return known_revnos[cur_tip] + num_steps
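
    # Illustrative sketch (not part of the original source), assuming the
    # bzrlib.revision import at the top of this module: in the linear history
    # null <- r1 <- r2 <- r3, the distance from r3 to NULL_REVISION is 3,
    # i.e. the revno of a branch at r3. Seeding with a known revno lets the
    # search stop early.
    #
    #   >>> NULL = revision.NULL_REVISION
    #   >>> g = Graph(DictParentsProvider(
    #   ...     {'r1': (NULL,), 'r2': ('r1',), 'r3': ('r2',)}))
    #   >>> g.find_distance_to_null('r3', [])
    #   3
    #   >>> g.find_distance_to_null('r3', [('r2', 2)])
    #   3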

    def find_lefthand_distances(self, keys):
        """Find the distance to null for all the keys in keys.

        :param keys: keys to lookup.
        :return: A dict key->distance for all of keys.
        """
        # Optimisable by concurrent searching, but a random spread should get
        # some sort of hit rate.
        known_revnos = []
        ghosts = []
        for key in keys:
            try:
                known_revnos.append(
                    (key, self.find_distance_to_null(key, known_revnos)))
            except errors.GhostRevisionsHaveNoRevno:
                ghosts.append(key)
        for key in ghosts:
            known_revnos.append((key, -1))
        return dict(known_revnos)
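
    # Illustrative sketch (not part of the original source): ghosts raise
    # GhostRevisionsHaveNoRevno in find_distance_to_null, so they come back
    # with a distance of -1 here.
    #
    #   >>> NULL = revision.NULL_REVISION
    #   >>> g = Graph(DictParentsProvider({'r1': (NULL,), 'r2': ('r1',)}))
    #   >>> sorted(g.find_lefthand_distances(['r2', 'ghost']).items())
    #   [('ghost', -1), ('r2', 2)]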

    def find_unique_ancestors(self, unique_revision, common_revisions):
        """Find the unique ancestors for a revision versus others.

        This returns the ancestry of unique_revision, excluding all revisions
        in the ancestry of common_revisions.

        :param unique_revision: The revision_id whose ancestry we are
            interested in.
        :param common_revisions: Revision_ids of ancestries to exclude.
        :return: A set of revisions in the ancestry of unique_revision
        """
        unique_searcher, common_searcher = self._find_initial_unique_nodes(
            [unique_revision], common_revisions)

        unique_nodes = unique_searcher.seen.difference(common_searcher.seen)
        if not unique_nodes:
            return unique_nodes

        (all_unique_searcher,
         unique_tip_searchers) = self._make_unique_searchers(
            unique_nodes, unique_searcher, common_searcher)

        self._refine_unique_nodes(unique_searcher, all_unique_searcher,
                                  unique_tip_searchers, common_searcher)
        return unique_nodes.difference(common_searcher.seen)

    def _find_initial_unique_nodes(self, unique_revisions, common_revisions):
        """Steps 1-3 of find_unique_ancestors.

        Find the maximal set of unique nodes. Some of these might actually
        still be common, but we are sure that there are no other unique nodes.

        :return: (unique_searcher, common_searcher)
        """
        unique_searcher = self._make_breadth_first_searcher(unique_revisions)
        # we know that unique_revisions aren't in common_revisions, so skip
        # past them.
        unique_searcher.next()
        common_searcher = self._make_breadth_first_searcher(common_revisions)

        # As long as we are still finding unique nodes, keep searching
        while unique_searcher._next_query:
            next_unique_nodes = set(unique_searcher.step())
            next_common_nodes = set(common_searcher.step())

            # Check if either searcher encounters new nodes seen by the other
            # side.
            unique_are_common_nodes = next_unique_nodes.intersection(
                common_searcher.seen)
            unique_are_common_nodes.update(
                next_common_nodes.intersection(unique_searcher.seen))
            if unique_are_common_nodes:
                ancestors = unique_searcher.find_seen_ancestors(
                    unique_are_common_nodes)
                # TODO: This is a bit overboard, we only really care about
                #       the ancestors of the tips because the rest we
                #       already know. This is *correct* but causes us to
                #       search too much ancestry.
                ancestors.update(
                    common_searcher.find_seen_ancestors(ancestors))
                unique_searcher.stop_searching_any(ancestors)
                common_searcher.start_searching(ancestors)

        return unique_searcher, common_searcher

    def _make_unique_searchers(self, unique_nodes, unique_searcher,
                               common_searcher):
        """Create a searcher for all the unique search tips (step 4).

        As a side effect, the common_searcher will stop searching any nodes
        that are ancestors of the unique searcher tips.

        :return: (all_unique_searcher, unique_tip_searchers)
        """
        unique_tips = self._remove_simple_descendants(unique_nodes,
                        self.get_parent_map(unique_nodes))

        if len(unique_tips) == 1:
            unique_tip_searchers = []
            ancestor_all_unique = unique_searcher.find_seen_ancestors(unique_tips)
        else:
            unique_tip_searchers = []
            for tip in unique_tips:
                revs_to_search = unique_searcher.find_seen_ancestors([tip])
                revs_to_search.update(
                    common_searcher.find_seen_ancestors(revs_to_search))
                searcher = self._make_breadth_first_searcher(revs_to_search)
                # We don't care about the starting nodes.
                searcher._label = tip
                searcher.step()
                unique_tip_searchers.append(searcher)

            ancestor_all_unique = None
            for searcher in unique_tip_searchers:
                if ancestor_all_unique is None:
                    ancestor_all_unique = set(searcher.seen)
                else:
                    ancestor_all_unique = ancestor_all_unique.intersection(
                        searcher.seen)
        # Collapse all the common nodes into a single searcher
        all_unique_searcher = self._make_breadth_first_searcher(
            ancestor_all_unique)
        if ancestor_all_unique:
            # We've seen these nodes in all the searchers, so we'll just go to
            # the next
            all_unique_searcher.step()

            # Stop any search tips that are already known as ancestors of the
            # unique nodes
            stopped_common = common_searcher.stop_searching_any(
                common_searcher.find_seen_ancestors(ancestor_all_unique))

            total_stopped = 0
            for searcher in unique_tip_searchers:
                total_stopped += len(searcher.stop_searching_any(
                    searcher.find_seen_ancestors(ancestor_all_unique)))
        if 'graph' in debug.debug_flags:
            trace.mutter('For %d unique nodes, created %d + 1 unique searchers'
                         ' (%d stopped search tips, %d common ancestors'
                         ' (%d stopped common)',
                         len(unique_nodes), len(unique_tip_searchers),
                         total_stopped, len(ancestor_all_unique),
                         len(stopped_common))
        return all_unique_searcher, unique_tip_searchers

    def _step_unique_and_common_searchers(self, common_searcher,
                                          unique_tip_searchers,
                                          unique_searcher):
        """Step all the searchers"""
        newly_seen_common = set(common_searcher.step())
        newly_seen_unique = set()
        for searcher in unique_tip_searchers:
            next = set(searcher.step())
            next.update(unique_searcher.find_seen_ancestors(next))
            next.update(common_searcher.find_seen_ancestors(next))
            for alt_searcher in unique_tip_searchers:
                if alt_searcher is searcher:
                    continue
                next.update(alt_searcher.find_seen_ancestors(next))
            searcher.start_searching(next)
            newly_seen_unique.update(next)
        return newly_seen_common, newly_seen_unique

    def _find_nodes_common_to_all_unique(self, unique_tip_searchers,
                                         all_unique_searcher,
                                         newly_seen_unique, step_all_unique):
        """Find nodes that are common to all unique_tip_searchers.

        If it is time, step the all_unique_searcher, and add its nodes to the
        result.
        """
        common_to_all_unique_nodes = newly_seen_unique.copy()
        for searcher in unique_tip_searchers:
            common_to_all_unique_nodes.intersection_update(searcher.seen)
        common_to_all_unique_nodes.intersection_update(
            all_unique_searcher.seen)
        # Step all-unique less frequently than the other searchers.
        # In the common case, we don't need to spider out far here, so
        # avoid doing extra work.
        if step_all_unique:
            tstart = time.clock()
            nodes = all_unique_searcher.step()
            common_to_all_unique_nodes.update(nodes)
            if 'graph' in debug.debug_flags:
                tdelta = time.clock() - tstart
                trace.mutter('all_unique_searcher step() took %.3fs'
                             ' for %d nodes (%d total), iteration: %s',
                             tdelta, len(nodes), len(all_unique_searcher.seen),
                             all_unique_searcher._iterations)
        return common_to_all_unique_nodes

    def _collapse_unique_searchers(self, unique_tip_searchers,
                                   common_to_all_unique_nodes):
        """Combine searchers that are searching the same tips.

        When two searchers are searching the same tips, we can stop one of the
        searchers. We also know that the maximal set of common ancestors is the
        intersection of the two original searchers.

        :return: A list of searchers that are searching unique nodes.
        """
        # Filter out searchers that don't actually search different
        # nodes. We already have the ancestry intersection for them
        unique_search_tips = {}
        for searcher in unique_tip_searchers:
            stopped = searcher.stop_searching_any(common_to_all_unique_nodes)
            will_search_set = frozenset(searcher._next_query)
            if not will_search_set:
                if 'graph' in debug.debug_flags:
                    trace.mutter('Unique searcher %s was stopped.'
                                 ' (%s iterations) %d nodes stopped',
                                 searcher._label,
                                 searcher._iterations,
                                 len(stopped))
            elif will_search_set not in unique_search_tips:
                # This searcher is searching a unique set of nodes, let it
                unique_search_tips[will_search_set] = [searcher]
            else:
                unique_search_tips[will_search_set].append(searcher)
        # TODO: it might be possible to collapse searchers faster when they
        #       only have *some* search tips in common.
        next_unique_searchers = []
        for searchers in unique_search_tips.itervalues():
            if len(searchers) == 1:
                # Searching unique tips, go for it
                next_unique_searchers.append(searchers[0])
            else:
                # These searchers have started searching the same tips, we
                # don't need them to cover the same ground. The
                # intersection of their ancestry won't change, so create a
                # new searcher, combining their histories.
                next_searcher = searchers[0]
                for searcher in searchers[1:]:
                    next_searcher.seen.intersection_update(searcher.seen)
                if 'graph' in debug.debug_flags:
                    trace.mutter('Combining %d searchers into a single'
                                 ' searcher searching %d nodes with'
                                 ' %d ancestry',
                                 len(searchers),
                                 len(next_searcher._next_query),
                                 len(next_searcher.seen))
                next_unique_searchers.append(next_searcher)
        return next_unique_searchers

    def _refine_unique_nodes(self, unique_searcher, all_unique_searcher,
                             unique_tip_searchers, common_searcher):
        """Steps 5-8 of find_unique_ancestors.

        This function returns when common_searcher has stopped searching for
        more nodes.
        """
        # We step the ancestor_all_unique searcher only every
        # STEP_UNIQUE_SEARCHER_EVERY steps.
        step_all_unique_counter = 0
        # While we still have common nodes to search
        while common_searcher._next_query:
            (newly_seen_common,
             newly_seen_unique) = self._step_unique_and_common_searchers(
                common_searcher, unique_tip_searchers, unique_searcher)
            # These nodes are common ancestors of all unique nodes
            common_to_all_unique_nodes = self._find_nodes_common_to_all_unique(
                unique_tip_searchers, all_unique_searcher, newly_seen_unique,
                step_all_unique_counter==0)
            step_all_unique_counter = ((step_all_unique_counter + 1)
                                       % STEP_UNIQUE_SEARCHER_EVERY)

            if newly_seen_common:
                # If a 'common' node is an ancestor of all unique searchers, we
                # can stop searching it.
                common_searcher.stop_searching_any(
                    all_unique_searcher.seen.intersection(newly_seen_common))
            if common_to_all_unique_nodes:
                common_to_all_unique_nodes.update(
                    common_searcher.find_seen_ancestors(
                        common_to_all_unique_nodes))
                # The all_unique searcher can start searching the common nodes
                # but everyone else can stop.
                # This is the sort of thing where we would like to not have it
                # start_searching all of the nodes, but only mark all of them
                # as seen, and have it search only the actual tips. Otherwise
                # it is another get_parent_map() traversal for it to figure out
                # what we already should know.
                all_unique_searcher.start_searching(common_to_all_unique_nodes)
                common_searcher.stop_searching_any(common_to_all_unique_nodes)

            next_unique_searchers = self._collapse_unique_searchers(
                unique_tip_searchers, common_to_all_unique_nodes)
            if len(unique_tip_searchers) != len(next_unique_searchers):
                if 'graph' in debug.debug_flags:
                    trace.mutter('Collapsed %d unique searchers => %d'
                                 ' at %s iterations',
                                 len(unique_tip_searchers),
                                 len(next_unique_searchers),
                                 all_unique_searcher._iterations)
            unique_tip_searchers = next_unique_searchers

    def get_parent_map(self, revisions):
        """Get a map of key:parent_list for revisions."""

    def heads(self, keys):
        """Return the heads from amongst keys."""
                    common_walker.start_searching(new_common)
        return candidate_heads

    def find_merge_order(self, tip_revision_id, lca_revision_ids):
        """Find the order that each revision was merged into tip.

        This basically just walks backwards with a stack, and walks left-first
        until it finds a node to stop.
        """
        if len(lca_revision_ids) == 1:
            return list(lca_revision_ids)
        looking_for = set(lca_revision_ids)
        # TODO: Is there a way we could do this "faster" by batching up the
        #       get_parent_map requests?
        # TODO: Should we also be culling the ancestry search right away? We
        #       could add looking_for to the "stop" list, and walk their
        #       ancestry in batched mode. The flip side is it might mean we walk a
        #       lot of "stop" nodes, rather than only the minimum.
        #       Then again, without it we may trace back into ancestry we could have
        #       stopped early.
        stack = [tip_revision_id]
        found = []
        stop = set()
        while stack and looking_for:
            next = stack.pop()
            stop.add(next)
            if next in looking_for:
                found.append(next)
                looking_for.remove(next)
                if len(looking_for) == 1:
                    found.append(looking_for.pop())
                    break
                continue
            parent_ids = self.get_parent_map([next]).get(next, None)
            if not parent_ids: # Ghost, nothing to search here
                continue
            for parent_id in reversed(parent_ids):
                # TODO: (performance) We see the parent at this point, but we
                #       wait to mark it until later to make sure we get left
                #       parents before right parents. However, instead of
                #       waiting until we have traversed enough parents, we
                #       could instead note that we've found it, and once all
                #       parents are in the stack, just reverse iterate the
                #       stack for them.
                if parent_id not in stop:
                    # this will need to be searched
                    stack.append(parent_id)
                stop.add(parent_id)
        return found
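
    # Illustrative sketch (not part of the original source): B and C are both
    # merged into D, whose parents are (B, C); the left-first walk therefore
    # reports B before C.
    #
    #   >>> g = Graph(DictParentsProvider(
    #   ...     {'A': (), 'B': ('A',), 'C': ('A',), 'D': ('B', 'C')}))
    #   >>> g.find_merge_order('D', ['B', 'C'])
    #   ['B', 'C']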

    def find_lefthand_merger(self, merged_key, tip_key):
        """Find the first lefthand ancestor of tip_key that merged merged_key.

        We do this by first finding the descendants of merged_key, then
        walking through the lefthand ancestry of tip_key until we find a key
        that doesn't descend from merged_key. Its child is the key that
        merged merged_key.

        :return: The first lefthand ancestor of tip_key to merge merged_key.
            merged_key if it is a lefthand ancestor of tip_key.
            None if no ancestor of tip_key merged merged_key.
        """
        descendants = self.find_descendants(merged_key, tip_key)
        candidate_iterator = self.iter_lefthand_ancestry(tip_key)
        last_candidate = None
        for candidate in candidate_iterator:
            if candidate not in descendants:
                return last_candidate
            last_candidate = candidate
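
    # Illustrative sketch (not part of the original source), assuming the
    # iter_lefthand_ancestry method from the full class: C is merged into D
    # (whose lefthand parent is B), so D is the revision that merged C.
    #
    #   >>> g = Graph(DictParentsProvider(
    #   ...     {'A': (), 'B': ('A',), 'C': ('A',), 'D': ('B', 'C')}))
    #   >>> g.find_lefthand_merger('C', 'D')
    #   'D'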

    def find_unique_lca(self, left_revision, right_revision,
                        count_steps=False):
        """Find a unique LCA."""

        return revs, ghosts


def invert_parent_map(parent_map):
    """Given a map from child => parents, create a map of parent=>children"""
    child_map = {}
    for child, parents in parent_map.iteritems():
        for p in parents:
            # Any given parent is likely to have only a small handful
            # of children, many will have only one. So we avoid mem overhead of
            # a list, in exchange for extra copying of tuples
            if p not in child_map:
                child_map[p] = (child,)
            else:
                child_map[p] = child_map[p] + (child,)
    return child_map
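

# Illustrative sketch (not part of the original source): tuples, not lists,
# hold the children.
#
#   >>> sorted(invert_parent_map({'B': ('A',), 'C': ('B',)}).items())
#   [('A', ('B',)), ('B', ('C',))]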


def collapse_linear_regions(parent_map):
    """Collapse regions of the graph that are 'linear'.

    For example::

      A:[B], B:[C]

    can be collapsed by removing B and getting::

      A:[C]

    :param parent_map: A dictionary mapping children to their parents
    :return: Another dictionary with 'linear' chains collapsed
    """
    # Note: this isn't a strictly minimal collapse. For example:
    #   A
    #  / \
    # B   C
    #  \ /
    #   D
    #   |
    #   E
    # Will not have 'D' removed, even though 'E' could fit. Also:
    #   A
    #   |    A
    #   B => |
    #   |    C
    #   C
    # A and C are both kept because they are edges of the graph. We *could* get
    # rid of A if we wanted.
    #   A
    #  / \
    # B   C
    # |   |
    # D   E
    #  \ /
    #   F
    # Will not have any nodes removed, even though you do have an
    # 'uninteresting' linear D->B and E->C
    children = {}
    for child, parents in parent_map.iteritems():
        children.setdefault(child, [])
        for p in parents:
            children.setdefault(p, []).append(child)

    orig_children = dict(children)
    result = dict(parent_map)
    for node in parent_map:
        parents = result[node]
        if len(parents) == 1:
            parent_children = children[parents[0]]
            if len(parent_children) != 1:
                # This is not the only child
                continue
            node_children = children[node]
            if len(node_children) != 1:
                continue
            child_parents = result.get(node_children[0], None)
            if len(child_parents) != 1:
                # This is not its only parent
                continue
            # The child of this node only points at it, and the parent only has
            # this as a child. remove this node, and join the others together
            result[node_children[0]] = parents
            children[parents[0]] = node_children
            del result[node]
            del children[node]

    return result
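

# Illustrative sketch (not part of the original source): B is an interior
# linear node, so it is removed and C is re-pointed at A; A and C survive as
# edges of the graph.
#
#   >>> sorted(collapse_linear_regions(
#   ...     {'A': (), 'B': ('A',), 'C': ('B',)}).items())
#   [('A', ()), ('C', ('A',))]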


class GraphThunkIdsToKeys(object):
    """Forwards calls about 'ids' to be about keys internally."""

    def __init__(self, graph):
        self._graph = graph

    def topo_sort(self):
        return [r for (r,) in self._graph.topo_sort()]

    def heads(self, ids):
        """See Graph.heads()"""
        as_keys = [(i,) for i in ids]
        head_keys = self._graph.heads(as_keys)
        return set([h[0] for h in head_keys])

    def merge_sort(self, tip_revision):
        nodes = self._graph.merge_sort((tip_revision,))
        for node in nodes:
            node.key = node.key[0]
        return nodes

    def add_node(self, revision, parents):
        self._graph.add_node((revision,), [(p,) for p in parents])
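

# Illustrative sketch (not part of the original source), assuming the
# KnownGraph class imported below: id-based callers can use a key-based
# KnownGraph, where each key is a 1-tuple of an id.
#
#   >>> kg = KnownGraph({('A',): [], ('B',): [('A',)]})
#   >>> thunk = GraphThunkIdsToKeys(kg)
#   >>> thunk.heads(['A', 'B'])
#   set(['B'])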


_counters = [0,0,0,0,0,0,0]
try:
    from bzrlib._known_graph_pyx import KnownGraph
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._known_graph_py import KnownGraph


class SearchResult(object):
    """The result of a breadth first search.

    A SearchResult provides the ability to reconstruct the search or access a
    set of the keys the search found.
    """

    def __init__(self, start_keys, exclude_keys, key_count, keys):
        """Create a SearchResult.

        :param start_keys: The keys the search started at.
        :param exclude_keys: The keys the search excludes.
        :param key_count: The total number of keys (from start to but not
            including exclude).
        :param keys: The keys the search found. Note that in future we may get
            a SearchResult from a smart server, in which case the keys list is
            not necessarily immediately available.
        """
        self._recipe = (start_keys, exclude_keys, key_count)
        self._keys = frozenset(keys)

    def get_recipe(self):
        """Return a recipe that can be used to replay this search.

        The recipe allows reconstruction of the same results at a later date
        without knowing all the found keys. The essential elements are a list
        of keys to start and to stop at. In order to give reproducible
        results when ghosts are encountered by a search they are automatically
        added to the exclude list (or else ghost filling may alter the
        results).

        :return: A tuple (start_keys_set, exclude_keys_set, revision_count). To
            recreate the results of this search, create a breadth first
            searcher on the same graph starting at start_keys. Then call next()
            (or next_with_ghosts()) repeatedly, and on every result, call
            stop_searching_any on any keys from the exclude_keys set. The
            revision_count value acts as a trivial cross-check - the found
            revisions of the new search should have as many elements as
            revision_count. If it does not, then additional revisions have been
            ghosted since the search was executed the first time and the second
            time.
        """
        return self._recipe

    def get_keys(self):
        """Return the keys found in this search.

        :return: A set of keys.
        """
        return self._keys
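

# Illustrative sketch (not part of the original source): a SearchResult
# simply carries the recipe tuple and a frozenset of the found keys.
#
#   >>> result = SearchResult(set(['tip']), set(['base']), 2, ['tip', 'mid'])
#   >>> result.get_recipe()
#   (set(['tip']), set(['base']), 2)
#   >>> sorted(result.get_keys())
#   ['mid', 'tip']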