        return 'DictParentsProvider(%r)' % self.ancestry

    def get_parent_map(self, keys):
        """See StackedParentsProvider.get_parent_map"""
        ancestry = self.ancestry
        return dict((k, ancestry[k]) for k in keys if k in ancestry)
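
# Illustrative usage (a sketch, not part of the original module): a
# DictParentsProvider answers get_parent_map only for the keys it knows,
# silently omitting unknown (ghost) keys.  The revision ids are made up:
#
#   >>> provider = DictParentsProvider({'rev2': ('rev1',), 'rev1': ()})
#   >>> provider.get_parent_map(['rev2', 'ghost'])
#   {'rev2': ('rev1',)}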


@deprecated_function(deprecated_in((1, 16, 0)))
def _StackedParentsProvider(*args, **kwargs):
    return StackedParentsProvider(*args, **kwargs)


class StackedParentsProvider(object):
    """A parents provider which stacks (or unions) multiple providers.

    The providers are queried in the order of the provided parent_providers.
    """

    def __init__(self, parent_providers):
        self._parent_providers = parent_providers

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._parent_providers)

    def get_parent_map(self, keys):
        """Get a mapping of keys => parents

        :param keys: An iterable returning keys to check (eg revision_ids)
        :return: A dictionary mapping each key to its parents
        """
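
# Illustrative usage (a sketch, not part of the original module): providers
# are queried in order and their answers are unioned, so several partial
# providers behave like a single combined one.  Made-up revision ids:
#
#   >>> stacked = StackedParentsProvider([
#   ...     DictParentsProvider({'rev2': ('rev1',)}),
#   ...     DictParentsProvider({'rev1': ()})])
#   >>> stacked.get_parent_map(['rev1', 'rev2'])
#   {'rev1': (), 'rev2': ('rev1',)}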


class CachingParentsProvider(object):
    """A parents provider which will cache the revision => parents as a dict.

    This is useful for providers which have an expensive look up.

    Either a ParentsProvider or a get_parent_map-like callback may be
    supplied. If it provides extra un-asked-for parents, they will be cached,
    but filtered out of get_parent_map.

    The cache is enabled by default, but may be disabled and re-enabled.
    """

    def __init__(self, parent_provider=None, get_parent_map=None):
        """Constructor.

        :param parent_provider: The ParentProvider to use. It or
            get_parent_map must be supplied.
        :param get_parent_map: The get_parent_map callback to use. It or
            parent_provider must be supplied.
        """
        self._real_provider = parent_provider
        if get_parent_map is None:
            self._get_parent_map = self._real_provider.get_parent_map
        else:
            self._get_parent_map = get_parent_map
        self._cache = None
        self.enable_cache(True)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._real_provider)

    def enable_cache(self, cache_misses=True):
        """Enable cache."""
        if self._cache is not None:
            raise AssertionError('Cache enabled when already enabled.')
        self._cache = {}
        self._cache_misses = cache_misses
        self.missing_keys = set()

    def disable_cache(self):
        """Disable and clear the cache."""
        self._cache = None
        self._cache_misses = None
        self.missing_keys = set()

    def get_cached_map(self):
        """Return any cached get_parent_map values."""
        if self._cache is None:
            return None
        return dict(self._cache)

    def get_parent_map(self, keys):
        """See StackedParentsProvider.get_parent_map."""
        cache = self._cache
        if cache is None:
            cache = self._get_parent_map(keys)
        else:
            needed_revisions = set(key for key in keys if key not in cache)
            # Do not ask for negatively cached keys
            needed_revisions.difference_update(self.missing_keys)
            if needed_revisions:
                parent_map = self._get_parent_map(needed_revisions)
                cache.update(parent_map)
                if self._cache_misses:
                    for key in needed_revisions:
                        if key not in parent_map:
                            self.note_missing_key(key)
        result = {}
        for key in keys:
            value = cache.get(key)
            if value is not None:
                result[key] = value
        return result

    def note_missing_key(self, key):
        """Note that key is a missing key."""
        if self._cache_misses:
            self.missing_keys.add(key)
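
# Illustrative usage (a sketch, not part of the original module): the first
# get_parent_map call hits the underlying provider and fills the cache; keys
# that turn out to be absent are negatively cached in missing_keys (when
# cache_misses is enabled) so they are not asked for again.  Made-up ids:
#
#   >>> caching = CachingParentsProvider(
#   ...     DictParentsProvider({'rev2': ('rev1',)}))
#   >>> caching.get_parent_map(['rev2', 'ghost'])  # queries the provider
#   {'rev2': ('rev1',)}
#   >>> caching.missing_keys                       # 'ghost' negatively cached
#   set(['ghost'])
#   >>> caching.get_parent_map(['rev2'])           # served from the cache
#   {'rev2': ('rev1',)}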


class Graph(object):

                             all_unique_searcher._iterations)
            unique_tip_searchers = next_unique_searchers

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revisions):
        """Find revision ids of the parents of a list of revisions

        A list is returned of the same length as the input. Each entry
        is a list of parent ids for the corresponding input revision.

        [NULL_REVISION] is used as the parent of the first user-committed
        revision. Its parent list is empty.

        If the revision is not present (i.e. a ghost), None is used in place
        of the list of parents.

        Deprecated in bzr 1.2 - please see get_parent_map.
        """
        parents = self.get_parent_map(revisions)
        return [parents.get(r, None) for r in revisions]

    def get_parent_map(self, revisions):
        """Get a map of key:parent_list for revisions."""

            common_walker.start_searching(new_common)
        return candidate_heads

    def find_merge_order(self, tip_revision_id, lca_revision_ids):
        """Find the order that each revision was merged into tip.

        This basically just walks backwards with a stack, and walks left-first
        until it finds a node to stop.
        """
        if len(lca_revision_ids) == 1:
            return list(lca_revision_ids)
        looking_for = set(lca_revision_ids)
        # TODO: Is there a way we could do this "faster" by batching up the
        #       get_parent_map requests?
        # TODO: Should we also be culling the ancestry search right away? We
        #       could add looking_for to the "stop" list, and walk their
        #       ancestry in batched mode. The flip side is it might mean we walk a
        #       lot of "stop" nodes, rather than only the minimum.
        #       Then again, without it we may trace back into ancestry we could have
        #       stopped early.
        stack = [tip_revision_id]
        found = []
        stop = set()
        while stack and looking_for:
            next = stack.pop()
            stop.add(next)
            if next in looking_for:
                found.append(next)
                looking_for.remove(next)
                if len(looking_for) == 1:
                    found.append(looking_for.pop())
                    break
                continue
            parent_ids = self.get_parent_map([next]).get(next, None)
            if not parent_ids: # Ghost, nothing to search here
                continue
            for parent_id in reversed(parent_ids):
                # TODO: (performance) We see the parent at this point, but we
                #       wait to mark it until later to make sure we get left
                #       parents before right parents. However, instead of
                #       waiting until we have traversed enough parents, we
                #       could instead note that we've found it, and once all
                #       parents are in the stack, just reverse iterate the
                #       stack for them.
                if parent_id not in stop:
                    # this will need to be searched
                    stack.append(parent_id)
                stop.add(parent_id)
        return found
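
    # Illustrative sketch (not part of the original module), assuming a Graph
    # built over a DictParentsProvider.  The walk pops parents left-first, so
    # the left-hand LCA is reported as merged first.  Made-up revision ids:
    #
    #   >>> g = Graph(DictParentsProvider(
    #   ...     {'tip': ('left', 'right'), 'left': ('base',),
    #   ...      'right': ('base',), 'base': ()}))
    #   >>> g.find_merge_order('tip', ['left', 'right'])
    #   ['left', 'right']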

    def find_unique_lca(self, left_revision, right_revision,
                        count_steps=False):
        """Find a unique LCA."""

    def is_ancestor(self, candidate_ancestor, candidate_descendant):
        """Determine whether a revision is an ancestor of another."""
        return set([candidate_descendant]) == self.heads(
            [candidate_ancestor, candidate_descendant])

    def is_between(self, revid, lower_bound_revid, upper_bound_revid):
        """Determine whether a revision is between two others.

        Returns true if and only if:
        lower_bound_revid <= revid <= upper_bound_revid
        """
        return ((upper_bound_revid is None or
                 self.is_ancestor(revid, upper_bound_revid)) and
                (lower_bound_revid is None or
                 self.is_ancestor(lower_bound_revid, revid)))
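
    # Illustrative sketch (not part of the original module): in a linear
    # ancestry base -> mid -> tip, 'mid' lies between 'base' and 'tip', and a
    # None bound is treated as unbounded.  Made-up revision ids:
    #
    #   >>> g = Graph(DictParentsProvider(
    #   ...     {'tip': ('mid',), 'mid': ('base',), 'base': ()}))
    #   >>> g.is_between('mid', 'base', 'tip')
    #   True
    #   >>> g.is_between('base', 'mid', None)
    #   False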

    def _search_for_extra_common(self, common, searchers):
        """Make sure that unique nodes are genuinely unique."""

        a SearchResult from a smart server, in which case the keys list is
        not necessarily immediately available.
        """
        self._recipe = ('search', start_keys, exclude_keys, key_count)
        self._keys = frozenset(keys)

    def get_recipe(self):
        """Return a recipe that can be used to replay this search.

        The recipe allows reconstruction of the same results at a later date
        without knowing all the found keys. The essential elements are a list
        of keys to start and to stop at. In order to give reproducible
        results when ghosts are encountered by a search they are automatically
        added to the exclude list (or else ghost filling may alter the
        results).

        :return: A tuple ('search', start_keys_set, exclude_keys_set,
            revision_count). To recreate the results of this search, create a
            breadth first searcher on the same graph starting at start_keys.
            Then call next() (or next_with_ghosts()) repeatedly, and on every
            result, call stop_searching_any on any keys from the exclude_keys
            set. The revision_count value acts as a trivial cross-check - the
            found revisions of the new search should have as many elements as
            revision_count. If it does not, then additional revisions have been
            ghosted since the search was executed the first time and the second
            time.
        """
        return self._recipe

    def get_keys(self):
        """Return the keys found in this search.

        :return: A set of keys.
        """
        return self._keys

    def is_empty(self):
        """Return false if the search lists 1 or more revisions."""
        return self._recipe[3] == 0

    def refine(self, seen, referenced):
        """Create a new search by refining this search.

        :param seen: Revisions that have been satisfied.
        :param referenced: Revision references observed while satisfying some
            of this search.
        """
        start = self._recipe[1]
        exclude = self._recipe[2]
        count = self._recipe[3]
        keys = self.get_keys()
        # New heads = referenced + old heads - seen things - exclude
        pending_refs = set(referenced)
        pending_refs.update(start)
        pending_refs.difference_update(seen)
        pending_refs.difference_update(exclude)
        # New exclude = old exclude + satisfied heads
        seen_heads = start.intersection(seen)
        exclude.update(seen_heads)
        # keys gets seen removed
        keys = keys - seen
        # length is reduced by len(seen)
        count -= len(seen)
        return SearchResult(pending_refs, exclude, count, keys)
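
# Illustrative sketch (not part of the original module), assuming the
# constructor signature (start_keys, exclude_keys, key_count, keys) used
# above.  The recipe is a plain tuple, so a search can be serialised and
# replayed later.  Made-up keys:
#
#   >>> result = SearchResult(set(['tip']), set(['base']), 2, ['tip', 'mid'])
#   >>> result.get_recipe()
#   ('search', set(['tip']), set(['base']), 2)
#   >>> result.is_empty()
#   False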


class PendingAncestryResult(object):
    """A search result that will reconstruct the ancestry for some graph heads.

    Unlike SearchResult, this doesn't hold the complete search result in
    memory, it just holds a description of how to generate it.
    """

    def __init__(self, heads, repo):
        """Constructor.

        :param heads: an iterable of graph heads.
        :param repo: a repository to use to generate the ancestry for the given
            heads.
        """
        self.heads = frozenset(heads)
        self.repo = repo

    def get_recipe(self):
        """Return a recipe that can be used to replay this search.

        The recipe allows reconstruction of the same results at a later date.

        :seealso SearchResult.get_recipe:

        :return: A tuple ('proxy-search', start_keys_set, set(), -1)
            To recreate this result, create a PendingAncestryResult with the
            start_keys_set.
        """
        return ('proxy-search', self.heads, set(), -1)

    def get_keys(self):
        """See SearchResult.get_keys.

        Returns all the keys for the ancestry of the heads, excluding
        NULL_REVISION.
        """
        return self._get_keys(self.repo.get_graph())

    def _get_keys(self, graph):
        NULL_REVISION = revision.NULL_REVISION
        keys = [key for (key, parents) in graph.iter_ancestry(self.heads)
                if key != NULL_REVISION and parents is not None]
        return keys

    def is_empty(self):
        """Return false if the search lists 1 or more revisions."""
        if revision.NULL_REVISION in self.heads:
            return len(self.heads) == 1
        else:
            return len(self.heads) == 0

    def refine(self, seen, referenced):
        """Create a new search by refining this search.

        :param seen: Revisions that have been satisfied.
        :param referenced: Revision references observed while satisfying some
            of this search.
        """
        referenced = self.heads.union(referenced)
        return PendingAncestryResult(referenced - seen, self.repo)
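
# Illustrative sketch (not part of the original module): unlike SearchResult,
# the recipe carries no key list, only the heads to regenerate it from.  The
# repo argument is unused by get_recipe itself, so None stands in here:
#
#   >>> pending = PendingAncestryResult(['tip'], None)
#   >>> pending.get_recipe()
#   ('proxy-search', frozenset(['tip']), set(), -1)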


def collapse_linear_regions(parent_map):
    """Collapse regions of the graph that are 'linear'.

    For example::

      A:[B], B:[C]

    can be collapsed by removing B and getting::

      A:[C]

    :param parent_map: A dictionary mapping children to their parents
    :return: Another dictionary with 'linear' chains collapsed
    """
    # Note: this isn't a strictly minimal collapse. For example:
    #   A
    #  / \
    # B   C
    #  \ /
    #   D
    #   |
    #   E
    # Will not have 'D' removed, even though 'E' could fit. Also:
    #   A
    #   |    A
    #   B => |
    #   |    C
    #   C
    # A and C are both kept because they are edges of the graph. We *could* get
    # rid of A if we wanted.
    #   A
    #  / \
    # B   C
    # |   |
    # D   E
    #  \ /
    #   F
    # Will not have any nodes removed, even though you do have an
    # 'uninteresting' linear D->B and E->C
    children = {}
    for child, parents in parent_map.iteritems():
        children.setdefault(child, [])
        for p in parents:
            children.setdefault(p, []).append(child)

    orig_children = dict(children)
    removed = set()
    result = dict(parent_map)
    for node in parent_map:
        parents = result[node]
        if len(parents) == 1:
            parent_children = children[parents[0]]
            if len(parent_children) != 1:
                # This is not the only child
                continue
            node_children = children[node]
            if len(node_children) != 1:
                continue
            child_parents = result.get(node_children[0], None)
            if len(child_parents) != 1:
                # This is not its only parent
                continue
            # The child of this node only points at it, and the parent only has
            # this as a child. remove this node, and join the others together
            result[node_children[0]] = parents
            children[parents[0]] = node_children

            del result[node]
            del children[node]
            removed.add(node)

    return result
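
# Illustrative usage (a sketch, not part of the original module): the middle
# of a linear C <- B <- A chain is removed, while the endpoints survive
# because they are edges of the graph:
#
#   >>> collapse_linear_regions({'A': ['B'], 'B': ['C'], 'C': []})
#   {'A': ['C'], 'C': []}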


_counters = [0,0,0,0,0,0,0]
try:
    from bzrlib._known_graph_pyx import KnownGraph
except ImportError:
    from bzrlib._known_graph_py import KnownGraph