# Copyright (C) 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

from bzrlib import (
    errors,
    tsort,
    )
from bzrlib.deprecated_graph import (node_distances, select_farthest)
from bzrlib.revision import NULL_REVISION

# DIAGRAM of terminology
#       A
#       /\
#      B  C
#      |  |\
#      D  E F
#      |\/| |
#      |/\|/
#      G  H
#
# In this diagram, relative to G and H:
# A, B, C, D, E are common ancestors.
# C, D and E are border ancestors, because each has a non-common descendant.
# D and E are least common ancestors because none of their descendants are
# common ancestors.
# C is not a least common ancestor because its descendant, E, is a common
# ancestor.
#
# The find_unique_lca algorithm will pick A in two steps:
# 1. find_lca('G', 'H') => ['D', 'E']
# 2. Since len(['D', 'E']) > 1, find_lca('D', 'E') => ['A']
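#
# Illustrative sketch (not part of the original module): the diagram can be
# written as a plain {child: parents} map and walked through Graph, given
# some parents provider over that map (a hypothetical dict-backed provider
# is sketched in a comment at the end of this file):
#
#   diagram = {'A': [NULL_REVISION], 'B': ['A'], 'C': ['A'], 'D': ['B'],
#              'E': ['C'], 'F': ['C'], 'G': ['D', 'E'], 'H': ['D', 'E', 'F']}
#   graph = Graph(_DictParentsProvider(diagram))
#   graph.find_lca('G', 'H')        # => set(['D', 'E'])
#   graph.find_unique_lca('G', 'H') # => 'A'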


class _StackedParentsProvider(object):

    def __init__(self, parent_providers):
        self._parent_providers = parent_providers

    def __repr__(self):
        return "_StackedParentsProvider(%r)" % self._parent_providers

    def get_parents(self, revision_ids):
        """Find revision ids of the parents of a list of revisions

        A list is returned of the same length as the input.  Each entry
        is a list of parent ids for the corresponding input revision.

        [NULL_REVISION] is used as the parent of the first user-committed
        revision.  Its parent list is empty.

        If the revision is not present (i.e. a ghost), None is used in place
        of the list of parents.
        """
        found = {}
        for parents_provider in self._parent_providers:
            pending_revisions = [r for r in revision_ids if r not in found]
            parent_list = parents_provider.get_parents(pending_revisions)
            new_found = dict((k, v) for k, v in zip(pending_revisions,
                             parent_list) if v is not None)
            found.update(new_found)
            if len(found) == len(revision_ids):
                break
        return [found.get(r, None) for r in revision_ids]
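
# Illustrative sketch (not part of the original module): stacking lets a
# second provider fill in revisions the first one does not know about.  The
# providers here are hypothetical stand-ins for anything exposing get_parents
# with the semantics documented above:
#
#   local = _DictParentsProvider({'rev2': ['rev1']})
#   other = _DictParentsProvider({'rev1': [NULL_REVISION]})
#   stacked = _StackedParentsProvider([local, other])
#   stacked.get_parents(['rev2', 'rev1', 'ghost'])
#   # => [['rev1'], [NULL_REVISION], None]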


class Graph(object):
    """Provide incremental access to revision graphs.

    This is the generic implementation; it is intended to be subclassed to
    specialize it for other repository types.
    """

    def __init__(self, parents_provider):
        """Construct a Graph that uses a parents provider as its input.

        This should not normally be invoked directly, because there may be
        specialized implementations for particular repository types.  See
        Repository.get_graph()

        :param parents_provider: an object providing a get_parents call
            conforming to the behavior of
            _StackedParentsProvider.get_parents
        """
        self.get_parents = parents_provider.get_parents
        self._parents_provider = parents_provider

    def __repr__(self):
        return 'Graph(%r)' % self._parents_provider

    def find_lca(self, *revisions):
        """Determine the lowest common ancestors of the provided revisions

        A lowest common ancestor is a common ancestor none of whose
        descendants are common ancestors.  In graphs, unlike trees, there may
        be multiple lowest common ancestors.

        This algorithm has two phases.  Phase 1 identifies border ancestors,
        and phase 2 filters border ancestors to determine lowest common
        ancestors.

        In phase 1, border ancestors are identified using a breadth-first
        search starting at the bottom of the graph.  Searches are stopped
        whenever a node or one of its descendants is determined to be common.

        In phase 2, the border ancestors are filtered to find the least
        common ancestors.  This is done by searching the ancestries of each
        border ancestor.

        Phase 2 is performed on the principle that a border ancestor that is
        not an ancestor of any other border ancestor is a least common
        ancestor.

        Searches are stopped when they find a node that is determined to be a
        common ancestor of all border ancestors, because this shows that it
        cannot be a descendant of any border ancestor.

        The scaling of this operation should be proportional to
        1. The number of uncommon ancestors
        2. The number of border ancestors
        3. The length of the shortest path between a border ancestor and an
           ancestor of all border ancestors.
        """
        border_common, common, sides = self._find_border_ancestors(revisions)
        # We may have common ancestors that can be reached from each other.
        # - ask for the heads of them to filter it down to only ones that
        #   cannot be reached from each other - phase 2.
        return self.heads(border_common)
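
        # Illustrative sketch (not part of the original module), using the
        # DIAGRAM at the top of this file: for find_lca('G', 'H'), phase 1
        # (_find_border_ancestors) finds the border ancestors
        # set(['C', 'D', 'E']); phase 2 (heads) discards C, whose descendant
        # E is also a border ancestor, leaving set(['D', 'E']).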

    def find_difference(self, left_revision, right_revision):
        """Determine the graph difference between two revisions"""
        border, common, (left, right) = self._find_border_ancestors(
            [left_revision, right_revision])
        return (left.difference(right).difference(common),
                right.difference(left).difference(common))
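
        # Illustrative sketch (not part of the original module), using the
        # DIAGRAM at the top of this file: find_difference('G', 'H') should
        # give roughly (set(['G']), set(['F', 'H'])) - each side's searched
        # ancestry minus the common ancestors and the other side's ancestry.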

    def _make_breadth_first_searcher(self, revisions):
        return _BreadthFirstSearcher(revisions, self)

    def _find_border_ancestors(self, revisions):
        """Find common ancestors with at least one uncommon descendant.

        Border ancestors are identified using a breadth-first
        search starting at the bottom of the graph.  Searches are stopped
        whenever a node or one of its descendants is determined to be common.

        This will scale with the number of uncommon ancestors.

        As well as the border ancestors, a set of seen common ancestors and a
        list of sets of seen ancestors for each input revision are returned.
        This allows calculation of graph difference from the results of this
        operation.
        """
        if None in revisions:
            raise errors.InvalidRevisionId(None, self)
        common_searcher = self._make_breadth_first_searcher([])
        common_ancestors = set()
        searchers = [self._make_breadth_first_searcher([r])
                     for r in revisions]
        active_searchers = searchers[:]
        border_ancestors = set()

        def update_common(searcher, revision):
            # Stop this searcher at everything it has already seen in
            # revision's ancestry, and hand the stopped revisions over to
            # the common-ancestor searcher.
            w_seen_ancestors = searcher.find_seen_ancestors(
                revision)
            stopped = searcher.stop_searching_any(w_seen_ancestors)
            common_ancestors.update(w_seen_ancestors)
            common_searcher.start_searching(stopped)

        while True:
            if len(active_searchers) == 0:
                return border_ancestors, common_ancestors, [s.seen for s in
                                                            searchers]
            try:
                new_common = common_searcher.next()
                common_ancestors.update(new_common)
            except StopIteration:
                pass
            else:
                for searcher in active_searchers:
                    for revision in new_common.intersection(searcher.seen):
                        update_common(searcher, revision)

            newly_seen = set()
            new_active_searchers = []
            for searcher in active_searchers:
                try:
                    newly_seen.update(searcher.next())
                except StopIteration:
                    pass
                else:
                    new_active_searchers.append(searcher)
            active_searchers = new_active_searchers
            for revision in newly_seen:
                if revision in common_ancestors:
                    for searcher in searchers:
                        update_common(searcher, revision)
                    continue
                for searcher in searchers:
                    if revision not in searcher.seen:
                        break
                else:
                    # This revision has been seen by every searcher, so it
                    # is a border ancestor.
                    border_ancestors.add(revision)
                    for searcher in searchers:
                        update_common(searcher, revision)
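
        # Illustrative sketch (not part of the original module), using the
        # DIAGRAM at the top of this file: _find_border_ancestors(['G', 'H'])
        # should return the border ancestors (roughly set(['C', 'D', 'E'])),
        # the common ancestors seen so far, and one seen-set per input
        # revision; find_difference subtracts those seen-sets from each
        # other.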

    def heads(self, keys):
        """Return the heads from amongst keys.

        This is done by searching the ancestries of each key.  Any key that is
        reachable from another key is not returned; all the others are.

        This operation scales with the relative depth between any two keys. If
        any two keys are completely disconnected all ancestry of both sides
        will be retrieved.

        :param keys: An iterable of keys.
        :return: A set of the heads. Note that as a set there is no ordering
            information. Callers will need to filter their input to create
            order if they need it.
        """
        candidate_heads = set(keys)
        searchers = dict((c, self._make_breadth_first_searcher([c]))
                          for c in candidate_heads)
        active_searchers = dict(searchers)
        # skip over the actual candidate for each searcher
        for searcher in active_searchers.itervalues():
            searcher.next()
        while len(active_searchers) > 0:
            for candidate in active_searchers.keys():
                try:
                    searcher = active_searchers[candidate]
                except KeyError:
                    # rare case: we deleted candidate in a previous iteration
                    # through this for loop, because it was determined to be
                    # a descendant of another candidate.
                    continue
                try:
                    ancestors = searcher.next()
                except StopIteration:
                    del active_searchers[candidate]
                    continue
                for ancestor in ancestors:
                    if ancestor in candidate_heads:
                        candidate_heads.remove(ancestor)
                        del searchers[ancestor]
                        if ancestor in active_searchers:
                            del active_searchers[ancestor]
                    for searcher in searchers.itervalues():
                        if ancestor not in searcher.seen:
                            break
                    else:
                        # if this revision was seen by all searchers, then it
                        # is a descendant of all candidates, so we can stop
                        # searching it, and any seen ancestors
                        for searcher in searchers.itervalues():
                            seen_ancestors = \
                                searcher.find_seen_ancestors(ancestor)
                            searcher.stop_searching_any(seen_ancestors)
        return candidate_heads
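
    # Illustrative sketch (not part of the original module), using the
    # DIAGRAM at the top of this file: heads(['C', 'D', 'E']) drops C because
    # C is an ancestor of E, returning set(['D', 'E']); heads(['G', 'H'])
    # returns set(['G', 'H']) unchanged, since neither revision is an
    # ancestor of the other.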

    def find_unique_lca(self, left_revision, right_revision):
        """Find a unique LCA.

        Find lowest common ancestors.  If there is no unique common
        ancestor, find the lowest common ancestors of those ancestors.

        Iteration stops when a unique lowest common ancestor is found.
        The graph origin is necessarily a unique lowest common ancestor.

        Note that None is not an acceptable substitute for NULL_REVISION
        in the input for this method.
        """
        revisions = [left_revision, right_revision]
        while True:
            lca = self.find_lca(*revisions)
            if len(lca) == 1:
                return lca.pop()
            if len(lca) == 0:
                raise errors.NoCommonAncestor(left_revision, right_revision)
            revisions = lca
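
        # Illustrative sketch (not part of the original module): for the
        # DIAGRAM at the top of this file, the loop runs find_lca('G', 'H')
        # => set(['D', 'E']), then find_lca('D', 'E') => set(['A']), and
        # returns 'A'.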

    def iter_topo_order(self, revisions):
        """Iterate through the input revisions in topological order.

        This sorting only ensures that parents come before their children.
        An ancestor may sort after a descendant if the relationship is not
        visible in the supplied list of revisions.
        """
        sorter = tsort.TopoSorter(zip(revisions, self.get_parents(revisions)))
        return sorter.iter_topo_order()
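
    # Illustrative sketch (not part of the original module), using the
    # DIAGRAM at the top of this file: iter_topo_order(['G', 'D', 'B'])
    # yields each revision after its parents, e.g. 'B', 'D', 'G'; parents
    # outside the supplied list (such as 'E') do not constrain the order.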

    def is_ancestor(self, candidate_ancestor, candidate_descendant):
        """Determine whether a revision is an ancestor of another.

        There are two possible outcomes: True and False, but there are three
        possible relationships:

        a) candidate_ancestor is an ancestor of candidate_descendant
        b) candidate_ancestor is a descendant of candidate_descendant
        c) candidate_ancestor is a sibling of candidate_descendant (neither
           is an ancestor of the other)

        To check for a, we walk from candidate_descendant, looking for
        candidate_ancestor.

        To check for b, we walk from candidate_ancestor, looking for
        candidate_descendant.

        To make a and b more efficient, we can stop any searches that hit
        common ancestors.

        If we exhaust our searches, but neither a nor b is true, then c is
        true.

        In order to find c efficiently, we must avoid searching from
        candidate_descendant or candidate_ancestor into common ancestors.  But
        if we don't search common ancestors at all, we won't know if we hit
        common ancestors.  So we have a walker for common ancestors.  Note
        that its searches are not required to terminate in order to determine
        c to be true.
        """
        ancestor_walker = self._make_breadth_first_searcher(
            [candidate_ancestor])
        descendant_walker = self._make_breadth_first_searcher(
            [candidate_descendant])
        common_walker = self._make_breadth_first_searcher([])
        active_ancestor = True
        active_descendant = True
        while (active_ancestor or active_descendant):
            new_common = set()
            if active_descendant:
                try:
                    nodes = descendant_walker.next()
                except StopIteration:
                    active_descendant = False
                else:
                    if candidate_ancestor in nodes:
                        # case a: walking from the descendant reached the
                        # candidate ancestor.
                        return True
                    new_common.update(nodes.intersection(ancestor_walker.seen))
            if active_ancestor:
                try:
                    nodes = ancestor_walker.next()
                except StopIteration:
                    active_ancestor = False
                else:
                    if candidate_descendant in nodes:
                        # case b: candidate_ancestor is really a descendant.
                        return False
                    new_common.update(nodes.intersection(
                        descendant_walker.seen))
            try:
                new_common.update(common_walker.next())
            except StopIteration:
                pass
            # Stop searching into the common ancestry from either side; the
            # common walker takes over from here.
            for walker in (ancestor_walker, descendant_walker):
                for node in new_common:
                    c_ancestors = walker.find_seen_ancestors(node)
                    walker.stop_searching_any(c_ancestors)
            common_walker.start_searching(new_common)
        # case c: both searches were exhausted without meeting.
        return False
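
    # Illustrative sketch (not part of the original module), using the
    # DIAGRAM at the top of this file:
    #   is_ancestor('A', 'G')  # => True  (case a)
    #   is_ancestor('G', 'A')  # => False (case b)
    #   is_ancestor('F', 'G')  # => False (case c: F is only an ancestor of
    #                          #    H, so the two searches never meet)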


class _BreadthFirstSearcher(object):
    """Parallel search the breadth-first ancestry of revisions.

    This class implements the iterator protocol, but additionally
    1. provides a set of seen ancestors, and
    2. allows some ancestries to be unsearched, via stop_searching_any
    """

    def __init__(self, revisions, parents_provider):
        self._start = set(revisions)
        self._search_revisions = None
        self.seen = set(revisions)
        self._parents_provider = parents_provider

    def __repr__(self):
        return ('_BreadthFirstSearcher(self._search_revisions=%r,'
                ' self.seen=%r)' % (self._search_revisions, self.seen))

    def next(self):
        """Return the next ancestors of this revision.

        Ancestors are returned in the order they are seen in a breadth-first
        traversal.  No ancestor will be returned more than once.
        """
        if self._search_revisions is None:
            self._search_revisions = self._start
        else:
            new_search_revisions = set()
            for parents in self._parents_provider.get_parents(
                self._search_revisions):
                if parents is None:
                    # Ghost revisions have no parent list; skip them.
                    continue
                new_search_revisions.update(p for p in parents if
                                            p not in self.seen)
            self._search_revisions = new_search_revisions
        if len(self._search_revisions) == 0:
            raise StopIteration()
        self.seen.update(self._search_revisions)
        return self._search_revisions

    def __iter__(self):
        return self
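
    # Illustrative sketch (not part of the original module): each call to
    # next() returns one breadth-first layer.  For the DIAGRAM at the top of
    # this file, a searcher started on ['G'] yields roughly set(['G']), then
    # set(['D', 'E']), then set(['B', 'C']), then set(['A']), and so on to
    # the graph origin, raising StopIteration once nothing is left to search.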

    def find_seen_ancestors(self, revision):
        """Find ancestors of this revision that have already been seen."""
        searcher = _BreadthFirstSearcher([revision], self._parents_provider)
        seen_ancestors = set()
        for ancestors in searcher:
            for ancestor in ancestors:
                if ancestor not in self.seen:
                    searcher.stop_searching_any([ancestor])
                else:
                    seen_ancestors.add(ancestor)
        return seen_ancestors

    def stop_searching_any(self, revisions):
        """
        Remove any of the specified revisions from the search list.

        None of the specified revisions are required to be present in the
        search list; revisions that are absent are simply ignored.

        :return: the subset of revisions that were actually removed from the
            current search.
        """
        stopped = self._search_revisions.intersection(revisions)
        self._search_revisions = self._search_revisions.difference(revisions)
        return stopped

    def start_searching(self, revisions):
        """Add revisions to the search and mark them as seen."""
        if self._search_revisions is None:
            self._start = set(revisions)
        else:
            self._search_revisions.update(revisions.difference(self.seen))
        self.seen.update(revisions)
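

# Illustrative sketch (not part of the original module): the examples in the
# comments above assume a hypothetical dict-backed parents provider; a
# minimal stand-in could look like this:
#
#   class _DictParentsProvider(object):
#
#       def __init__(self, ancestry):
#           self._ancestry = ancestry
#
#       def get_parents(self, revision_ids):
#           # None marks ghosts, matching the get_parents contract above.
#           return [self._ancestry.get(r) for r in revision_ids]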