            if not element or _whitespace_re.search(element) is not None:
                raise errors.BadIndexKey(element)
    def _external_references(self):
        """Return references that are not present in this index.
        """
        keys = set()
        refs = set()
        # TODO: JAM 2008-11-21 This makes an assumption about how the reference
        #       lists are used. It is currently correct for pack-0.92 through
        #       1.9, which use the second reference list in the node references
        #       (3rd column) as the compression parent. Perhaps this should be
        #       moved into something higher up the stack, since it makes
        #       assumptions about how the index is used.
        if self.reference_lists > 1:
            for node in self.iter_all_entries():
                keys.add(node[1])
                refs.update(node[3][1])
            return refs - keys
        else:
            # If reference_lists == 0 there can be no external references, and
            # if reference_lists == 1, then there isn't a place to store the
            # compression parent.
            return set()
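    # Added usage sketch (not part of the original source): assuming an
    # InMemoryGraphIndex (the GraphIndexBuilder subclass that supports
    # iteration), a key that is referenced but never added is reported as an
    # external reference.
    #
    #   builder = InMemoryGraphIndex(reference_lists=2)
    #   builder.add_node(('rev-2',), 'value', ([('rev-1',)], [('rev-1',)]))
    #   builder._external_references()
    #   => set([('rev-1',)])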
    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}
        for reference in absent_references:
            # There may be duplicates, but I don't think it is worth worrying
            # about
            self._nodes[reference] = ('a', (), '')
        self._absent_keys.update(absent_references)
        self._absent_keys.discard(key)
        self._nodes[key] = ('', node_refs, value)
        if self._nodes_by_key is not None and self._key_length > 1:
            self._update_nodes_by_key(key, value, node_refs)
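    # Added illustration (not part of the original source): referencing a key
    # that has not been added leaves an 'absent' placeholder in _nodes, and
    # _absent_keys lets finish() report only the number of real nodes.
    #
    #   builder = GraphIndexBuilder(reference_lists=1)
    #   builder.add_node(('child',), 'value', ([('parent',)],))
    #   # ('parent',) stays in _absent_keys until add_node(('parent',), ...)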
    def clear_cache(self):
        """See GraphIndex.clear_cache()

        This is a no-op, but we need the api to conform to a generic 'Index'
        abstraction.
        """
    def finish(self):
        lines = [_SIGNATURE]
        lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
        lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
        key_count = len(self._nodes) - len(self._absent_keys)
        lines.append(_OPTION_LEN + str(key_count) + '\n')
        prefix_length = sum(len(x) for x in lines)
        # references are byte offsets. To avoid having to do nasty
        # polynomial work to resolve offsets (references to later in the
                (len(result.getvalue()), expected_bytes))
    def set_optimize(self, for_size=None, combine_backing_indices=None):
        """Change how the builder tries to optimize the result.

        :param for_size: Tell the builder to try and make the index as small
            as possible.
        :param combine_backing_indices: If the builder spills to disk to save
            memory, should the on-disk indices be combined. Set to True if you
            are going to be probing the index, but to False if you are not. (If
            you are not querying, then the time spent combining is wasted.)
        """
        # GraphIndexBuilder itself doesn't pay attention to the flag yet, but
        # other builders do.
        if for_size is not None:
            self._optimize_for_size = for_size
        if combine_backing_indices is not None:
            self._combine_backing_indices = combine_backing_indices
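    # Added usage sketch (not part of the original source): a bulk writer that
    # never queries its own index can skip combining spilled indices.
    # GraphIndexBuilder itself only records the flags; subclasses that spill
    # to disk honour them.
    #
    #   builder = GraphIndexBuilder(reference_lists=1)
    #   builder.set_optimize(for_size=True, combine_backing_indices=False)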
    def find_ancestry(self, keys, ref_list_num):
        """See CombinedGraphIndex.find_ancestry()"""
        pending = set(keys)
        parent_map = {}
        missing_keys = set()
        while pending:
            next_pending = set()
            for _, key, value, ref_lists in self.iter_entries(pending):
                parent_keys = ref_lists[ref_list_num]
                parent_map[key] = parent_keys
                next_pending.update([p for p in parent_keys if p not in
                                     parent_map])
            missing_keys.update(pending.difference(parent_map))
            pending = next_pending
        return parent_map, missing_keys
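    # Added usage sketch (not part of the original source): InMemoryGraphIndex
    # inherits this method and provides the iter_entries it relies on.
    #
    #   idx = InMemoryGraphIndex(reference_lists=1)
    #   idx.add_node(('a',), '', ([],))
    #   idx.add_node(('b',), '', ([('a',)],))
    #   parent_map, missing = idx.find_ancestry([('b',)], 0)
    #   # parent_map covers ('b',) and ('a',); missing == set()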
class GraphIndex(object):
    """An index for data with embedded graphs.

    The index maps keys to a list of key reference lists, and a value.
    Each node has the same number of key reference lists. Each key reference
    list can be empty or of arbitrary length. The value is an opaque NULL
    terminated string without any newlines. The storage of the index is
    hidden in the interface: keys and key references are always tuples of
    bytestrings, never the internal representation (e.g. dictionary offsets).
                node_value = value
            self._nodes[key] = node_value
        # cache the keys for quick set intersections
        if trailers != 1:
            # there must be one line - the empty trailer line.
            raise errors.BadIndexData(self)
    def clear_cache(self):
        """Clear out any cached/memoized values.

        This can be called at any time, but generally it is used when we have
        extracted some information, but don't expect to be requesting any more
        of it.
        """
    def external_references(self, ref_list_num):
        """Return references that are not present in this index.
        """
        self._buffer_all()
        if ref_list_num + 1 > self.node_ref_lists:
            raise ValueError('No ref list %d, index has %d ref lists'
                % (ref_list_num, self.node_ref_lists))
        refs = set()
        nodes = self._nodes
        for key, (value, ref_lists) in nodes.iteritems():
            ref_list = ref_lists[ref_list_num]
            refs.update([ref for ref in ref_list if ref not in nodes])
        return refs
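    # Added usage sketch (not part of the original source); the transport
    # helpers are standard bzrlib API, but the file name and data are made up.
    # Transport.put_file returns the number of bytes written, which is the
    # size GraphIndex wants.
    #
    #   from bzrlib.transport import get_transport
    #   builder = GraphIndexBuilder(reference_lists=2)
    #   builder.add_node(('tip',), 'value', ([('ghost',)], [('ghost',)]))
    #   trans = get_transport('memory:///')
    #   size = trans.put_file('test.gix', builder.finish())
    #   index = GraphIndex(trans, 'test.gix', size)
    #   index.external_references(1)
    #   => set([('ghost',)])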
    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}
                    # the last thing looked up was a terminal element
                    yield (self, ) + key_dict
    def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
        """See BTreeIndex._find_ancestors."""
        # The api can be implemented as a trivial overlay on top of
        # iter_entries, it is not an efficient implementation, but it at least
        # gets the job done.
        found_keys = set()
        search_keys = set()
        for index, key, value, refs in self.iter_entries(keys):
            parent_keys = refs[ref_list_num]
            found_keys.add(key)
            parent_map[key] = parent_keys
            search_keys.update(parent_keys)
        # Figure out what, if anything, was missing
        missing_keys.update(set(keys).difference(found_keys))
        search_keys = search_keys.difference(parent_map)
        return search_keys
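    # Added note on the calling contract (not part of the original source):
    # parent_map and missing_keys are caller-owned accumulators, and the
    # return value is the next set of keys to search (parents that were seen
    # but not yet resolved). 'index' below is a placeholder for a GraphIndex.
    #
    #   parent_map = {}
    #   missing_keys = set()
    #   pending = set([('tip',)])
    #   while pending:
    #       pending = index._find_ancestors(pending, 0, parent_map,
    #                                       missing_keys)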
    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For GraphIndex the estimate is exact.
        """
        if self._key_count is None:
        self._indices = indices
        self._reload_func = reload_func
        # Sibling indices are other CombinedGraphIndex instances that we
        # should call _move_to_front_by_name on when we auto-reorder ourself.
        self._sibling_indices = []
        # A list of names that corresponds to the instances in self._indices,
        # so _index_names[0] is always the name for _indices[0], etc. Sibling
        # indices must all use the same set of names as each other.
        self._index_names = [None] * len(self._indices)
    def __repr__(self):
        return "%s(%s)" % (
                self.__class__.__name__,
                ', '.join(map(repr, self._indices)))
    def clear_cache(self):
        """See GraphIndex.clear_cache()"""
        for index in self._indices:
            index.clear_cache()
    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents.

        This implementation thunks the graph.Graph.get_parents api across to
        GraphIndex.

        :param revision_ids: An iterable of graph keys for this graph.
        :return: A list of parent details for each key in revision_ids.
            Each parent detail will be one of:
             * None when the key was missing
             * (NULL_REVISION,) when the key has no parents.
             * (parent_key, parent_key...) otherwise.
        """
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]
    def get_parent_map(self, keys):
        """See graph.StackedParentsProvider.get_parent_map"""
        search_keys = set(keys)
        if NULL_REVISION in search_keys:
            search_keys.discard(NULL_REVISION)
            found_parents = {NULL_REVISION:[]}
        else:
            found_parents = {}
        for index, key, value, refs in self.iter_entries(search_keys):
            parents = refs[0]
            if not parents:
                parents = (NULL_REVISION,)
            found_parents[key] = parents
        return found_parents
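    # Added usage sketch (not part of the original source): any index with
    # iter_entries works, so an InMemoryGraphIndex is enough for a quick look.
    # Keys whose first reference list is empty come back as (NULL_REVISION,);
    # keys no index knows about are simply absent from the result.
    #
    #   idx = InMemoryGraphIndex(reference_lists=1)
    #   idx.add_node(('root',), '', ([],))
    #   idx.add_node(('child',), '', ([('root',)],))
    #   CombinedGraphIndex([idx]).get_parent_map([('child',), ('ghost',)])
    #   => {('child',): (('root',),)}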
    has_key = _has_key_from_parent_map
    def insert_index(self, pos, index, name=None):
        """Insert a new index in the list of indices to query.

        :param pos: The position to insert the index.
        :param index: The index to insert.
        :param name: a name for this index, e.g. a pack name. These names can
            be used to reflect index reorderings to related CombinedGraphIndex
            instances that use the same names. (see set_sibling_indices)
        """
        self._indices.insert(pos, index)
        self._index_names.insert(pos, name)
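    # Added usage sketch (not part of the original source): two
    # CombinedGraphIndex objects built over the same pack names can keep their
    # search order in sync via set_sibling_indices. 'rev_idx_a' and
    # 'inv_idx_a' are placeholders for real index objects.
    #
    #   revisions = CombinedGraphIndex([])
    #   inventories = CombinedGraphIndex([])
    #   revisions.set_sibling_indices([inventories])
    #   revisions.insert_index(0, rev_idx_a, name='pack-a')
    #   inventories.insert_index(0, inv_idx_a, name='pack-a')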
    def iter_all_entries(self):
        """Iterate over all keys within the index
        seen_keys = set()
        hit_indices = []
        while True:
            try:
                for index in self._indices:
                    index_hit = False
                    for node in index.iter_entries_prefix(keys):
                        if node[1] in seen_keys:
                            continue
                        seen_keys.add(node[1])
                        yield node
                        index_hit = True
                    if index_hit:
                        hit_indices.append(index)
                break
            except errors.NoSuchFile:
                self._reload_or_raise()
        self._move_to_front(hit_indices)
    def _move_to_front(self, hit_indices):
        """Rearrange self._indices so that hit_indices are first.

        Order is maintained as much as possible, e.g. the first unhit index
        will be the first index in _indices after the hit_indices, and the
        hit_indices will be present in exactly the order they are passed to
        _move_to_front.

        _move_to_front propagates to all objects in self._sibling_indices by
        calling _move_to_front_by_name.
        """
        if self._indices[:len(hit_indices)] == hit_indices:
            # The 'hit_indices' are already at the front (and in the same
            # order), no need to re-order
            return
        hit_names = self._move_to_front_by_index(hit_indices)
        for sibling_idx in self._sibling_indices:
            sibling_idx._move_to_front_by_name(hit_names)
    def _move_to_front_by_index(self, hit_indices):
        """Core logic for _move_to_front.

        Returns a list of names corresponding to the hit_indices param.
        """
        indices_info = zip(self._index_names, self._indices)
        if 'index' in debug.debug_flags:
            mutter('CombinedGraphIndex reordering: currently %r, promoting %r',
                   indices_info, hit_indices)
        hit_names = []
        unhit_names = []
        new_hit_indices = []
        unhit_indices = []
        for offset, (name, idx) in enumerate(indices_info):
            if idx in hit_indices:
                hit_names.append(name)
                new_hit_indices.append(idx)
                if len(new_hit_indices) == len(hit_indices):
                    # We've found all of the hit entries, everything else is
                    # unhit
                    unhit_names.extend(self._index_names[offset+1:])
                    unhit_indices.extend(self._indices[offset+1:])
                    break
            else:
                unhit_names.append(name)
                unhit_indices.append(idx)
        self._indices = new_hit_indices + unhit_indices
        self._index_names = hit_names + unhit_names
        if 'index' in debug.debug_flags:
            mutter('CombinedGraphIndex reordered: %r', self._indices)
        return hit_names
    def _move_to_front_by_name(self, hit_names):
        """Moves indices named by 'hit_names' to the front of the search
        order, as described in _move_to_front.
        """
        # Translate names to index instances, and then call
        # _move_to_front_by_index.
        indices_info = zip(self._index_names, self._indices)
        hit_indices = []
        for name, idx in indices_info:
            if name in hit_names:
                hit_indices.append(idx)
        self._move_to_front_by_index(hit_indices)
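    # Added illustration (not part of the original source): with indices named
    # 'a', 'b', 'c' in that order, promoting 'c' keeps the relative order of
    # the unhit indices.
    #
    #   combined._index_names == ['a', 'b', 'c']
    #   combined._move_to_front_by_name(['c'])
    #   combined._index_names == ['c', 'a', 'b']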
    def find_ancestry(self, keys, ref_list_num):
        """Find the complete ancestry for the given set of keys.

        Note that this is a whole-ancestry request, so it should be used
        sparingly.

        :param keys: An iterable of keys to look for
        :param ref_list_num: The reference list which references the parents
            we care about.
        :return: (parent_map, missing_keys)
        """
        # XXX: make this call _move_to_front?
        missing_keys = set()
        parent_map = {}
        keys_to_lookup = set(keys)
        generation = 0
        while keys_to_lookup:
            # keys that *all* indexes claim are missing, stop searching them
            generation += 1
            all_index_missing = None
            # print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss'
            # print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup),
            #                                   len(parent_map),
            #                                   len(missing_keys))
            for index_idx, index in enumerate(self._indices):
                # TODO: we should probably be doing something with
                #       'missing_keys' since we've already determined that
                #       those revisions have not been found anywhere
                index_missing_keys = set()
                # Find all of the ancestry we can from this index
                # keep looking until the search_keys set is empty, which means
                # things we didn't find should be in index_missing_keys
                search_keys = keys_to_lookup
                sub_generation = 0
                # print '    \t%2d\t\t%4d\t%5d\t%5d' % (
                #     index_idx, len(search_keys),
                #     len(parent_map), len(index_missing_keys))
                while search_keys:
                    sub_generation += 1
                    # TODO: ref_list_num should really be a parameter, since
                    #       CombinedGraphIndex does not know what the ref lists
                    #       mean.
                    search_keys = index._find_ancestors(search_keys,
                        ref_list_num, parent_map, index_missing_keys)
                    # print '    \t    \t%2d\t%4d\t%5d\t%5d' % (
                    #     sub_generation, len(search_keys),
                    #     len(parent_map), len(index_missing_keys))
                # Now set whatever was missing to be searched in the next index
                keys_to_lookup = index_missing_keys
                if all_index_missing is None:
                    all_index_missing = set(index_missing_keys)
                else:
                    all_index_missing.intersection_update(index_missing_keys)
                if not keys_to_lookup:
                    break
            if all_index_missing is None:
                # There were no indexes, so all search keys are 'missing'
                missing_keys.update(keys_to_lookup)
                keys_to_lookup = None
            else:
                missing_keys.update(all_index_missing)
                keys_to_lookup.difference_update(all_index_missing)
        return parent_map, missing_keys
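    # Added usage sketch (not part of the original source): 'rev_index_1' and
    # 'rev_index_2' are placeholders for on-disk GraphIndex/BTreeGraphIndex
    # objects (both implement _find_ancestors); reference list 0 is assumed to
    # hold the parent pointers.
    #
    #   combined = CombinedGraphIndex([rev_index_1, rev_index_2])
    #   parent_map, missing_keys = combined.find_ancestry([('tip-id',)], 0)
    #   # parent_map maps every reachable key to its parent keys;
    #   # missing_keys holds referenced keys no index could find (ghosts).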
    def key_count(self):
        """Return an estimate of the number of keys in this index.