# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Indexing facilities."""
        :param key_elements: The number of bytestrings in each key.
        """
        self.reference_lists = reference_lists
        # A dict of {key: (absent, ref_lists, value)}
        self._nodes = {}
        # Keys that are referenced but not actually present in this index
        self._absent_keys = set()
        self._nodes_by_key = None
        self._key_length = key_elements
        self._optimize_for_size = False
        self._combine_backing_indices = True

    def _check_key(self, key):
        """Raise BadIndexKey if key is not a valid key for this index."""
        if type(key) not in (tuple, StaticTuple):
            raise errors.BadIndexKey(key)
        if self._key_length != len(key):
            raise errors.BadIndexKey(key)
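    # Example (an illustrative sketch, not part of the class): keys must be
    # tuples (or StaticTuples) of exactly key_elements bytestrings, so a
    # builder created with key_elements=2 accepts a two-element key but
    # raises BadIndexKey for shorter keys or non-tuple keys.
    #
    #   builder = GraphIndexBuilder(reference_lists=0, key_elements=2)
    #   builder.add_node(('file-id', 'rev-id'), 'a value')   # accepted
    #   builder.add_node(('file-id',), 'a value')            # raises BadIndexKey
    #   builder.add_node('not-a-tuple', 'a value')           # raises BadIndexKey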
        key_dict = self._nodes_by_key
        if self.reference_lists:
            key_value = StaticTuple(key, value, node_refs)
        else:
            key_value = StaticTuple(key, value)
        for subkey in key[:-1]:
            key_dict = key_dict.setdefault(subkey, {})
        key_dict[key[-1]] = key_value
                if reference not in self._nodes:
                    self._check_key(reference)
                    absent_references.append(reference)
            reference_list = as_st([as_st(ref).intern()
                                    for ref in reference_list])
            node_refs.append(reference_list)
        return as_st(node_refs), absent_references

    def add_node(self, key, value, references=()):
        """Add a node to the index.
        for reference in absent_references:
            # There may be duplicates, but I don't think it is worth worrying
            self._nodes[reference] = ('a', (), '')
        self._absent_keys.update(absent_references)
        self._absent_keys.discard(key)
        self._nodes[key] = ('', node_refs, value)
        if self._nodes_by_key is not None and self._key_length > 1:
            self._update_nodes_by_key(key, value, node_refs)

    def clear_cache(self):
        """See GraphIndex.clear_cache()

        This is a no-op, but we need the api to conform to a generic 'Index'
        abstraction.
        """

    def finish(self):
        lines = [_SIGNATURE]
        lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
        lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
        key_count = len(self._nodes) - len(self._absent_keys)
        lines.append(_OPTION_LEN + str(key_count) + '\n')
        prefix_length = sum(len(x) for x in lines)
        # references are byte offsets. To avoid having to do nasty
        # polynomial work to resolve offsets (references to later in the
                (len(result.getvalue()), expected_bytes))
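    # Example (a minimal sketch of the build/serialise cycle; the transport
    # handling is an assumption, not something defined in this module):
    #
    #   builder = GraphIndexBuilder(reference_lists=1)
    #   builder.add_node(('rev-1',), 'value-1', ([],))
    #   builder.add_node(('rev-2',), 'value-2', ([('rev-1',)],))
    #   content = builder.finish().read()   # finish() returns a file-like object
    #   transport.put_bytes('test.gix', content)
    #   index = GraphIndex(transport, 'test.gix', len(content))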

    def set_optimize(self, for_size=None, combine_backing_indices=None):
        """Change how the builder tries to optimize the result.

        :param for_size: Tell the builder to try and make the index as small as
            possible.
        :param combine_backing_indices: If the builder spills to disk to save
            memory, should the on-disk indices be combined. Set to True if you
            are going to be probing the index, but to False if you are not. (If
            you are not querying, then the time spent combining is wasted.)
        """
        # GraphIndexBuilder itself doesn't pay attention to the flag yet, but
        # other builders do.
        if for_size is not None:
            self._optimize_for_size = for_size
        if combine_backing_indices is not None:
            self._combine_backing_indices = combine_backing_indices
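    # Example (sketch): a caller that only writes the index out, and never
    # queries it while it is being built, can trade lookup speed for size:
    #
    #   builder.set_optimize(for_size=True, combine_backing_indices=False)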

    def find_ancestry(self, keys, ref_list_num):
        """See CombinedGraphIndex.find_ancestry()"""
        pending = set(keys)
        parent_map = {}
        missing_keys = set()
        while pending:
            next_pending = set()
            for _, key, value, ref_lists in self.iter_entries(pending):
                parent_keys = ref_lists[ref_list_num]
                parent_map[key] = parent_keys
                next_pending.update([p for p in parent_keys if p not in
                                     parent_map])
            missing_keys.update(pending.difference(parent_map))
            pending = next_pending
        return parent_map, missing_keys

class GraphIndex(object):
    """An index for data with embedded graphs.

    The index maps keys to a list of key reference lists, and a value.
    Each node has the same number of key reference lists. Each key reference
    list can be empty or of arbitrary length. The value is an opaque NULL
    terminated string without any newlines. The storage of the index is
    hidden in the interface: keys and key references are always tuples of
    bytestrings, never the internal representation (e.g. dictionary offsets).

    suitable for production use. :XXX
    """
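    # Example (a minimal sketch; 'transport', the file name and its size are
    # assumptions -- the size is normally recorded when the index is written):
    #
    #   index = GraphIndex(transport, 'test.gix', size)
    #   print index.key_count()
    #   for entry in index.iter_all_entries():
    #       # (index, key, value) or (index, key, value, reference_lists)
    #       print entry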

    def __init__(self, transport, name, size, unlimited_cache=False):
        """Open an index called name on transport.

        :param transport: A bzrlib.transport.Transport.
                node_value = value
            self._nodes[key] = node_value
        # cache the keys for quick set intersections
        if trailers != 1:
            # there must be one line - the empty trailer line.
            raise errors.BadIndexData(self)

    def clear_cache(self):
        """Clear out any cached/memoized values.

        This can be called at any time, but generally it is used when we have
        extracted some information, but don't expect to be requesting any more
        of it.
        """

    def external_references(self, ref_list_num):
        """Return references that are not present in this index.
        if ref_list_num + 1 > self.node_ref_lists:
            raise ValueError('No ref list %d, index has %d ref lists'
                % (ref_list_num, self.node_ref_lists))
        for key, (value, ref_lists) in nodes.iteritems():
            ref_list = ref_lists[ref_list_num]
            refs.update([ref for ref in ref_list if ref not in nodes])
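    # Example (sketch): with reference list 0 holding parent pointers,
    # external_references(0) is the set of parent keys this index refers to
    # but does not itself contain:
    #
    #   ghosts = index.external_references(0)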

    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}

    def _resolve_references(self, references):
        """Return the resolved key references for references.

        References are resolved by looking up the location of the key in the
        _keys_by_offset map and substituting the key name, preserving ordering.

        :param references: An iterable of iterables of key locations. e.g.
            [[123, 456], [123]]
        :return: A tuple of tuples of keys.
        """
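        # Illustrative sketch (hypothetical offsets and keys): if the key that
        # starts at byte offset 123 is ('rev-1',) and the key at offset 456 is
        # ('rev-2',), then references [[123, 456], [123]] resolve to
        # ((('rev-1',), ('rev-2',)), (('rev-1',),)).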

    def _iter_entries_from_total_buffer(self, keys):
        """Iterate over keys when the entire index is parsed."""
        # Note: See the note in BTreeBuilder.iter_entries for why we don't use
        #       .intersection() here
        nodes = self._nodes
        keys = [key for key in keys if key in nodes]
        if self.node_ref_lists:
            for key in keys:
                value, node_refs = nodes[key]
                yield self, key, value, node_refs
        else:
            for key in keys:
                yield self, key, nodes[key]

    def iter_entries(self, keys):
        """Iterate over keys within the index.
                # the last thing looked up was a terminal element
                yield (self, ) + key_dict

    def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
        """See BTreeIndex._find_ancestors."""
        # The api can be implemented as a trivial overlay on top of
        # iter_entries, it is not an efficient implementation, but it at least
        found_keys = set()
        search_keys = set()
        for index, key, value, refs in self.iter_entries(keys):
            parent_keys = refs[ref_list_num]
            found_keys.add(key)
            parent_map[key] = parent_keys
            search_keys.update(parent_keys)
        # Figure out what, if anything, was missing
        missing_keys.update(set(keys).difference(found_keys))
        search_keys = search_keys.difference(parent_map)
        return search_keys
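    # Example driver (sketch): the returned set is the next batch of keys to
    # search for, so callers such as CombinedGraphIndex.find_ancestry loop
    # until it is empty ('index' and ref list 0 are assumptions here):
    #
    #   parent_map = {}
    #   missing_keys = set()
    #   search_keys = set(keys)
    #   while search_keys:
    #       search_keys = index._find_ancestors(search_keys, 0, parent_map,
    #                                           missing_keys)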

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For GraphIndex the estimate is exact.
        """
        if self._key_count is None:
        # - if we have examined this part of the file already - yes
        index = self._parsed_byte_index(location)
        if (len(self._parsed_byte_map) and
            self._parsed_byte_map[index][0] <= location and
            self._parsed_byte_map[index][1] > location):
            # the byte region has been parsed, so no read is needed.
                self.__class__.__name__,
                ', '.join(map(repr, self._indices)))

    def clear_cache(self):
        """See GraphIndex.clear_cache()"""
        for index in self._indices:
            index.clear_cache()

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents.

        This implementation thunks the graph.Graph.get_parents api across to
        GraphIndex.

        :param revision_ids: An iterable of graph keys for this graph.
        :return: A list of parent details for each key in revision_ids.
            Each parent's details will be one of:
            * None when the key was missing
            * (NULL_REVISION,) when the key has no parents.
            * (parent_key, parent_key...) otherwise.
        """
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph.StackedParentsProvider.get_parent_map"""
        search_keys = set(keys)
        if NULL_REVISION in search_keys:
            search_keys.discard(NULL_REVISION)
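        # Example (sketch; keys and results are hypothetical): this lets a
        # CombinedGraphIndex act as a parents provider, with NULL_REVISION
        # handled specially rather than looked up in the indices:
        #
        #   parent_map = combined_index.get_parent_map([('rev-2',)])
        #   # e.g. {('rev-2',): (('rev-1',),)}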
            except errors.NoSuchFile:
                self._reload_or_raise()

    def find_ancestry(self, keys, ref_list_num):
        """Find the complete ancestry for the given set of keys.

        Note that this is a whole-ancestry request, so it should be used
        sparingly.

        :param keys: An iterable of keys to look for
        :param ref_list_num: The reference list which references the parents
            we care about.
        :return: (parent_map, missing_keys)
        """
        missing_keys = set()
        parent_map = {}
        keys_to_lookup = set(keys)
        while keys_to_lookup:
            # keys that *all* indexes claim are missing, stop searching them
            all_index_missing = None
            # print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss'
            # print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup),
            #                                   len(missing_keys))
            for index_idx, index in enumerate(self._indices):
                # TODO: we should probably be doing something with
                #       'missing_keys' since we've already determined that
                #       those revisions have not been found anywhere
                index_missing_keys = set()
                # Find all of the ancestry we can from this index
                # keep looking until the search_keys set is empty, which means
                # things we didn't find should be in index_missing_keys
                search_keys = keys_to_lookup
                # print '    \t%2d\t\t%4d\t%5d\t%5d' % (
                #     index_idx, len(search_keys),
                #     len(parent_map), len(index_missing_keys))
                while search_keys:
                    # TODO: ref_list_num should really be a parameter, since
                    #       CombinedGraphIndex does not know what the ref lists
                    search_keys = index._find_ancestors(search_keys,
                        ref_list_num, parent_map, index_missing_keys)
                    # print '    \t    \t%2d\t%4d\t%5d\t%5d' % (
                    #     sub_generation, len(search_keys),
                    #     len(parent_map), len(index_missing_keys))
                # Now set whatever was missing to be searched in the next index
                keys_to_lookup = index_missing_keys
                if all_index_missing is None:
                    all_index_missing = set(index_missing_keys)
                else:
                    all_index_missing.intersection_update(index_missing_keys)
                if not keys_to_lookup:
                    break
            if all_index_missing is None:
                # There were no indexes, so all search keys are 'missing'
                missing_keys.update(keys_to_lookup)
                keys_to_lookup = None
            else:
                missing_keys.update(all_index_missing)
                keys_to_lookup.difference_update(all_index_missing)
        return parent_map, missing_keys
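    # Example (sketch; the component indices and keys are hypothetical):
    #
    #   combined = CombinedGraphIndex([index_a, index_b])
    #   parent_map, missing = combined.find_ancestry([('tip-rev',)], 0)
    #   # parent_map covers every ancestor found in any index; 'missing'
    #   # holds keys that none of the indices contain.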

    def key_count(self):
        """Return an estimate of the number of keys in this index.

            defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        # Note: See BTreeBuilder.iter_entries for an explanation of why we
        #       aren't using set().intersection() here
        nodes = self._nodes
        keys = [key for key in keys if key in nodes]
        if self.reference_lists:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2], node[1]
        else:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2]

    Queries against this will emit queries against the adapted Graph with the
    prefix added; queries for all items use iter_entries_prefix. The returned
    nodes will have their keys and node references adjusted to remove the
    prefix. Finally, an add_nodes_callback can be supplied - when called the
    nodes and references being added will have prefix prepended.
    """
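    # Example (sketch; the constructor arguments shown are an assumption about
    # the adapter's signature, not something documented in this excerpt):
    #
    #   adapter = GraphIndexPrefixAdapter(combined_index, ('file-id',), 1)
    #   for entry in adapter.iter_all_entries():
    #       pass   # keys and references come back with ('file-id',) stripped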