# Copyright (C) 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Indexing facilities."""

__all__ = [
    'CombinedGraphIndex',
    'GraphIndex',
    'GraphIndexBuilder',
    'GraphIndexPrefixAdapter',
    'InMemoryGraphIndex',
    ]

from bisect import bisect_right
from cStringIO import StringIO
import re
import sys

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import trace
from bzrlib.bisect_multi import bisect_multi_bytes
from bzrlib.revision import NULL_REVISION
from bzrlib.trace import mutter
""")
from bzrlib import (
    debug,
    errors,
    )

_HEADER_READV = (0, 200)
_OPTION_KEY_ELEMENTS = "key_elements="
_OPTION_LEN = "len="
_OPTION_NODE_REFS = "node_ref_lists="
_SIGNATURE = "Bazaar Graph Index 1\n"


_whitespace_re = re.compile('[\t\n\x0b\x0c\r\x00 ]')
_newline_null_re = re.compile('[\n\0]')


def _has_key_from_parent_map(self, key):
    """Check if this index has one key.

    If it's possible to check for multiple keys at once through
    calling get_parent_map that should be faster.
    """
    return (key in self.get_parent_map([key]))


def _missing_keys_from_parent_map(self, keys):
    return set(keys) - set(self.get_parent_map(keys))


class GraphIndexBuilder(object):
    """A builder that can build a GraphIndex.

    The resulting graph has the structure:

    _SIGNATURE OPTIONS NODES NEWLINE
    _SIGNATURE     := 'Bazaar Graph Index 1' NEWLINE
    OPTIONS        := 'node_ref_lists=' DIGITS NEWLINE
    NODES          := NODE*
    NODE           := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
    KEY            := Not-whitespace-utf8
    ABSENT         := 'a'
    REFERENCES     := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
    REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
    REFERENCE      := DIGITS  ; digits is the byte offset in the index of the
                              ; referenced key.
    VALUE          := no-newline-no-null-bytes
    """
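
    # A sketch for illustration (not part of the original source): with
    # key_elements=1 and node_ref_lists=1, an index holding 'rev1' plus a
    # 'rev2' that references it would serialise roughly as (NUL written as
    # \x00, reference digits zero-padded to a uniform width by finish()):
    #
    #   Bazaar Graph Index 1
    #   node_ref_lists=1
    #   key_elements=1
    #   len=2
    #   rev1\x00\x00\x00value1
    #   rev2\x00\x00<byte offset of the rev1 line>\x00value2
    #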

    def __init__(self, reference_lists=0, key_elements=1):
        """Create a GraphIndex builder.

        :param reference_lists: The number of node references lists for each
            entry.
        :param key_elements: The number of bytestrings in each key.
        """
        self.reference_lists = reference_lists
        self._keys = set()
        # A dict of {key: (absent, ref_lists, value)}
        self._nodes = {}
        self._nodes_by_key = None
        self._key_length = key_elements
        self._optimize_for_size = False
        self._combine_backing_indices = True

    def _check_key(self, key):
        """Raise BadIndexKey if key is not a valid key for this index."""
        if type(key) != tuple:
            raise errors.BadIndexKey(key)
        if self._key_length != len(key):
            raise errors.BadIndexKey(key)
        for element in key:
            if not element or _whitespace_re.search(element) is not None:
                raise errors.BadIndexKey(element)

    def _external_references(self):
        """Return references that are not present in this index."""
        keys = set()
        refs = set()
        # TODO: JAM 2008-11-21 This makes an assumption about how the reference
        #       lists are used. It is currently correct for pack-0.92 through
        #       1.9, which use the node references (3rd column) second
        #       reference list as the compression parent. Perhaps this should
        #       be moved into something higher up the stack, since it
        #       makes assumptions about how the index is used.
        if self.reference_lists > 1:
            for node in self.iter_all_entries():
                keys.add(node[1])
                refs.update(node[3][1])
            return refs - keys
        else:
            # If reference_lists == 0 there can be no external references, and
            # if reference_lists == 1, then there isn't a place to store the
            # compression parent.
            return set()

    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}
            if self.reference_lists:
                for key, (absent, references, value) in self._nodes.iteritems():
                    if absent:
                        continue
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value, references
            else:
                for key, (absent, references, value) in self._nodes.iteritems():
                    if absent:
                        continue
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value
            self._nodes_by_key = nodes_by_key
        return self._nodes_by_key

    def _update_nodes_by_key(self, key, value, node_refs):
        """Update the _nodes_by_key dict with a new key.

        For a key of (foo, bar, baz) create
        _nodes_by_key[foo][bar][baz] = key_value
        """
        if self._nodes_by_key is None:
            return
        key_dict = self._nodes_by_key
        if self.reference_lists:
            key_value = key, value, node_refs
        else:
            key_value = key, value
        for subkey in key[:-1]:
            key_dict = key_dict.setdefault(subkey, {})
        key_dict[key[-1]] = key_value

    def _check_key_ref_value(self, key, references, value):
        """Check that 'key' and 'references' are all valid.

        :param key: A key tuple. Must conform to the key interface (be a tuple,
            be of the right length, not have any whitespace or nulls in any key
            element.)
        :param references: An iterable of reference lists. Something like
            [[(ref, key)], [(ref, key), (other, key)]]
        :param value: The value associated with this key. Must not contain
            newlines or null characters.
        :return: (node_refs, absent_references)
            node_refs is basically a packed form of 'references' where all
            keys are tuples.
            absent_references is the list of reference keys that are not in
            self._nodes. This may contain duplicates if the same key is
            referenced in multiple lists.
        """
        self._check_key(key)
        if _newline_null_re.search(value) is not None:
            raise errors.BadIndexValue(value)
        if len(references) != self.reference_lists:
            raise errors.BadIndexValue(references)
        node_refs = []
        absent_references = []
        for reference_list in references:
            for reference in reference_list:
                # If reference *is* in self._nodes, then we know it has already
                # been checked.
                if reference not in self._nodes:
                    self._check_key(reference)
                    absent_references.append(reference)
            node_refs.append(tuple(reference_list))
        return tuple(node_refs), absent_references

    def add_node(self, key, value, references=()):
        """Add a node to the index.

        :param key: The key. keys are non-empty tuples containing
            as many whitespace-free utf8 bytestrings as the key length
            defined for this index.
        :param references: An iterable of iterables of keys. Each is a
            reference to another key.
        :param value: The value to associate with the key. It may be any
            bytes as long as it does not contain \0 or \n.
        """
        (node_refs,
         absent_references) = self._check_key_ref_value(key, references, value)
        if key in self._nodes and self._nodes[key][0] != 'a':
            raise errors.BadIndexDuplicateKey(key, self)
        for reference in absent_references:
            # There may be duplicates, but I don't think it is worth worrying
            # about.
            self._nodes[reference] = ('a', (), '')
        self._nodes[key] = ('', node_refs, value)
        self._keys.add(key)
        if self._nodes_by_key is not None and self._key_length > 1:
            self._update_nodes_by_key(key, value, node_refs)

    def finish(self):
        lines = [_SIGNATURE]
        lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
        lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
        lines.append(_OPTION_LEN + str(len(self._keys)) + '\n')
        prefix_length = sum(len(x) for x in lines)
        # references are byte offsets. To avoid having to do nasty
        # polynomial work to resolve offsets (references to later in the
        # file cannot be determined until all the inbetween references have
        # been calculated too) we pad the offsets with 0's to make them be
        # of consistent length. Using binary offsets would break the trivial
        # file parsing.
        # to calculate the width of zero's needed we do three passes:
        # one to gather all the non-reference data and the number of references.
        # one to pad all the data with reference-length and determine entry
        # addresses.
        # One to serialise.

        # forward sorted by key. In future we may consider topological sorting,
        # at the cost of table scans for direct lookup, or a second index for
        # direct lookup.
        nodes = sorted(self._nodes.items())
        # if we do not prepass, we don't know how long it will be up front.
        expected_bytes = None
        # we only need to pre-pass if we have reference lists at all.
        if self.reference_lists:
            key_offset_info = []
            non_ref_bytes = prefix_length
            total_references = 0
            # TODO use simple multiplication for the constants in this loop.
            for key, (absent, references, value) in nodes:
                # record the offset known *so far* for this key:
                # the non reference bytes to date, and the total references to
                # date - saves reaccumulating on the second pass
                key_offset_info.append((key, non_ref_bytes, total_references))
                # key is literal, value is literal, there are 3 null's, 1 NL
                # key is variable length tuple, \x00 between elements
                non_ref_bytes += sum(len(element) for element in key)
                if self._key_length > 1:
                    non_ref_bytes += self._key_length - 1
                # value is literal bytes, there are 3 null's, 1 NL.
                non_ref_bytes += len(value) + 3 + 1
                # one byte for absent if set.
                if absent:
                    non_ref_bytes += 1
                elif self.reference_lists:
                    # (ref_lists -1) tabs
                    non_ref_bytes += self.reference_lists - 1
                    # (ref-1 cr's per ref_list)
                    for ref_list in references:
                        # how many references across the whole file?
                        total_references += len(ref_list)
                        # accrue reference separators
                        if ref_list:
                            non_ref_bytes += len(ref_list) - 1
            # how many digits are needed to represent the total byte count?
            digits = 1
            possible_total_bytes = non_ref_bytes + total_references*digits
            while 10 ** digits < possible_total_bytes:
                digits += 1
                possible_total_bytes = non_ref_bytes + total_references*digits
            expected_bytes = possible_total_bytes + 1 # terminating newline
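            # Worked example (illustrative, not from the original source):
            # with non_ref_bytes=900 and total_references=20, digits=1 gives
            # 920 >= 10, digits=2 gives 940 >= 100, and digits=3 gives
            # 960 < 1000, so every reference is emitted as a 3-digit
            # zero-padded byte offset.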
            # resolve key addresses.
            key_addresses = {}
            for key, non_ref_bytes, total_references in key_offset_info:
                key_addresses[key] = non_ref_bytes + total_references*digits
            # serialise
            format_string = '%%0%sd' % digits
        for key, (absent, references, value) in nodes:
            flattened_references = []
            for ref_list in references:
                ref_addresses = []
                for reference in ref_list:
                    ref_addresses.append(format_string % key_addresses[reference])
                flattened_references.append('\r'.join(ref_addresses))
            string_key = '\x00'.join(key)
            lines.append("%s\x00%s\x00%s\x00%s\n" % (string_key, absent,
                '\t'.join(flattened_references), value))
        lines.append('\n')
        result = StringIO(''.join(lines))
        if expected_bytes and len(result.getvalue()) != expected_bytes:
            raise errors.BzrError('Failed index creation. Internal error:'
                ' mismatched output length and expected length: %d %d' %
                (len(result.getvalue()), expected_bytes))
        return result

    def set_optimize(self, for_size=None, combine_backing_indices=None):
        """Change how the builder tries to optimize the result.

        :param for_size: Tell the builder to try and make the index as small as
            possible.
        :param combine_backing_indices: If the builder spills to disk to save
            memory, should the on-disk indices be combined. Set to True if you
            are going to be probing the index, but to False if you are not. (If
            you are not querying, then the time spent combining is wasted.)
        :return: None
        """
        # GraphIndexBuilder itself doesn't pay attention to the flag yet, but
        # other builders do.
        if for_size is not None:
            self._optimize_for_size = for_size
        if combine_backing_indices is not None:
            self._combine_backing_indices = combine_backing_indices

    def find_ancestry(self, keys, ref_list_num):
        """See CombinedGraphIndex.find_ancestry()"""
        pending = set(keys)
        parent_map = {}
        missing_keys = set()
        while pending:
            next_pending = set()
            for _, key, value, ref_lists in self.iter_entries(pending):
                parent_keys = ref_lists[ref_list_num]
                parent_map[key] = parent_keys
                next_pending.update([p for p in parent_keys if p not in
                                     parent_map])
                missing_keys.update(pending.difference(parent_map))
            pending = next_pending
        return parent_map, missing_keys
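

# An illustrative sketch (not part of the original source): the typical
# GraphIndexBuilder workflow is to add nodes and then call finish() to get a
# file-like object holding the serialised index.
def _example_build_index():
    builder = GraphIndexBuilder(reference_lists=1, key_elements=1)
    builder.add_node(('rev1',), 'value1', ([],))
    # 'rev2' names 'rev1' in its single reference list.
    builder.add_node(('rev2',), 'value2', ([('rev1',)],))
    # finish() returns a StringIO of the index bytes, ready to be written out.
    return builder.finish().getvalue()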


class GraphIndex(object):
    """An index for data with embedded graphs.

    The index maps keys to a list of key reference lists, and a value.
    Each node has the same number of key reference lists. Each key reference
    list can be empty or an arbitrary length. The value is an opaque NULL
    terminated string without any newlines. The storage of the index is
    hidden in the interface: keys and key references are always tuples of
    bytestrings, never the internal representation (e.g. dictionary offsets).

    It is presumed that the index will not be mutated - it is static data.

    Successive iter_all_entries calls will read the entire index each time.
    Additionally, iter_entries calls will read the index linearly until the
    desired keys are found. XXX: This must be fixed before the index is
    suitable for production use. :XXX
    """

    def __init__(self, transport, name, size, unlimited_cache=False):
        """Open an index called name on transport.

        :param transport: A bzrlib.transport.Transport.
        :param name: A path to provide to transport API calls.
        :param size: The size of the index in bytes. This is used for bisection
            logic to perform partial index reads. While the size could be
            obtained by statting the file this introduced an additional round
            trip as well as requiring stat'able transports, both of which are
            avoided by having it supplied. If size is None, then bisection
            support will be disabled and accessing the index will just stream
            all the data.
        """
        self._transport = transport
        self._name = name
        # Becomes a dict of key:(value, reference-list-byte-locations) used by
        # the bisection interface to store parsed but not resolved keys.
        self._bisect_nodes = None
        # Becomes a dict of key:(value, reference-list-keys) which are ready to
        # be returned directly to callers.
        self._nodes = None
        # a sorted list of slice-addresses for the parsed bytes of the file.
        # e.g. (0,1) would mean that byte 0 is parsed.
        self._parsed_byte_map = []
        # a sorted list of keys matching each slice address for parsed bytes
        # e.g. (None, 'foo@bar') would mean that the first byte contained no
        # key, and the end byte of the slice is the end of the data for
        # 'foo@bar'
        self._parsed_key_map = []
        self._key_count = None
        self._keys_by_offset = None
        self._nodes_by_key = None
        self._size = size
        # The number of bytes we've read so far in trying to process this file
        self._bytes_read = 0

    def __eq__(self, other):
        """Equal when self and other were created with the same parameters."""
        return (
            type(self) == type(other) and
            self._transport == other._transport and
            self._name == other._name and
            self._size == other._size)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__,
            self._transport.abspath(self._name))

    def _buffer_all(self, stream=None):
        """Buffer all the index data.

        Mutates self._nodes and self.keys_by_offset.
        """
        if self._nodes is not None:
            # We already did this
            return
        if 'index' in debug.debug_flags:
            mutter('Reading entire index %s', self._transport.abspath(self._name))
        if stream is None:
            stream = self._transport.get(self._name)
        self._read_prefix(stream)
        self._expected_elements = 3 + self._key_length
        # raw data keyed by offset
        self._keys_by_offset = {}
        # ready-to-return key:value or key:value, node_ref_lists
        self._nodes = {}
        self._nodes_by_key = None
        trailers = 0
        pos = stream.tell()
        lines = stream.read().split('\n')
        del lines[-1]
        _, _, _, trailers = self._parse_lines(lines, pos)
        for key, absent, references, value in self._keys_by_offset.itervalues():
            if absent:
                continue
            # resolve references:
            if self.node_ref_lists:
                node_value = (value, self._resolve_references(references))
            else:
                node_value = value
            self._nodes[key] = node_value
        # cache the keys for quick set intersections
        self._keys = set(self._nodes)
        if trailers != 1:
            # there must be one line - the empty trailer line.
            raise errors.BadIndexData(self)

    def external_references(self, ref_list_num):
        """Return references that are not present in this index."""
        self._buffer_all()
        if ref_list_num + 1 > self.node_ref_lists:
            raise ValueError('No ref list %d, index has %d ref lists'
                % (ref_list_num, self.node_ref_lists))
        refs = set()
        for key, (value, ref_lists) in self._nodes.iteritems():
            ref_list = ref_lists[ref_list_num]
            refs.update(ref_list)
        return refs - self._keys

    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}
            if self.node_ref_lists:
                for key, (value, references) in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value, references
            else:
                for key, value in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value
            self._nodes_by_key = nodes_by_key
        return self._nodes_by_key

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        :return: An iterable of (index, key, value) or (index, key, value,
            reference_lists).
            The former tuple is used when there are no reference lists in the
            index, making the API compatible with simple key:value index types.
            There is no defined order for the result iteration - it will be in
            the most efficient order for the index.
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        if self._nodes is None:
            self._buffer_all()
        if self.node_ref_lists:
            for key, (value, node_ref_lists) in self._nodes.iteritems():
                yield self, key, value, node_ref_lists
        else:
            for key, value in self._nodes.iteritems():
                yield self, key, value

    def _read_prefix(self, stream):
        signature = stream.read(len(self._signature()))
        if not signature == self._signature():
            raise errors.BadIndexFormatSignature(self._name, GraphIndex)
        options_line = stream.readline()
        if not options_line.startswith(_OPTION_NODE_REFS):
            raise errors.BadIndexOptions(self)
        try:
            self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):-1])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = stream.readline()
        if not options_line.startswith(_OPTION_KEY_ELEMENTS):
            raise errors.BadIndexOptions(self)
        try:
            self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):-1])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = stream.readline()
        if not options_line.startswith(_OPTION_LEN):
            raise errors.BadIndexOptions(self)
        try:
            self._key_count = int(options_line[len(_OPTION_LEN):-1])
        except ValueError:
            raise errors.BadIndexOptions(self)

    def _resolve_references(self, references):
        """Return the resolved key references for references.

        References are resolved by looking up the location of the key in the
        _keys_by_offset map and substituting the key name, preserving ordering.

        :param references: An iterable of iterables of key locations. e.g.
            [[123, 456], [123]]
        :return: A tuple of tuples of keys.
        """
        node_refs = []
        for ref_list in references:
            node_refs.append(tuple([self._keys_by_offset[ref][0] for ref in ref_list]))
        return tuple(node_refs)

    def _find_index(self, range_map, key):
        """Helper for the _parsed_*_index calls.

        Given a range map - [(start, end), ...], finds the index of the range
        in the map for key if it is in the map, and if it is not there, the
        immediately preceding range in the map.
        """
        result = bisect_right(range_map, key) - 1
        if result + 1 < len(range_map):
            # check the border condition, it may be in result + 1
            if range_map[result + 1][0] == key[0]:
                return result + 1
        return result

    def _parsed_byte_index(self, offset):
        """Return the index of the entry immediately before offset.

        e.g. if the parsed map has regions 0,10 and 11,12 parsed, meaning that
        there is one unparsed byte (the 11th, addressed as [10]), then:
        asking for 0 will return 0
        asking for 10 will return 0
        asking for 11 will return 1
        asking for 12 will return 1
        """
        key = (offset, 0)
        return self._find_index(self._parsed_byte_map, key)

    def _parsed_key_index(self, key):
        """Return the index of the entry immediately before key.

        e.g. if the parsed map has regions (None, 'a') and ('b','c') parsed,
        meaning that keys from None to 'a' inclusive, and 'b' to 'c' inclusive
        have been parsed, then:
        asking for '' will return 0
        asking for 'a' will return 0
        asking for 'b' will return 1
        asking for 'e' will return 1
        """
        search_key = (key, None)
        return self._find_index(self._parsed_key_map, search_key)

    def _is_parsed(self, offset):
        """Returns True if offset has been parsed."""
        index = self._parsed_byte_index(offset)
        if index == len(self._parsed_byte_map):
            return offset < self._parsed_byte_map[index - 1][1]
        start, end = self._parsed_byte_map[index]
        return offset >= start and offset < end

    def _iter_entries_from_total_buffer(self, keys):
        """Iterate over keys when the entire index is parsed."""
        keys = keys.intersection(self._keys)
        if self.node_ref_lists:
            for key in keys:
                value, node_refs = self._nodes[key]
                yield self, key, value, node_refs
        else:
            for key in keys:
                yield self, key, self._nodes[key]

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys supplied. No additional keys will be returned, and every
            key supplied that is in the index will be returned.
        """
        keys = set(keys)
        if not keys:
            return []
        if self._size is None and self._nodes is None:
            self._buffer_all()

        # We fit about 20 keys per minimum-read (4K), so if we are looking for
        # more than 1/20th of the index it's likely (assuming homogeneous key
        # spread) that we'll read the entire index. If we're going to do that,
        # buffer the whole thing. A better analysis might take key spread into
        # account - but B+Tree indices are better anyway.
        # We could look at all data read, and use a threshold there, which will
        # trigger on ancestry walks, but that is not yet fully mapped out.
        if self._nodes is None and len(keys) * 20 > self.key_count():
            self._buffer_all()
        if self._nodes is not None:
            return self._iter_entries_from_total_buffer(keys)
        else:
            return (result[1] for result in bisect_multi_bytes(
                self._lookup_keys_via_location, self._size, keys))

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        WARNING: Note that this method currently causes a full index parse
        unconditionally (which is reasonably appropriate as it is a means for
        thunking many small indices into one larger one and still supplies
        iter_all_entries at the thunk layer).

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        keys = set(keys)
        if not keys:
            return
        # load data - also finds key lengths
        if self._nodes is None:
            self._buffer_all()
        if self._key_length == 1:
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                if self.node_ref_lists:
                    value, node_refs = self._nodes[key]
                    yield self, key, value, node_refs
                else:
                    yield self, key, self._nodes[key]
            return
        nodes_by_key = self._get_nodes_by_key()
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = nodes_by_key
            elements = list(key)
            # find the subdict whose contents should be returned.
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements.pop(0)
            except KeyError:
                # a non-existent lookup.
                continue
            if len(elements):
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            # each value is the key:value:node refs tuple
                            # ready to yield.
                            yield (self, ) + value
            else:
                # the last thing looked up was a terminal element
                yield (self, ) + key_dict

    def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
        """See BTreeIndex._find_ancestors."""
        # The api can be implemented as a trivial overlay on top of
        # iter_entries, it is not an efficient implementation, but it at least
        # gets the job done.
        found_keys = set()
        search_keys = set()
        for index, key, value, refs in self.iter_entries(keys):
            parent_keys = refs[ref_list_num]
            found_keys.add(key)
            parent_map[key] = parent_keys
            search_keys.update(parent_keys)
        # Figure out what, if anything, was missing
        missing_keys.update(set(keys).difference(found_keys))
        search_keys = search_keys.difference(parent_map)
        return search_keys

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For GraphIndex the estimate is exact.
        """
        if self._key_count is None:
            self._read_and_parse([_HEADER_READV])
        return self._key_count

    def _lookup_keys_via_location(self, location_keys):
        """Public interface for implementing bisection.

        If _buffer_all has been called, then all the data for the index is in
        memory, and this method should not be called, as it uses a separate
        cache because it cannot pre-resolve all indices, which buffer_all does
        for performance.

        :param location_keys: A list of location(byte offset), key tuples.
        :return: A list of (location_key, result) tuples as expected by
            bzrlib.bisect_multi.bisect_multi_bytes.
        """
        # Possible improvements:
        #  - only bisect lookup each key once
        #  - sort the keys first, and use that to reduce the bisection window
        # -----
        # this progresses in three parts:
        # read data
        # parse it
        # attempt to answer the question from the now in memory data.
        # build the readv request
        # for each location, ask for 800 bytes - much more than rows we've seen
        # anywhere.
        readv_ranges = []
        for location, key in location_keys:
            # can we answer from cache?
            if self._bisect_nodes and key in self._bisect_nodes:
                # We have the key parsed.
                continue
            index = self._parsed_key_index(key)
            if (len(self._parsed_key_map) and
                self._parsed_key_map[index][0] <= key and
                (self._parsed_key_map[index][1] >= key or
                 # end of the file has been parsed
                 self._parsed_byte_map[index][1] == self._size)):
                # the key has been parsed, so no lookup is needed even if it's
                # not present.
                continue
            # - if we have examined this part of the file already - yes
            index = self._parsed_byte_index(location)
            if (len(self._parsed_byte_map) and
                self._parsed_byte_map[index][0] <= location and
                self._parsed_byte_map[index][1] > location):
                # the byte region has been parsed, so no read is needed.
                continue
            length = 800
            if location + length > self._size:
                length = self._size - location
            # todo, trim out parsed locations.
            if length > 0:
                readv_ranges.append((location, length))
        # read the header if needed
        if self._bisect_nodes is None:
            readv_ranges.append(_HEADER_READV)
        self._read_and_parse(readv_ranges)
        result = []
        if self._nodes is not None:
            # _read_and_parse triggered a _buffer_all because we requested the
            # whole data range.
            for location, key in location_keys:
                if key not in self._nodes: # not present
                    result.append(((location, key), False))
                elif self.node_ref_lists:
                    value, refs = self._nodes[key]
                    result.append(((location, key),
                        (self, key, value, refs)))
                else:
                    result.append(((location, key),
                        (self, key, self._nodes[key])))
            return result
        # generate results:
        #  - figure out <, >, missing, present
        #  - result present references so we can return them.
        # keys that we cannot answer until we resolve references
        pending_references = []
        pending_locations = set()
        for location, key in location_keys:
            # can we answer from cache?
            if key in self._bisect_nodes:
                # the key has been parsed, so no lookup is needed
                if self.node_ref_lists:
                    # the references may not have been all parsed.
                    value, refs = self._bisect_nodes[key]
                    wanted_locations = []
                    for ref_list in refs:
                        for ref in ref_list:
                            if ref not in self._keys_by_offset:
                                wanted_locations.append(ref)
                    if wanted_locations:
                        pending_locations.update(wanted_locations)
                        pending_references.append((location, key))
                        continue
                    result.append(((location, key), (self, key,
                        value, self._resolve_references(refs))))
                else:
                    result.append(((location, key),
                        (self, key, self._bisect_nodes[key])))
                continue
            else:
                # has the region the key should be in, been parsed?
                index = self._parsed_key_index(key)
                if (self._parsed_key_map[index][0] <= key and
                    (self._parsed_key_map[index][1] >= key or
                     # end of the file has been parsed
                     self._parsed_byte_map[index][1] == self._size)):
                    result.append(((location, key), False))
                    continue
            # no, is the key above or below the probed location:
            # get the range of the probed & parsed location
            index = self._parsed_byte_index(location)
            # if the key is below the start of the range, it's below
            if key < self._parsed_key_map[index][0]:
                direction = -1
            else:
                direction = +1
            result.append(((location, key), direction))
        readv_ranges = []
        # lookup data to resolve references
        for location in pending_locations:
            length = 800
            if location + length > self._size:
                length = self._size - location
            # TODO: trim out parsed locations (e.g. if the 800 is into the
            # parsed region trim it, and dont use the adjust_for_latency
            # facility)
            if length > 0:
                readv_ranges.append((location, length))
        self._read_and_parse(readv_ranges)
        if self._nodes is not None:
            # The _read_and_parse triggered a _buffer_all, grab the data and
            # return it.
            for location, key in pending_references:
                value, refs = self._nodes[key]
                result.append(((location, key), (self, key, value, refs)))
            return result
        for location, key in pending_references:
            # answer key references we had to look-up-late.
            value, refs = self._bisect_nodes[key]
            result.append(((location, key), (self, key,
                value, self._resolve_references(refs))))
        return result

    def _parse_header_from_bytes(self, bytes):
        """Parse the header from a region of bytes.

        :param bytes: The data to parse.
        :return: An offset, data tuple such as readv yields, for the unparsed
            data (which may be of length 0).
        """
        signature = bytes[0:len(self._signature())]
        if not signature == self._signature():
            raise errors.BadIndexFormatSignature(self._name, GraphIndex)
        lines = bytes[len(self._signature()):].splitlines()
        options_line = lines[0]
        if not options_line.startswith(_OPTION_NODE_REFS):
            raise errors.BadIndexOptions(self)
        try:
            self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = lines[1]
        if not options_line.startswith(_OPTION_KEY_ELEMENTS):
            raise errors.BadIndexOptions(self)
        try:
            self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = lines[2]
        if not options_line.startswith(_OPTION_LEN):
            raise errors.BadIndexOptions(self)
        try:
            self._key_count = int(options_line[len(_OPTION_LEN):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        # calculate the bytes we have processed
        header_end = (len(signature) + len(lines[0]) + len(lines[1]) +
            len(lines[2]) + 3)
        self._parsed_bytes(0, None, header_end, None)
        # setup parsing state
        self._expected_elements = 3 + self._key_length
        # raw data keyed by offset
        self._keys_by_offset = {}
        # keys with the value and node references
        self._bisect_nodes = {}
        return header_end, bytes[header_end:]

    def _parse_region(self, offset, data):
        """Parse node data returned from a readv operation.

        :param offset: The byte offset the data starts at.
        :param data: The data to parse.
        """
        # trim the data.
        # end first:
        end = offset + len(data)
        high_parsed = offset
        while True:
            # Trivial test - if the current index's end is within the
            # low-matching parsed range, we're done.
            index = self._parsed_byte_index(high_parsed)
            if end < self._parsed_byte_map[index][1]:
                return
            # print "[%d:%d]" % (offset, end), \
            #     self._parsed_byte_map[index:index + 2]
            high_parsed, last_segment = self._parse_segment(
                offset, data, end, index)
            if last_segment:
                return

    def _parse_segment(self, offset, data, end, index):
        """Parse one segment of data.

        :param offset: Where 'data' begins in the file.
        :param data: Some data to parse a segment of.
        :param end: Where data ends
        :param index: The current index into the parsed bytes map.
        :return: True if the parsed segment is the last possible one in the
            data block.
        :return: high_parsed_byte, last_segment.
            high_parsed_byte is the location of the highest parsed byte in this
            segment, last_segment is True if the parsed segment is the last
            possible one in the data block.
        """
        # default is to use all data
        trim_end = None
        # accommodate overlap with data before this.
        if offset < self._parsed_byte_map[index][1]:
            # overlaps the lower parsed region
            # skip the parsed data
            trim_start = self._parsed_byte_map[index][1] - offset
            # don't trim the start for \n
            start_adjacent = True
        elif offset == self._parsed_byte_map[index][1]:
            # abuts the lower parsed region
            # use all data
            trim_start = None
            # do not trim anything
            start_adjacent = True
        else:
            # does not overlap the lower parsed region
            # use all data
            trim_start = None
            # but trim the leading \n
            start_adjacent = False
        if end == self._size:
            # lines up to the end of all data:
            # use it all
            trim_end = None
            # do not strip to the last \n
            end_adjacent = True
            last_segment = True
        elif index + 1 == len(self._parsed_byte_map):
            # at the end of the parsed data
            # use it all
            trim_end = None
            # but strip to the last \n
            end_adjacent = False
            last_segment = True
        elif end == self._parsed_byte_map[index + 1][0]:
            # butts up against the next parsed region
            # use it all
            trim_end = None
            # do not strip to the last \n
            end_adjacent = True
            last_segment = True
        elif end > self._parsed_byte_map[index + 1][0]:
            # overlaps into the next parsed region
            # only consider the unparsed data
            trim_end = self._parsed_byte_map[index + 1][0] - offset
            # do not strip to the last \n as we know it's an entire record
            end_adjacent = True
            last_segment = end < self._parsed_byte_map[index + 1][1]
        else:
            # does not overlap into the next region
            # use it all
            trim_end = None
            # but strip to the last \n
            end_adjacent = False
            last_segment = True
        # now find bytes to discard if needed
        if not start_adjacent:
            # work around python bug in rfind
            if trim_start is None:
                trim_start = data.find('\n') + 1
            else:
                trim_start = data.find('\n', trim_start) + 1
            if not (trim_start != 0):
                raise AssertionError('no \n was present')
            # print 'removing start', offset, trim_start, repr(data[:trim_start])
        if not end_adjacent:
            # work around python bug in rfind
            if trim_end is None:
                trim_end = data.rfind('\n') + 1
            else:
                trim_end = data.rfind('\n', None, trim_end) + 1
            if not (trim_end != 0):
                raise AssertionError('no \n was present')
            # print 'removing end', offset, trim_end, repr(data[trim_end:])
        # adjust offset and data to the parseable data.
        trimmed_data = data[trim_start:trim_end]
        if not (trimmed_data):
            raise AssertionError('read unneeded data [%d:%d] from [%d:%d]'
                % (trim_start, trim_end, offset, offset + len(data)))
        if trim_start:
            offset += trim_start
        # print "parsing", repr(trimmed_data)
        # splitlines mangles the \r delimiters.. don't use it.
        lines = trimmed_data.split('\n')
        del lines[-1]
        pos = offset
        first_key, last_key, nodes, _ = self._parse_lines(lines, pos)
        for key, value in nodes:
            self._bisect_nodes[key] = value
        self._parsed_bytes(offset, first_key,
            offset + len(trimmed_data), last_key)
        return offset + len(trimmed_data), last_segment

    def _parse_lines(self, lines, pos):
        key = None
        first_key = None
        trailers = 0
        nodes = []
        for line in lines:
            if line == '':
                # must be at the end
                if self._size:
                    if not (self._size == pos + 1):
                        raise AssertionError("%s %s" % (self._size, pos))
                trailers += 1
                continue
            elements = line.split('\0')
            if len(elements) != self._expected_elements:
                raise errors.BadIndexData(self)
            # keys are tuples. Each element is a string that may occur many
            # times, so we intern them to save space. AB, RC, 200807
            key = tuple([intern(element) for element in elements[:self._key_length]])
            if first_key is None:
                first_key = key
            absent, references, value = elements[-3:]
            ref_lists = []
            for ref_string in references.split('\t'):
                ref_lists.append(tuple([
                    int(ref) for ref in ref_string.split('\r') if ref
                    ]))
            ref_lists = tuple(ref_lists)
            self._keys_by_offset[pos] = (key, absent, ref_lists, value)
            pos += len(line) + 1 # +1 for the \n
            if absent:
                continue
            if self.node_ref_lists:
                node_value = (value, ref_lists)
            else:
                node_value = value
            nodes.append((key, node_value))
            # print "parsed ", key
        return first_key, key, nodes, trailers

    def _parsed_bytes(self, start, start_key, end, end_key):
        """Mark the bytes from start to end as parsed.

        Calling self._parsed_bytes(1,2) will mark one byte (the one at offset
        1) as parsed.

        :param start: The start of the parsed region.
        :param end: The end of the parsed region.
        """
        index = self._parsed_byte_index(start)
        new_value = (start, end)
        new_key = (start_key, end_key)
        if index == -1:
            # first range parsed is always the beginning.
            self._parsed_byte_map.insert(index, new_value)
            self._parsed_key_map.insert(index, new_key)
            return
        # four cases:
        # new region
        # extend lower region
        # extend higher region
        # combine two regions
        if (index + 1 < len(self._parsed_byte_map) and
            self._parsed_byte_map[index][1] == start and
            self._parsed_byte_map[index + 1][0] == end):
            # combine two regions
            self._parsed_byte_map[index] = (self._parsed_byte_map[index][0],
                self._parsed_byte_map[index + 1][1])
            self._parsed_key_map[index] = (self._parsed_key_map[index][0],
                self._parsed_key_map[index + 1][1])
            del self._parsed_byte_map[index + 1]
            del self._parsed_key_map[index + 1]
        elif self._parsed_byte_map[index][1] == start:
            # extend the lower entry
            self._parsed_byte_map[index] = (
                self._parsed_byte_map[index][0], end)
            self._parsed_key_map[index] = (
                self._parsed_key_map[index][0], end_key)
        elif (index + 1 < len(self._parsed_byte_map) and
            self._parsed_byte_map[index + 1][0] == end):
            # extend the higher entry
            self._parsed_byte_map[index + 1] = (
                start, self._parsed_byte_map[index + 1][1])
            self._parsed_key_map[index + 1] = (
                start_key, self._parsed_key_map[index + 1][1])
        else:
            # new region
            self._parsed_byte_map.insert(index + 1, new_value)
            self._parsed_key_map.insert(index + 1, new_key)
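
    # Illustrative note (not from the original source): with a parsed-byte
    # map of [(0, 100), (150, 200)], calling _parsed_bytes for the region
    # (100, 150) fills the hole exactly, so the map coalesces to [(0, 200)];
    # marking (100, 140) instead merely extends the lower region to (0, 140).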

    def _read_and_parse(self, readv_ranges):
        """Read the ranges and parse the resulting data.

        :param readv_ranges: A prepared readv range list.
        """
        if not readv_ranges:
            return
        if self._nodes is None and self._bytes_read * 2 >= self._size:
            # We've already read more than 50% of the file and we are about to
            # request more data, just _buffer_all() and be done
            self._buffer_all()
            return

        readv_data = self._transport.readv(self._name, readv_ranges, True,
            self._size)
        # parse
        for offset, data in readv_data:
            self._bytes_read += len(data)
            if offset == 0 and len(data) == self._size:
                # We read the whole range, most likely because the
                # Transport upcast our readv ranges into one long request
                # for enough total data to grab the whole index.
                self._buffer_all(StringIO(data))
                return
            if self._bisect_nodes is None:
                # this must be the start
                if not (offset == 0):
                    raise AssertionError()
                offset, data = self._parse_header_from_bytes(data)
            # print readv_ranges, "[%d:%d]" % (offset, offset + len(data))
            self._parse_region(offset, data)

    def _signature(self):
        """The file signature for this index type."""
        return _SIGNATURE

    def validate(self):
        """Validate that everything in the index can be accessed."""
        # iter_all validates completely at the moment, so just do that.
        for node in self.iter_all_entries():
            pass
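

# An illustrative sketch (not part of the original source): writing a built
# index out through a transport and reopening it read-only. The path below is
# a placeholder.
def _example_open_index():
    from bzrlib.transport import get_transport
    transport = get_transport('/tmp/example-index-dir')
    builder = GraphIndexBuilder(reference_lists=1)
    builder.add_node(('rev1',), 'value1', ([],))
    stream = builder.finish()
    size = len(stream.getvalue())
    transport.put_file('example.gix', stream)
    # Supplying size up front lets GraphIndex bisect rather than stream.
    index = GraphIndex(transport, 'example.gix', size)
    return list(index.iter_all_entries())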


class CombinedGraphIndex(object):
    """A GraphIndex made up from smaller GraphIndices.

    The backing indices must implement GraphIndex, and are presumed to be
    static data.

    Queries against the combined index will be made against the first index,
    and then the second and so on. The order of indices can thus influence
    performance significantly. For example, if one index is on local disk and a
    second on a remote server, the local disk index should be before the other
    in the index list.
    """

    def __init__(self, indices, reload_func=None):
        """Create a CombinedGraphIndex backed by indices.

        :param indices: An ordered list of indices to query for data.
        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True/False to indicate
            if reloading actually changed anything.
        """
        self._indices = indices
        self._reload_func = reload_func

    def __repr__(self):
        return "%s(%s)" % (
                self.__class__.__name__,
                ', '.join(map(repr, self._indices)))

    def get_parent_map(self, keys):
        """See graph.StackedParentsProvider.get_parent_map"""
        search_keys = set(keys)
        if NULL_REVISION in search_keys:
            search_keys.discard(NULL_REVISION)
            found_parents = {NULL_REVISION:[]}
        else:
            found_parents = {}
        for index, key, value, refs in self.iter_entries(search_keys):
            parents = refs[0]
            if not parents:
                parents = (NULL_REVISION,)
            found_parents[key] = parents
        return found_parents

    has_key = _has_key_from_parent_map

    def insert_index(self, pos, index):
        """Insert a new index in the list of indices to query.

        :param pos: The position to insert the index.
        :param index: The index to insert.
        """
        self._indices.insert(pos, index)

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        :return: An iterable of (index, key, value, reference_lists).
            There is no defined order for the result iteration - it will be in
            the most efficient order for the index.
        """
        seen_keys = set()
        while True:
            try:
                for index in self._indices:
                    for node in index.iter_all_entries():
                        if node[1] not in seen_keys:
                            yield node
                            seen_keys.add(node[1])
                return
            except errors.NoSuchFile:
                self._reload_or_raise()

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index.
        """
        keys = set(keys)
        while True:
            try:
                for index in self._indices:
                    if not keys:
                        return
                    for node in index.iter_entries(keys):
                        keys.remove(node[1])
                        yield node
                return
            except errors.NoSuchFile:
                self._reload_or_raise()

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        keys = set(keys)
        if not keys:
            return
        seen_keys = set()
        while True:
            try:
                for index in self._indices:
                    for node in index.iter_entries_prefix(keys):
                        if node[1] in seen_keys:
                            continue
                        seen_keys.add(node[1])
                        yield node
                return
            except errors.NoSuchFile:
                self._reload_or_raise()

    def find_ancestry(self, keys, ref_list_num):
        """Find the complete ancestry for the given set of keys.

        Note that this is a whole-ancestry request, so it should be used
        sparingly.

        :param keys: An iterable of keys to look for
        :param ref_list_num: The reference list which references the parents
            we care about.
        :return: (parent_map, missing_keys)
        """
        missing_keys = set()
        parent_map = {}
        keys_to_lookup = set(keys)
        generation = 0
        while keys_to_lookup:
            # keys that *all* indexes claim are missing, stop searching them
            generation += 1
            all_index_missing = None
            # print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss'
            # print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup),
            #                                   len(parent_map),
            #                                   len(missing_keys))
            for index_idx, index in enumerate(self._indices):
                # TODO: we should probably be doing something with
                #       'missing_keys' since we've already determined that
                #       those revisions have not been found anywhere
                index_missing_keys = set()
                # Find all of the ancestry we can from this index
                # keep looking until the search_keys set is empty, which means
                # things we didn't find should be in index_missing_keys
                search_keys = keys_to_lookup
                sub_generation = 0
                # print '    \t%2d\t\t%4d\t%5d\t%5d' % (
                #     index_idx, len(search_keys),
                #     len(parent_map), len(index_missing_keys))
                while search_keys:
                    sub_generation += 1
                    # TODO: ref_list_num should really be a parameter, since
                    #       CombinedGraphIndex does not know what the ref lists
                    #       mean.
                    search_keys = index._find_ancestors(search_keys,
                        ref_list_num, parent_map, index_missing_keys)
                    # print '    \t\t%2d\t%4d\t%5d\t%5d' % (
                    #     sub_generation, len(search_keys),
                    #     len(parent_map), len(index_missing_keys))
                # Now set whatever was missing to be searched in the next index
                keys_to_lookup = index_missing_keys
                if all_index_missing is None:
                    all_index_missing = set(index_missing_keys)
                else:
                    all_index_missing.intersection_update(index_missing_keys)
                if not keys_to_lookup:
                    break
            if all_index_missing is None:
                # There were no indexes, so all search keys are 'missing'
                missing_keys.update(keys_to_lookup)
                keys_to_lookup = None
            else:
                missing_keys.update(all_index_missing)
                keys_to_lookup.difference_update(all_index_missing)
        return parent_map, missing_keys

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For CombinedGraphIndex this is approximated by the sum of the keys of
        the child indices. As child indices may have duplicate keys this can
        have a maximum error of the number of child indices * largest number of
        duplicated keys.
        """
        while True:
            try:
                return sum((index.key_count() for index in self._indices), 0)
            except errors.NoSuchFile:
                self._reload_or_raise()

    missing_keys = _missing_keys_from_parent_map

    def _reload_or_raise(self):
        """We just got a NoSuchFile exception.

        Try to reload the indices, if it fails, just raise the current
        exception.
        """
        if self._reload_func is None:
            raise
        exc_type, exc_value, exc_traceback = sys.exc_info()
        trace.mutter('Trying to reload after getting exception: %s',
                     exc_value)
        if not self._reload_func():
            # We tried to reload, but nothing changed, so we fail anyway
            trace.mutter('_reload_func indicated nothing has changed.'
                         ' Raising original exception.')
            raise exc_type, exc_value, exc_traceback

    def validate(self):
        """Validate that everything in the index can be accessed."""
        while True:
            try:
                for index in self._indices:
                    index.validate()
                return
            except errors.NoSuchFile:
                self._reload_or_raise()
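

# An illustrative sketch (not from the original source): stacking two indices
# so that lookups try the first (e.g. a fast local one) before the second.
def _example_combine_indices(fast_index, slow_index):
    combined = CombinedGraphIndex([fast_index, slow_index])
    # Nodes for duplicate keys are yielded once, from the first index found.
    return list(combined.iter_entries([('rev1',)]))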


class InMemoryGraphIndex(GraphIndexBuilder):
    """A GraphIndex which operates entirely out of memory and is mutable.

    This is designed to allow the accumulation of GraphIndex entries during a
    single write operation, where the accumulated entries need to be immediately
    available - for example via a CombinedGraphIndex.
    """

    def add_nodes(self, nodes):
        """Add nodes to the index.

        :param nodes: An iterable of (key, node_refs, value) entries to add.
        """
        if self.reference_lists:
            for (key, value, node_refs) in nodes:
                self.add_node(key, value, node_refs)
        else:
            for (key, value) in nodes:
                self.add_node(key, value)

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        :return: An iterable of (index, key, value, reference_lists). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index (in this case dictionary hash order).
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        if self.reference_lists:
            for key, (absent, references, value) in self._nodes.iteritems():
                if not absent:
                    yield self, key, value, references
        else:
            for key, (absent, references, value) in self._nodes.iteritems():
                if not absent:
                    yield self, key, value

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        keys = set(keys)
        if self.reference_lists:
            for key in keys.intersection(self._keys):
                node = self._nodes[key]
                if not node[0]:
                    yield self, key, node[2], node[1]
        else:
            for key in keys.intersection(self._keys):
                node = self._nodes[key]
                if not node[0]:
                    yield self, key, node[2]

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        # XXX: Too much duplication with the GraphIndex class; consider finding
        # a good place to pull out the actual common logic.
        keys = set(keys)
        if not keys:
            return
        if self._key_length == 1:
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                node = self._nodes[key]
                if node[0]:
                    continue
                if self.reference_lists:
                    yield self, key, node[2], node[1]
                else:
                    yield self, key, node[2]
            return
        nodes_by_key = self._get_nodes_by_key()
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = nodes_by_key
            elements = list(key)
            # find the subdict to return
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements.pop(0)
            except KeyError:
                # a non-existent lookup.
                continue
            if len(elements):
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            yield (self, ) + value
            else:
                yield (self, ) + key_dict

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For InMemoryGraphIndex the estimate is exact.
        """
        return len(self._keys)

    def validate(self):
        """In memory indices have no known corruption at the moment."""


class GraphIndexPrefixAdapter(object):
    """An adapter between GraphIndex indices with different key lengths.

    Queries against this will emit queries against the adapted Graph with the
    prefix added, queries for all items use iter_entries_prefix. The returned
    nodes will have their keys and node references adjusted to remove the
    prefix. Finally, an add_nodes_callback can be supplied - when called the
    nodes and references being added will have prefix prepended.
    """

    def __init__(self, adapted, prefix, missing_key_length,
        add_nodes_callback=None):
        """Construct an adapter against adapted with prefix."""
        self.adapted = adapted
        self.prefix_key = prefix + (None,)*missing_key_length
        self.prefix = prefix
        self.prefix_len = len(prefix)
        self.add_nodes_callback = add_nodes_callback

    def add_nodes(self, nodes):
        """Add nodes to the index.

        :param nodes: An iterable of (key, node_refs, value) entries to add.
        """
        # save nodes in case it's an iterator
        nodes = tuple(nodes)
        translated_nodes = []
        try:
            # Add prefix_key to each reference; node_refs is a tuple of tuples,
            # so split it apart, and add prefix_key to the internal references.
            for (key, value, node_refs) in nodes:
                adjusted_references = (
                    tuple(tuple(self.prefix + ref_node for ref_node in ref_list)
                        for ref_list in node_refs))
                translated_nodes.append((self.prefix + key, value,
                    adjusted_references))
        except ValueError:
            # XXX: TODO add an explicit interface for getting the reference list
            # status, to handle this bit of user-friendliness in the API more
            # explicitly.
            for (key, value) in nodes:
                translated_nodes.append((self.prefix + key, value))
        self.add_nodes_callback(translated_nodes)

    def add_node(self, key, value, references=()):
        """Add a node to the index.

        :param key: The key. keys are non-empty tuples containing
            as many whitespace-free utf8 bytestrings as the key length
            defined for this index.
        :param references: An iterable of iterables of keys. Each is a
            reference to another key.
        :param value: The value to associate with the key. It may be any
            bytes as long as it does not contain \0 or \n.
        """
        self.add_nodes(((key, value, references), ))

    def _strip_prefix(self, an_iter):
        """Strip prefix data from nodes and return it."""
        for node in an_iter:
            # cross checks
            if node[1][:self.prefix_len] != self.prefix:
                raise errors.BadIndexData(self)
            for ref_list in node[3]:
                for ref_node in ref_list:
                    if ref_node[:self.prefix_len] != self.prefix:
                        raise errors.BadIndexData(self)
            yield node[0], node[1][self.prefix_len:], node[2], (
                tuple(tuple(ref_node[self.prefix_len:] for ref_node in ref_list)
                for ref_list in node[3]))

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        iter_all_entries is implemented against the adapted index using
        iter_entries_prefix.

        :return: An iterable of (index, key, value, reference_lists). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index (in this case dictionary hash order).
        """
        return self._strip_prefix(self.adapted.iter_entries_prefix([self.prefix_key]))

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        return self._strip_prefix(self.adapted.iter_entries(
            self.prefix + key for key in keys))

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        return self._strip_prefix(self.adapted.iter_entries_prefix(
            self.prefix + key for key in keys))

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For GraphIndexPrefixAdapter this is relatively expensive - key
        iteration with the prefix is done.
        """
        return len(list(self.iter_all_entries()))

    def validate(self):
        """Call the adapted's validate."""
        self.adapted.validate()
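

# An illustrative sketch (not from the original source): exposing the subset
# of a 2-element-key index that shares the prefix ('file1',) as if it were a
# plain 1-element-key index.
def _example_prefix_adapter():
    backing = InMemoryGraphIndex(reference_lists=1, key_elements=2)
    adapter = GraphIndexPrefixAdapter(backing, ('file1',), 1,
        add_nodes_callback=backing.add_nodes)
    # The adapter prepends the prefix on write and strips it on read.
    adapter.add_node(('rev1',), 'value1', ([],))
    return list(adapter.iter_all_entries())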