# Copyright (C) 2007-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Indexing facilities."""
__all__ = [
    'CombinedGraphIndex',
    'GraphIndex',
    'GraphIndexBuilder',
    'GraphIndexPrefixAdapter',
    'InMemoryGraphIndex',
    ]

from bisect import bisect_right
from cStringIO import StringIO
import re
import sys

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    bisect_multi,
    revision as _mod_revision,
    trace,
    )
""")
from bzrlib import (
    debug,
    errors,
    )
from bzrlib.static_tuple import StaticTuple

_HEADER_READV = (0, 200)
_OPTION_KEY_ELEMENTS = "key_elements="
_OPTION_LEN = "len="
_OPTION_NODE_REFS = "node_ref_lists="
_SIGNATURE = "Bazaar Graph Index 1\n"

_whitespace_re = re.compile('[\t\n\x0b\x0c\r\x00 ]')
_newline_null_re = re.compile('[\n\0]')


def _has_key_from_parent_map(self, key):
    """Check if this index has one key.

    If it's possible to check for multiple keys at once through
    calling get_parent_map, that should be faster.
    """
    return (key in self.get_parent_map([key]))


def _missing_keys_from_parent_map(self, keys):
    return set(keys) - set(self.get_parent_map(keys))


class GraphIndexBuilder(object):
    """A builder that can build a GraphIndex.

    The resulting graph has the structure::

      _SIGNATURE OPTIONS NODES NEWLINE
      _SIGNATURE     := 'Bazaar Graph Index 1' NEWLINE
      OPTIONS        := 'node_ref_lists=' DIGITS NEWLINE
      NODES          := NODE*
      NODE           := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
      KEY            := Not-whitespace-utf8
      ABSENT         := 'a'
      REFERENCES     := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
      REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
      REFERENCE      := DIGITS  ; digits is the byte offset in the index of the
                                ; referenced key.
      VALUE          := no-newline-no-null-bytes
    """
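
    # Illustrative serialisation (an assumption for this sketch, not part of
    # the original file): with node_ref_lists=1 and key_elements=1, a finished
    # index could look like the following, where '62' would be the byte offset
    # of the 'parent' line per REFERENCE in the grammar above (the offset here
    # is made up for readability):
    #
    #   Bazaar Graph Index 1
    #   node_ref_lists=1
    #   key_elements=1
    #   len=2
    #   child\x00\x0062\x00value-a
    #   parent\x00\x00\x00value-b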

    def __init__(self, reference_lists=0, key_elements=1):
        """Create a GraphIndex builder.

        :param reference_lists: The number of node references lists for each
            entry.
        :param key_elements: The number of bytestrings in each key.
        """
        self.reference_lists = reference_lists
        # A dict of {key: (absent, ref_lists, value)}
        self._nodes = {}
        # Keys that are referenced but not actually present in this index
        self._absent_keys = set()
        self._nodes_by_key = None
        self._key_length = key_elements
        self._optimize_for_size = False
        self._combine_backing_indices = True
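
    # Minimal usage sketch (assumed, not from the original source):
    #
    #   builder = GraphIndexBuilder(reference_lists=1, key_elements=1)
    #   builder.add_node(('child',), 'value', ([('parent',)],))
    #   index_bytes = builder.finish().read()  # finish() returns a file-like
    #                                          # object of the serialised index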

    def _check_key(self, key):
        """Raise BadIndexKey if key is not a valid key for this index."""
        if type(key) not in (tuple, StaticTuple):
            raise errors.BadIndexKey(key)
        if self._key_length != len(key):
            raise errors.BadIndexKey(key)
        for element in key:
            if not element or _whitespace_re.search(element) is not None:
                raise errors.BadIndexKey(element)

    def _external_references(self):
        """Return references that are not present in this index."""
        keys = set()
        refs = set()
        # TODO: JAM 2008-11-21 This makes an assumption about how the reference
        #       lists are used. It is currently correct for pack-0.92 through
        #       1.9, which use the node references (3rd column) second
        #       reference list as the compression parent. Perhaps this should
        #       be moved into something higher up the stack, since it
        #       makes assumptions about how the index is used.
        if self.reference_lists > 1:
            for node in self.iter_all_entries():
                keys.add(node[1])
                refs.update(node[3][1])
            return refs - keys
        else:
            # If reference_lists == 0 there can be no external references, and
            # if reference_lists == 1, then there isn't a place to store the
            # compression parent, so there are no external references.
            return set()

    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}
            if self.reference_lists:
                for key, (absent, references, value) in self._nodes.iteritems():
                    if absent:
                        continue
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value, references
            else:
                for key, (absent, references, value) in self._nodes.iteritems():
                    if absent:
                        continue
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value
            self._nodes_by_key = nodes_by_key
        return self._nodes_by_key

    def _update_nodes_by_key(self, key, value, node_refs):
        """Update the _nodes_by_key dict with a new key.

        For a key of (foo, bar, baz) create
        _nodes_by_key[foo][bar][baz] = key_value
        """
        if self._nodes_by_key is None:
            return
        key_dict = self._nodes_by_key
        if self.reference_lists:
            key_value = StaticTuple(key, value, node_refs)
        else:
            key_value = StaticTuple(key, value)
        for subkey in key[:-1]:
            key_dict = key_dict.setdefault(subkey, {})
        key_dict[key[-1]] = key_value

    def _check_key_ref_value(self, key, references, value):
        """Check that 'key' and 'references' are all valid.

        :param key: A key tuple. Must conform to the key interface (be a tuple,
            be of the right length, not have any whitespace or nulls in any key
            element.)
        :param references: An iterable of reference lists. Something like
            [[(ref, key)], [(ref, key), (other, key)]]
        :param value: The value associated with this key. Must not contain
            newlines or null characters.
        :return: (node_refs, absent_references)

            * node_refs: basically a packed form of 'references' where all
              keys are tuples.
            * absent_references: reference keys that are not in self._nodes.
              This may contain duplicates if the same key is referenced in
              multiple lists.
        """
        as_st = StaticTuple.from_sequence
        self._check_key(key)
        if _newline_null_re.search(value) is not None:
            raise errors.BadIndexValue(value)
        if len(references) != self.reference_lists:
            raise errors.BadIndexValue(references)
        node_refs = []
        absent_references = []
        for reference_list in references:
            for reference in reference_list:
                # If reference *is* in self._nodes, then we know it has already
                # been checked.
                if reference not in self._nodes:
                    self._check_key(reference)
                    absent_references.append(reference)
            reference_list = as_st([as_st(ref).intern()
                                    for ref in reference_list])
            node_refs.append(reference_list)
        return as_st(node_refs), absent_references

    def add_node(self, key, value, references=()):
        """Add a node to the index.

        :param key: The key. keys are non-empty tuples containing
            as many whitespace-free utf8 bytestrings as the key length
            defined for this index.
        :param references: An iterable of iterables of keys. Each is a
            reference to another key.
        :param value: The value to associate with the key. It may be any
            bytes as long as it does not contain \\0 or \\n.
        """
        (node_refs,
         absent_references) = self._check_key_ref_value(key, references, value)
        if key in self._nodes and self._nodes[key][0] != 'a':
            raise errors.BadIndexDuplicateKey(key, self)
        for reference in absent_references:
            # There may be duplicates, but I don't think it is worth worrying
            # about
            self._nodes[reference] = ('a', (), '')
        self._absent_keys.update(absent_references)
        self._absent_keys.discard(key)
        self._nodes[key] = ('', node_refs, value)
        if self._nodes_by_key is not None and self._key_length > 1:
            self._update_nodes_by_key(key, value, node_refs)
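
    # Illustrative call (an assumption, not from the original source): in a
    # builder created with reference_lists=1 and key_elements=1,
    #
    #   builder.add_node(('child',), 'value bytes', ([('parent',)],))
    #
    # records ('parent',) as an absent placeholder until it is added itself.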

    def clear_cache(self):
        """See GraphIndex.clear_cache()

        This is a no-op, but we need the api to conform to a generic 'Index'
        abstraction.
        """

    def finish(self):
        lines = [_SIGNATURE]
        lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
        lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
        key_count = len(self._nodes) - len(self._absent_keys)
        lines.append(_OPTION_LEN + str(key_count) + '\n')
        prefix_length = sum(len(x) for x in lines)
        # references are byte offsets. To avoid having to do nasty
        # polynomial work to resolve offsets (references to later in the
        # file cannot be determined until all the in-between references have
        # been calculated too) we pad the offsets with 0's to make them be
        # of consistent length. Using binary offsets would break the trivial
        # file parsing.
        # to calculate the width of zeros needed we do three passes:
        # one to gather all the non-reference data and the number of references.
        # one to pad all the data with reference-length and determine entry
        # addresses.
        # One to serialise.
        #
        # forward sorted by key. In future we may consider topological sorting,
        # at the cost of table scans for direct lookup, or a second index for
        # direct lookup
        nodes = sorted(self._nodes.items())
        # if we do not prepass, we don't know how long it will be up front.
        expected_bytes = None
        # we only need to pre-pass if we have reference lists at all.
        if self.reference_lists:
            key_offset_info = []
            non_ref_bytes = prefix_length
            total_references = 0
            # TODO use simple multiplication for the constants in this loop.
            for key, (absent, references, value) in nodes:
                # record the offset known *so far* for this key:
                # the non reference bytes to date, and the total references to
                # date - saves reaccumulating on the second pass
                key_offset_info.append((key, non_ref_bytes, total_references))
                # key is literal, value is literal, there are 3 null's, 1 NL
                # key is variable length tuple, \x00 between elements
                non_ref_bytes += sum(len(element) for element in key)
                if self._key_length > 1:
                    non_ref_bytes += self._key_length - 1
                # value is literal bytes, there are 3 null's, 1 NL.
                non_ref_bytes += len(value) + 3 + 1
                # one byte for absent if set.
                if absent:
                    non_ref_bytes += 1
                elif self.reference_lists:
                    # (ref_lists -1) tabs
                    non_ref_bytes += self.reference_lists - 1
                    # (ref-1 cr's per ref_list)
                    for ref_list in references:
                        # how many references across the whole file?
                        total_references += len(ref_list)
                        # accrue reference separators
                        if ref_list:
                            non_ref_bytes += len(ref_list) - 1
            # how many digits are needed to represent the total byte count?
            digits = 1
            possible_total_bytes = non_ref_bytes + total_references*digits
            while 10 ** digits < possible_total_bytes:
                digits += 1
                possible_total_bytes = non_ref_bytes + total_references*digits
            expected_bytes = possible_total_bytes + 1 # terminating newline
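            # Worked example (illustrative, not from the original source):
            # with non_ref_bytes=95 and total_references=2: digits=1 gives a
            # possible total of 97, and 10**1 < 97, so digits becomes 2;
            # then 95 + 2*2 = 99 and 10**2 = 100 is not < 99, so the loop
            # stops. Offsets are zero-padded to width 2 and expected_bytes
            # is 99 + 1 = 100.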
            # resolve key addresses.
            key_addresses = {}
            for key, non_ref_bytes, total_references in key_offset_info:
                key_addresses[key] = non_ref_bytes + total_references*digits
            # serialise
            format_string = '%%0%sd' % digits
        for key, (absent, references, value) in nodes:
            flattened_references = []
            for ref_list in references:
                ref_addresses = []
                for reference in ref_list:
                    ref_addresses.append(format_string % key_addresses[reference])
                flattened_references.append('\r'.join(ref_addresses))
            string_key = '\x00'.join(key)
            lines.append("%s\x00%s\x00%s\x00%s\n" % (string_key, absent,
                '\t'.join(flattened_references), value))
        lines.append('\n')
        result = StringIO(''.join(lines))
        if expected_bytes and len(result.getvalue()) != expected_bytes:
            raise errors.BzrError('Failed index creation. Internal error:'
                ' mismatched output length and expected length: %d %d' %
                (len(result.getvalue()), expected_bytes))
        return result

    def set_optimize(self, for_size=None, combine_backing_indices=None):
        """Change how the builder tries to optimize the result.

        :param for_size: Tell the builder to try and make the index as small as
            possible.
        :param combine_backing_indices: If the builder spills to disk to save
            memory, should the on-disk indices be combined. Set to True if you
            are going to be probing the index, but to False if you are not. (If
            you are not querying, then the time spent combining is wasted.)
        :return: None
        """
        # GraphIndexBuilder itself doesn't pay attention to the flag yet, but
        # other builders do.
        if for_size is not None:
            self._optimize_for_size = for_size
        if combine_backing_indices is not None:
            self._combine_backing_indices = combine_backing_indices

    def find_ancestry(self, keys, ref_list_num):
        """See CombinedGraphIndex.find_ancestry()"""
        pending = set(keys)
        parent_map = {}
        missing_keys = set()
        while pending:
            next_pending = set()
            for _, key, value, ref_lists in self.iter_entries(pending):
                parent_keys = ref_lists[ref_list_num]
                parent_map[key] = parent_keys
                next_pending.update([p for p in parent_keys if p not in
                                     parent_map])
            missing_keys.update(pending.difference(parent_map))
            pending = next_pending
        return parent_map, missing_keys
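
    # Illustrative usage (an assumption, not from the original source), with
    # reference list 0 holding the parent pointers:
    #
    #   parent_map, missing_keys = builder.find_ancestry([('tip',)], 0)
    #
    # parent_map maps every reachable key to its parent keys; missing_keys
    # holds keys that were referenced but never actually added.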


class GraphIndex(object):
    """An index for data with embedded graphs.

    The index maps keys to a list of key reference lists, and a value.
    Each node has the same number of key reference lists. Each key reference
    list can be empty or an arbitrary length. The value is an opaque NULL
    terminated string without any newlines. The storage of the index is
    hidden in the interface: keys and key references are always tuples of
    bytestrings, never the internal representation (e.g. dictionary offsets).

    It is presumed that the index will not be mutated - it is static data.

    Successive iter_all_entries calls will read the entire index each time.
    Additionally, iter_entries calls will read the index linearly until the
    desired keys are found. XXX: This must be fixed before the index is
    suitable for production use. :XXX
    """

    def __init__(self, transport, name, size, unlimited_cache=False, offset=0):
        """Open an index called name on transport.

        :param transport: A bzrlib.transport.Transport.
        :param name: A path to provide to transport API calls.
        :param size: The size of the index in bytes. This is used for bisection
            logic to perform partial index reads. While the size could be
            obtained by statting the file this introduced an additional round
            trip as well as requiring stat'able transports, both of which are
            avoided by having it supplied. If size is None, then bisection
            support will be disabled and accessing the index will just stream
            all the data.
        :param offset: Instead of starting the index data at offset 0, start it
            at an arbitrary offset.
        """
        self._transport = transport
        self._name = name
        # Becomes a dict of key:(value, reference-list-byte-locations) used by
        # the bisection interface to store parsed but not resolved keys.
        self._bisect_nodes = None
        # Becomes a dict of key:(value, reference-list-keys) which are ready to
        # be returned directly to callers.
        self._nodes = None
        # a sorted list of slice-addresses for the parsed bytes of the file.
        # e.g. (0,1) would mean that byte 0 is parsed.
        self._parsed_byte_map = []
        # a sorted list of keys matching each slice address for parsed bytes
        # e.g. (None, 'foo@bar') would mean that the first byte contained no
        # key, and the end byte of the slice is the end of the data for
        # 'foo@bar'
        self._parsed_key_map = []
        self._key_count = None
        self._keys_by_offset = None
        self._nodes_by_key = None
        self._size = size
        # The number of bytes we've read so far in trying to process this file
        self._bytes_read = 0
        self._base_offset = offset

    def __eq__(self, other):
        """Equal when self and other were created with the same parameters."""
        return (
            type(self) == type(other) and
            self._transport == other._transport and
            self._name == other._name and
            self._size == other._size)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__,
            self._transport.abspath(self._name))

    def _buffer_all(self, stream=None):
        """Buffer all the index data.

        Mutates self._nodes and self.keys_by_offset.
        """
        if self._nodes is not None:
            # We already did this
            return
        if 'index' in debug.debug_flags:
            trace.mutter('Reading entire index %s',
                          self._transport.abspath(self._name))
        if stream is None:
            stream = self._transport.get(self._name)
            if self._base_offset != 0:
                # This is wasteful, but it is better than dealing with
                # adjusting all the offsets, etc.
                stream = StringIO(stream.read()[self._base_offset:])
        self._read_prefix(stream)
        self._expected_elements = 3 + self._key_length
        # raw data keyed by offset
        self._keys_by_offset = {}
        # ready-to-return key:value or key:value, node_ref_lists
        self._nodes = {}
        self._nodes_by_key = None
        trailers = 0
        pos = stream.tell()
        lines = stream.read().split('\n')
        # GZ 2009-09-20: Should really use a try/finally block to ensure close
        stream.close()
        del lines[-1]
        _, _, _, trailers = self._parse_lines(lines, pos)
        for key, absent, references, value in self._keys_by_offset.itervalues():
            if absent:
                continue
            # resolve references:
            if self.node_ref_lists:
                node_value = (value, self._resolve_references(references))
            else:
                node_value = value
            self._nodes[key] = node_value
        # cache the keys for quick set intersections
        if trailers != 1:
            # there must be one line - the empty trailer line.
            raise errors.BadIndexData(self)

    def clear_cache(self):
        """Clear out any cached/memoized values.

        This can be called at any time, but generally it is used when we have
        extracted some information, but don't expect to be requesting any more
        from this index.
        """

    def external_references(self, ref_list_num):
        """Return references that are not present in this index."""
        self._buffer_all()
        if ref_list_num + 1 > self.node_ref_lists:
            raise ValueError('No ref list %d, index has %d ref lists'
                % (ref_list_num, self.node_ref_lists))
        refs = set()
        nodes = self._nodes
        for key, (value, ref_lists) in nodes.iteritems():
            ref_list = ref_lists[ref_list_num]
            refs.update([ref for ref in ref_list if ref not in nodes])
        return refs

    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
            nodes_by_key = {}
            if self.node_ref_lists:
                for key, (value, references) in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value, references
            else:
                for key, value in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value
            self._nodes_by_key = nodes_by_key
        return self._nodes_by_key

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        :return: An iterable of (index, key, value) or
            (index, key, value, reference_lists).
            The former tuple is used when there are no reference lists in the
            index, making the API compatible with simple key:value index types.
            There is no defined order for the result iteration - it will be in
            the most efficient order for the index.
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        if self._nodes is None:
            self._buffer_all()
        if self.node_ref_lists:
            for key, (value, node_ref_lists) in self._nodes.iteritems():
                yield self, key, value, node_ref_lists
        else:
            for key, value in self._nodes.iteritems():
                yield self, key, value

    def _read_prefix(self, stream):
        signature = stream.read(len(self._signature()))
        if not signature == self._signature():
            raise errors.BadIndexFormatSignature(self._name, GraphIndex)
        options_line = stream.readline()
        if not options_line.startswith(_OPTION_NODE_REFS):
            raise errors.BadIndexOptions(self)
        try:
            self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):-1])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = stream.readline()
        if not options_line.startswith(_OPTION_KEY_ELEMENTS):
            raise errors.BadIndexOptions(self)
        try:
            self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):-1])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = stream.readline()
        if not options_line.startswith(_OPTION_LEN):
            raise errors.BadIndexOptions(self)
        try:
            self._key_count = int(options_line[len(_OPTION_LEN):-1])
        except ValueError:
            raise errors.BadIndexOptions(self)

    def _resolve_references(self, references):
        """Return the resolved key references for references.

        References are resolved by looking up the location of the key in the
        _keys_by_offset map and substituting the key name, preserving ordering.

        :param references: An iterable of iterables of key locations. e.g.
            [[123, 456], [123]]
        :return: A tuple of tuples of keys.
        """
        node_refs = []
        for ref_list in references:
            node_refs.append(tuple([self._keys_by_offset[ref][0] for ref in ref_list]))
        return tuple(node_refs)

    def _find_index(self, range_map, key):
        """Helper for the _parsed_*_index calls.

        Given a range map - [(start, end), ...], finds the index of the range
        in the map for key if it is in the map, and if it is not there, the
        immediately preceding range in the map.
        """
        result = bisect_right(range_map, key) - 1
        if result + 1 < len(range_map):
            # check the border condition, it may be in result + 1
            if range_map[result + 1][0] == key[0]:
                return result + 1
        return result

    def _parsed_byte_index(self, offset):
        """Return the index of the entry immediately before offset.

        e.g. if the parsed map has regions 0,10 and 11,12 parsed, meaning that
        there is one unparsed byte (the 11th, addressed as [10]), then:
        asking for 0 will return 0
        asking for 10 will return 0
        asking for 11 will return 1
        asking for 12 will return 1
        """
        key = (offset, 0)
        return self._find_index(self._parsed_byte_map, key)

    def _parsed_key_index(self, key):
        """Return the index of the entry immediately before key.

        e.g. if the parsed map has regions (None, 'a') and ('b','c') parsed,
        meaning that keys from None to 'a' inclusive, and 'b' to 'c' inclusive
        have been parsed, then:
        asking for '' will return 0
        asking for 'a' will return 0
        asking for 'b' will return 1
        asking for 'e' will return 1
        """
        search_key = (key, None)
        return self._find_index(self._parsed_key_map, search_key)

    def _is_parsed(self, offset):
        """Returns True if offset has been parsed."""
        index = self._parsed_byte_index(offset)
        if index == len(self._parsed_byte_map):
            return offset < self._parsed_byte_map[index - 1][1]
        start, end = self._parsed_byte_map[index]
        return offset >= start and offset < end

    def _iter_entries_from_total_buffer(self, keys):
        """Iterate over keys when the entire index is parsed."""
        # Note: See the note in BTreeBuilder.iter_entries for why we don't use
        #       .intersection() here
        nodes = self._nodes
        keys = [key for key in keys if key in nodes]
        if self.node_ref_lists:
            for key in keys:
                value, node_refs = nodes[key]
                yield self, key, value, node_refs
        else:
            for key in keys:
                yield self, key, nodes[key]

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys supplied. No additional keys will be returned, and every
            key supplied that is in the index will be returned.
        """
        keys = set(keys)
        if not keys:
            return []
        if self._size is None and self._nodes is None:
            self._buffer_all()

        # We fit about 20 keys per minimum-read (4K), so if we are looking for
        # more than 1/20th of the index it's likely (assuming homogeneous key
        # spread) that we'll read the entire index. If we're going to do that,
        # buffer the whole thing. A better analysis might take key spread into
        # account - but B+Tree indices are better anyway.
        # We could look at all data read, and use a threshold there, which will
        # trigger on ancestry walks, but that is not yet fully mapped out.
        if self._nodes is None and len(keys) * 20 > self.key_count():
            self._buffer_all()
        if self._nodes is not None:
            return self._iter_entries_from_total_buffer(keys)
        else:
            return (result[1] for result in bisect_multi.bisect_multi_bytes(
                self._lookup_keys_via_location, self._size, keys))

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        WARNING: Note that this method currently causes a full index parse
        unconditionally (which is reasonably appropriate as it is a means for
        thunking many small indices into one larger one and still supplies
        iter_all_entries at the thunk layer).

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        keys = set(keys)
        if not keys:
            return
        # load data - also finds key lengths
        if self._nodes is None:
            self._buffer_all()
        if self._key_length == 1:
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                if self.node_ref_lists:
                    value, node_refs = self._nodes[key]
                    yield self, key, value, node_refs
                else:
                    yield self, key, self._nodes[key]
            return
        nodes_by_key = self._get_nodes_by_key()
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = nodes_by_key
            elements = list(key)
            # find the subdict whose contents should be returned.
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements = elements[1:]
            except KeyError:
                # a non-existent lookup.
                continue
            if len(elements):
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            # each value is the key:value:node refs tuple
                            # ready to yield
                            yield (self, ) + value
            else:
                # the last thing looked up was a terminal element
                yield (self, ) + key_dict
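
    # Illustrative behaviour (an assumption, not from the original source):
    # in a 2-element-key index holding ('foo', 'bar') and ('foobar', 'gam'),
    # iter_entries_prefix([('foo', None)]) matches only ('foo', 'bar');
    # prefixes match whole key elements, never substrings of an element.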

    def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
        """See BTreeIndex._find_ancestors."""
        # The api can be implemented as a trivial overlay on top of
        # iter_entries, it is not an efficient implementation, but it at least
        # gets the job done.
        found_keys = set()
        search_keys = set()
        for index, key, value, refs in self.iter_entries(keys):
            parent_keys = refs[ref_list_num]
            found_keys.add(key)
            parent_map[key] = parent_keys
            search_keys.update(parent_keys)
        # Figure out what, if anything, was missing
        missing_keys.update(set(keys).difference(found_keys))
        search_keys = search_keys.difference(parent_map)
        return search_keys

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For GraphIndex the estimate is exact.
        """
        if self._key_count is None:
            self._read_and_parse([_HEADER_READV])
        return self._key_count

    def _lookup_keys_via_location(self, location_keys):
        """Public interface for implementing bisection.

        If _buffer_all has been called, then all the data for the index is in
        memory, and this method should not be called, as it uses a separate
        cache because it cannot pre-resolve all indices, which buffer_all does
        for performance.

        :param location_keys: A list of location(byte offset), key tuples.
        :return: A list of (location_key, result) tuples as expected by
            bzrlib.bisect_multi.bisect_multi_bytes.
        """
        # Possible improvements:
        #  - only bisect lookup each key once
        #  - sort the keys first, and use that to reduce the bisection window
        # -----
        # this progresses in three parts:
        # read data
        # parse it
        # attempt to answer the question from the now in memory data.
        # build the readv request
        # for each location, ask for 800 bytes - much more than rows we've seen
        # anywhere.
        readv_ranges = []
        for location, key in location_keys:
            # can we answer from cache?
            if self._bisect_nodes and key in self._bisect_nodes:
                # We have the key parsed.
                continue
            index = self._parsed_key_index(key)
            if (len(self._parsed_key_map) and
                self._parsed_key_map[index][0] <= key and
                (self._parsed_key_map[index][1] >= key or
                 # end of the file has been parsed
                 self._parsed_byte_map[index][1] == self._size)):
                # the key has been parsed, so no lookup is needed even if it's
                # not present.
                continue
            # - if we have examined this part of the file already - yes
            index = self._parsed_byte_index(location)
            if (len(self._parsed_byte_map) and
                self._parsed_byte_map[index][0] <= location and
                self._parsed_byte_map[index][1] > location):
                # the byte region has been parsed, so no read is needed.
                continue
            length = 800
            if location + length > self._size:
                length = self._size - location
            # todo, trim out parsed locations.
            if length > 0:
                readv_ranges.append((location, length))
        # read the header if needed
        if self._bisect_nodes is None:
            readv_ranges.append(_HEADER_READV)
        self._read_and_parse(readv_ranges)
        result = []
        if self._nodes is not None:
            # _read_and_parse triggered a _buffer_all because we requested the
            # whole data range
            for location, key in location_keys:
                if key not in self._nodes: # not present
                    result.append(((location, key), False))
                elif self.node_ref_lists:
                    value, refs = self._nodes[key]
                    result.append(((location, key),
                        (self, key, value, refs)))
                else:
                    result.append(((location, key),
                        (self, key, self._nodes[key])))
            return result
        # generate results:
        #  - figure out <, >, missing, present
        #  - result present references so we can return them.
        # keys that we cannot answer until we resolve references
        pending_references = []
        pending_locations = set()
        for location, key in location_keys:
            # can we answer from cache?
            if key in self._bisect_nodes:
                # the key has been parsed, so no lookup is needed
                if self.node_ref_lists:
                    # the references may not have been all parsed.
                    value, refs = self._bisect_nodes[key]
                    wanted_locations = []
                    for ref_list in refs:
                        for ref in ref_list:
                            if ref not in self._keys_by_offset:
                                wanted_locations.append(ref)
                    if wanted_locations:
                        pending_locations.update(wanted_locations)
                        pending_references.append((location, key))
                        continue
                    result.append(((location, key), (self, key,
                        value, self._resolve_references(refs))))
                else:
                    result.append(((location, key),
                        (self, key, self._bisect_nodes[key])))
                continue
            else:
                # has the region the key should be in, been parsed?
                index = self._parsed_key_index(key)
                if (self._parsed_key_map[index][0] <= key and
                    (self._parsed_key_map[index][1] >= key or
                     # end of the file has been parsed
                     self._parsed_byte_map[index][1] == self._size)):
                    result.append(((location, key), False))
                    continue
            # no, is the key above or below the probed location:
            # get the range of the probed & parsed location
            index = self._parsed_byte_index(location)
            # if the key is below the start of the range, its below
            if key < self._parsed_key_map[index][0]:
                direction = -1
            else:
                direction = +1
            result.append(((location, key), direction))
        readv_ranges = []
        # lookup data to resolve references
        for location in pending_locations:
            length = 800
            if location + length > self._size:
                length = self._size - location
            # TODO: trim out parsed locations (e.g. if the 800 is into the
            # parsed region trim it, and don't use the adjust_for_latency
            # facility)
            if length > 0:
                readv_ranges.append((location, length))
        self._read_and_parse(readv_ranges)
        if self._nodes is not None:
            # The _read_and_parse triggered a _buffer_all, grab the data and
            # return it
            for location, key in pending_references:
                value, refs = self._nodes[key]
                result.append(((location, key), (self, key, value, refs)))
            return result
        for location, key in pending_references:
            # answer key references we had to look-up-late.
            value, refs = self._bisect_nodes[key]
            result.append(((location, key), (self, key,
                value, self._resolve_references(refs))))
        return result

    def _parse_header_from_bytes(self, bytes):
        """Parse the header from a region of bytes.

        :param bytes: The data to parse.
        :return: An offset, data tuple such as readv yields, for the unparsed
            data. (which may be of length 0).
        """
        signature = bytes[0:len(self._signature())]
        if not signature == self._signature():
            raise errors.BadIndexFormatSignature(self._name, GraphIndex)
        lines = bytes[len(self._signature()):].splitlines()
        options_line = lines[0]
        if not options_line.startswith(_OPTION_NODE_REFS):
            raise errors.BadIndexOptions(self)
        try:
            self.node_ref_lists = int(options_line[len(_OPTION_NODE_REFS):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = lines[1]
        if not options_line.startswith(_OPTION_KEY_ELEMENTS):
            raise errors.BadIndexOptions(self)
        try:
            self._key_length = int(options_line[len(_OPTION_KEY_ELEMENTS):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        options_line = lines[2]
        if not options_line.startswith(_OPTION_LEN):
            raise errors.BadIndexOptions(self)
        try:
            self._key_count = int(options_line[len(_OPTION_LEN):])
        except ValueError:
            raise errors.BadIndexOptions(self)
        # calculate the bytes we have processed
        header_end = (len(signature) + len(lines[0]) + len(lines[1]) +
            len(lines[2]) + 3)
        self._parsed_bytes(0, None, header_end, None)
        # setup parsing state
        self._expected_elements = 3 + self._key_length
        # raw data keyed by offset
        self._keys_by_offset = {}
        # keys with the value and node references
        self._bisect_nodes = {}
        return header_end, bytes[header_end:]

    def _parse_region(self, offset, data):
        """Parse node data returned from a readv operation.

        :param offset: The byte offset the data starts at.
        :param data: The data to parse.
        """
        # trim the data.
        # end first:
        end = offset + len(data)
        high_parsed = offset
        while True:
            # Trivial test - if the current index's end is within the
            # low-matching parsed range, we're done.
            index = self._parsed_byte_index(high_parsed)
            if end < self._parsed_byte_map[index][1]:
                return
            # print "[%d:%d]" % (offset, end), \
            #       self._parsed_byte_map[index:index + 2]
            high_parsed, last_segment = self._parse_segment(
                offset, data, end, index)
            if last_segment:
                return

    def _parse_segment(self, offset, data, end, index):
        """Parse one segment of data.

        :param offset: Where 'data' begins in the file.
        :param data: Some data to parse a segment of.
        :param end: Where data ends
        :param index: The current index into the parsed bytes map.
        :return: True if the parsed segment is the last possible one in the
            data block.
        :return: high_parsed_byte, last_segment.
            high_parsed_byte is the location of the highest parsed byte in this
            segment, last_segment is True if the parsed segment is the last
            possible one in the data block.
        """
        # default is to use all data
        trim_end = None
        # accommodate overlap with data before this.
        if offset < self._parsed_byte_map[index][1]:
            # overlaps the lower parsed region
            # skip the parsed data
            trim_start = self._parsed_byte_map[index][1] - offset
            # don't trim the start for \n
            start_adjacent = True
        elif offset == self._parsed_byte_map[index][1]:
            # abuts the lower parsed region
            # use all data
            trim_start = None
            # do not trim anything
            start_adjacent = True
        else:
            # does not overlap the lower parsed region
            # use all data
            trim_start = None
            # but trim the leading \n
            start_adjacent = False
        if end == self._size:
            # lines up to the end of all data:
            # use it all
            trim_end = None
            # do not strip to the last \n
            end_adjacent = True
            last_segment = True
        elif index + 1 == len(self._parsed_byte_map):
            # at the end of the parsed data
            # use it all
            trim_end = None
            # but strip to the last \n
            end_adjacent = False
            last_segment = True
        elif end == self._parsed_byte_map[index + 1][0]:
            # abuts the next parsed region
            # use it all
            trim_end = None
            # do not strip to the last \n
            end_adjacent = True
            last_segment = True
        elif end > self._parsed_byte_map[index + 1][0]:
            # overlaps into the next parsed region
            # only consider the unparsed data
            trim_end = self._parsed_byte_map[index + 1][0] - offset
            # do not strip to the last \n as we know it's an entire record
            end_adjacent = True
            last_segment = end < self._parsed_byte_map[index + 1][1]
        else:
            # does not overlap into the next region
            # use it all
            trim_end = None
            # but strip to the last \n
            end_adjacent = False
            last_segment = True
        # now find bytes to discard if needed
        if not start_adjacent:
            # work around python bug in rfind
            if trim_start is None:
                trim_start = data.find('\n') + 1
            else:
                trim_start = data.find('\n', trim_start) + 1
            if not (trim_start != 0):
                raise AssertionError('no \n was present')
            # print 'removing start', offset, trim_start, repr(data[:trim_start])
        if not end_adjacent:
            # work around python bug in rfind
            if trim_end is None:
                trim_end = data.rfind('\n') + 1
            else:
                trim_end = data.rfind('\n', None, trim_end) + 1
            if not (trim_end != 0):
                raise AssertionError('no \n was present')
            # print 'removing end', offset, trim_end, repr(data[trim_end:])
        # adjust offset and data to the parseable data.
        trimmed_data = data[trim_start:trim_end]
        if not trimmed_data:
            raise AssertionError('read unneeded data [%d:%d] from [%d:%d]'
                % (trim_start, trim_end, offset, offset + len(data)))
        if trim_start:
            offset += trim_start
        # print "parsing", repr(trimmed_data)
        # splitlines mangles the \r delimiters.. don't use it.
        lines = trimmed_data.split('\n')
        del lines[-1]
        pos = offset
        first_key, last_key, nodes, _ = self._parse_lines(lines, pos)
        for key, value in nodes:
            self._bisect_nodes[key] = value
        self._parsed_bytes(offset, first_key,
            offset + len(trimmed_data), last_key)
        return offset + len(trimmed_data), last_segment

    def _parse_lines(self, lines, pos):
        key = None
        first_key = None
        trailers = 0
        nodes = []
        for line in lines:
            if line == '':
                # must be at the end
                if self._size:
                    if not (self._size == pos + 1):
                        raise AssertionError("%s %s" % (self._size, pos))
                trailers += 1
                continue
            elements = line.split('\0')
            if len(elements) != self._expected_elements:
                raise errors.BadIndexData(self)
            # keys are tuples. Each element is a string that may occur many
            # times, so we intern them to save space. AB, RC, 200807
            key = tuple([intern(element) for element in elements[:self._key_length]])
            if first_key is None:
                first_key = key
            absent, references, value = elements[-3:]
            ref_lists = []
            for ref_string in references.split('\t'):
                ref_lists.append(tuple([
                    int(ref) for ref in ref_string.split('\r') if ref
                    ]))
            ref_lists = tuple(ref_lists)
            self._keys_by_offset[pos] = (key, absent, ref_lists, value)
            pos += len(line) + 1 # +1 for the \n
            if absent:
                continue
            if self.node_ref_lists:
                node_value = (value, ref_lists)
            else:
                node_value = value
            nodes.append((key, node_value))
            # print "parsed ", key
        return first_key, key, nodes, trailers

    def _parsed_bytes(self, start, start_key, end, end_key):
        """Mark the bytes from start to end as parsed.

        Calling self._parsed_bytes(1,2) will mark one byte (the one at offset
        1) as parsed.

        :param start: The start of the parsed region.
        :param end: The end of the parsed region.
        """
        index = self._parsed_byte_index(start)
        new_value = (start, end)
        new_key = (start_key, end_key)
        if index == -1:
            # first range parsed is always the beginning.
            self._parsed_byte_map.insert(index, new_value)
            self._parsed_key_map.insert(index, new_key)
            return
        # four cases:
        # new region
        # extend lower region
        # extend higher region
        # combine two regions
        if (index + 1 < len(self._parsed_byte_map) and
            self._parsed_byte_map[index][1] == start and
            self._parsed_byte_map[index + 1][0] == end):
            # combine two regions
            self._parsed_byte_map[index] = (self._parsed_byte_map[index][0],
                self._parsed_byte_map[index + 1][1])
            self._parsed_key_map[index] = (self._parsed_key_map[index][0],
                self._parsed_key_map[index + 1][1])
            del self._parsed_byte_map[index + 1]
            del self._parsed_key_map[index + 1]
        elif self._parsed_byte_map[index][1] == start:
            # extend the lower entry
            self._parsed_byte_map[index] = (
                self._parsed_byte_map[index][0], end)
            self._parsed_key_map[index] = (
                self._parsed_key_map[index][0], end_key)
        elif (index + 1 < len(self._parsed_byte_map) and
            self._parsed_byte_map[index + 1][0] == end):
            # extend the higher entry
            self._parsed_byte_map[index + 1] = (
                start, self._parsed_byte_map[index + 1][1])
            self._parsed_key_map[index + 1] = (
                start_key, self._parsed_key_map[index + 1][1])
        else:
            # new region
            self._parsed_byte_map.insert(index + 1, new_value)
            self._parsed_key_map.insert(index + 1, new_key)
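
    # Worked example (illustrative, not from the original source): if
    # _parsed_byte_map is [(0, 10), (20, 30)], then marking bytes 10-20 as
    # parsed hits the combine branch (the lower region ends at start and the
    # higher region begins at end) and the map collapses to [(0, 30)];
    # marking bytes 10-15 instead only extends the lower region, giving
    # [(0, 15), (20, 30)].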

    def _read_and_parse(self, readv_ranges):
        """Read the ranges and parse the resulting data.

        :param readv_ranges: A prepared readv range list.
        """
        if not readv_ranges:
            return
        if self._nodes is None and self._bytes_read * 2 >= self._size:
            # We've already read more than 50% of the file and we are about to
            # request more data, just _buffer_all() and be done
            self._buffer_all()
            return

        base_offset = self._base_offset
        if base_offset != 0:
            # Rewrite the ranges for the offset
            readv_ranges = [(start+base_offset, size)
                            for start, size in readv_ranges]
        readv_data = self._transport.readv(self._name, readv_ranges, True,
            self._size + self._base_offset)
        # parse
        for offset, data in readv_data:
            offset -= base_offset
            self._bytes_read += len(data)
            if offset < 0:
                # transport.readv() expanded to extra data which isn't part of
                # this index
                data = data[-offset:]
                offset = 0
            if offset == 0 and len(data) == self._size:
                # We read the whole range, most likely because the
                # Transport upcast our readv ranges into one long request
                # for enough total data to grab the whole index.
                self._buffer_all(StringIO(data))
                return
            if self._bisect_nodes is None:
                # this must be the start
                if not (offset == 0):
                    raise AssertionError()
                offset, data = self._parse_header_from_bytes(data)
            # print readv_ranges, "[%d:%d]" % (offset, offset + len(data))
            self._parse_region(offset, data)

    def _signature(self):
        """The file signature for this index type."""
        return _SIGNATURE

    def validate(self):
        """Validate that everything in the index can be accessed."""
        # iter_all validates completely at the moment, so just do that.
        for node in self.iter_all_entries():
            pass


class CombinedGraphIndex(object):
    """A GraphIndex made up from smaller GraphIndices.

    The backing indices must implement GraphIndex, and are presumed to be
    static data.

    Queries against the combined index will be made against the first index,
    and then the second and so on. The order of indices can thus influence
    performance significantly. For example, if one index is on local disk and a
    second on a remote server, the local disk index should be before the other
    in the index list.

    Also, queries tend to need results from the same indices as previous
    queries. So the indices will be reordered after every query to put the
    indices that had the result(s) of that query first (while otherwise
    preserving the relative ordering).
    """

    def __init__(self, indices, reload_func=None):
        """Create a CombinedGraphIndex backed by indices.

        :param indices: An ordered list of indices to query for data.
        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True/False to indicate
            if reloading actually changed anything.
        """
        self._indices = indices
        self._reload_func = reload_func
        # Sibling indices are other CombinedGraphIndex that we should call
        # _move_to_front_by_name on when we auto-reorder ourself.
        self._sibling_indices = []
        # A list of names that corresponds to the instances in self._indices,
        # so _index_names[0] is always the name for _indices[0], etc.  Sibling
        # indices must all use the same set of names as each other.
        self._index_names = [None] * len(self._indices)

    def __repr__(self):
        return "%s(%s)" % (
                self.__class__.__name__,
                ', '.join(map(repr, self._indices)))

    def clear_cache(self):
        """See GraphIndex.clear_cache()"""
        for index in self._indices:
            index.clear_cache()

    def get_parent_map(self, keys):
        """See graph.StackedParentsProvider.get_parent_map"""
        search_keys = set(keys)
        if _mod_revision.NULL_REVISION in search_keys:
            search_keys.discard(_mod_revision.NULL_REVISION)
            found_parents = {_mod_revision.NULL_REVISION:[]}
        else:
            found_parents = {}
        for index, key, value, refs in self.iter_entries(search_keys):
            parents = refs[0]
            if not parents:
                parents = (_mod_revision.NULL_REVISION,)
            found_parents[key] = parents
        return found_parents
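
    # Illustrative behaviour (an assumption, not from the original source):
    # keys missing from every child index (ghosts) are simply left out of the
    # returned dict, and a node with no parents in reference list 0 maps to
    # (NULL_REVISION,):
    #
    #   cgi.get_parent_map([('rev-2',), ('ghost',)])
    #   => {('rev-2',): (('rev-1',),)}    # 'ghost' omitted entirely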

    has_key = _has_key_from_parent_map

    def insert_index(self, pos, index, name=None):
        """Insert a new index in the list of indices to query.

        :param pos: The position to insert the index.
        :param index: The index to insert.
        :param name: a name for this index, e.g. a pack name. These names can
            be used to reflect index reorderings to related CombinedGraphIndex
            instances that use the same names. (see set_sibling_indices)
        """
        self._indices.insert(pos, index)
        self._index_names.insert(pos, name)

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        :return: An iterable of (index, key, reference_lists, value).
            There is no defined order for the result iteration - it will be in
            the most efficient order for the index.
        """
        seen_keys = set()
        while True:
            try:
                for index in self._indices:
                    for node in index.iter_all_entries():
                        if node[1] not in seen_keys:
                            yield node
                            seen_keys.add(node[1])
                return
            except errors.NoSuchFile:
                self._reload_or_raise()

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, reference_lists, value). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index.
        """
        keys = set(keys)
        hit_indices = []
        while True:
            try:
                for index in self._indices:
                    if not keys:
                        break
                    index_hit = False
                    for node in index.iter_entries(keys):
                        keys.remove(node[1])
                        yield node
                        index_hit = True
                    if index_hit:
                        hit_indices.append(index)
                break
            except errors.NoSuchFile:
                self._reload_or_raise()
        self._move_to_front(hit_indices)

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        keys = set(keys)
        if not keys:
            return
        seen_keys = set()
        hit_indices = []
        while True:
            try:
                for index in self._indices:
                    index_hit = False
                    for node in index.iter_entries_prefix(keys):
                        if node[1] in seen_keys:
                            continue
                        seen_keys.add(node[1])
                        yield node
                        index_hit = True
                    if index_hit:
                        hit_indices.append(index)
                break
            except errors.NoSuchFile:
                self._reload_or_raise()
        self._move_to_front(hit_indices)

    def _move_to_front(self, hit_indices):
        """Rearrange self._indices so that hit_indices are first.

        Order is maintained as much as possible, e.g. the first unhit index
        will be the first index in _indices after the hit_indices, and the
        hit_indices will be present in exactly the order they are passed to
        _move_to_front.

        _move_to_front propagates to all objects in self._sibling_indices by
        calling _move_to_front_by_name.
        """
        if self._indices[:len(hit_indices)] == hit_indices:
            # The 'hit_indices' are already at the front (and in the same
            # order), no need to re-order
            return
        hit_names = self._move_to_front_by_index(hit_indices)
        for sibling_idx in self._sibling_indices:
            sibling_idx._move_to_front_by_name(hit_names)

    def _move_to_front_by_index(self, hit_indices):
        """Core logic for _move_to_front.

        Returns a list of names corresponding to the hit_indices param.
        """
        indices_info = zip(self._index_names, self._indices)
        if 'index' in debug.debug_flags:
            trace.mutter('CombinedGraphIndex reordering: currently %r, '
                         'promoting %r', indices_info, hit_indices)
        hit_names = []
        unhit_names = []
        new_hit_indices = []
        unhit_indices = []
        for offset, (name, idx) in enumerate(indices_info):
            if idx in hit_indices:
                hit_names.append(name)
                new_hit_indices.append(idx)
                if len(new_hit_indices) == len(hit_indices):
                    # We've found all of the hit entries, everything else is
                    # unhit
                    unhit_names.extend(self._index_names[offset+1:])
                    unhit_indices.extend(self._indices[offset+1:])
                    break
            else:
                unhit_names.append(name)
                unhit_indices.append(idx)

        self._indices = new_hit_indices + unhit_indices
        self._index_names = hit_names + unhit_names
        if 'index' in debug.debug_flags:
            trace.mutter('CombinedGraphIndex reordered: %r', self._indices)
        return hit_names

    def _move_to_front_by_name(self, hit_names):
        """Moves indices named by 'hit_names' to front of the search order, as
        described in _move_to_front.
        """
        # Translate names to index instances, and then call
        # _move_to_front_by_index.
        indices_info = zip(self._index_names, self._indices)
        hit_indices = []
        for name, idx in indices_info:
            if name in hit_names:
                hit_indices.append(idx)
        self._move_to_front_by_index(hit_indices)

    def find_ancestry(self, keys, ref_list_num):
        """Find the complete ancestry for the given set of keys.

        Note that this is a whole-ancestry request, so it should be used
        sparingly.

        :param keys: An iterable of keys to look for
        :param ref_list_num: The reference list which references the parents
            we care about.
        :return: (parent_map, missing_keys)
        """
        # XXX: make this call _move_to_front?
        missing_keys = set()
        parent_map = {}
        keys_to_lookup = set(keys)
        generation = 0
        while keys_to_lookup:
            # keys that *all* indexes claim are missing, stop searching them
            generation += 1
            all_index_missing = None
            # print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss'
            # print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup),
            #                                   len(parent_map),
            #                                   len(missing_keys))
            for index_idx, index in enumerate(self._indices):
                # TODO: we should probably be doing something with
                #       'missing_keys' since we've already determined that
                #       those revisions have not been found anywhere
                index_missing_keys = set()
                # Find all of the ancestry we can from this index
                # keep looking until the search_keys set is empty, which means
                # things we didn't find should be in index_missing_keys
                search_keys = keys_to_lookup
                sub_generation = 0
                # print '    \t%2d\t\t%4d\t%5d\t%5d' % (
                #     index_idx, len(search_keys),
                #     len(parent_map), len(index_missing_keys))
                while search_keys:
                    sub_generation += 1
                    # TODO: ref_list_num should really be a parameter, since
                    #       CombinedGraphIndex does not know what the ref lists
                    #       mean.
                    search_keys = index._find_ancestors(search_keys,
                        ref_list_num, parent_map, index_missing_keys)
                    # print '    \t    \t%2d\t%4d\t%5d\t%5d' % (
                    #     sub_generation, len(search_keys),
                    #     len(parent_map), len(index_missing_keys))
                # Now set whatever was missing to be searched in the next index
                keys_to_lookup = index_missing_keys
                if all_index_missing is None:
                    all_index_missing = set(index_missing_keys)
                else:
                    all_index_missing.intersection_update(index_missing_keys)
                if not keys_to_lookup:
                    break
            if all_index_missing is None:
                # There were no indexes, so all search keys are 'missing'
                missing_keys.update(keys_to_lookup)
                keys_to_lookup = None
            else:
                missing_keys.update(all_index_missing)
                keys_to_lookup.difference_update(all_index_missing)
        return parent_map, missing_keys

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For CombinedGraphIndex this is approximated by the sum of the keys of
        the child indices. As child indices may have duplicate keys this can
        have a maximum error of the number of child indices * largest number of
        duplicated keys.
        """
        while True:
            try:
                return sum((index.key_count() for index in self._indices), 0)
            except errors.NoSuchFile:
                self._reload_or_raise()

    missing_keys = _missing_keys_from_parent_map

    def _reload_or_raise(self):
        """We just got a NoSuchFile exception.

        Try to reload the indices, if it fails, just raise the current
        exception.
        """
        if self._reload_func is None:
            raise
        exc_type, exc_value, exc_traceback = sys.exc_info()
        trace.mutter('Trying to reload after getting exception: %s',
                     exc_value)
        if not self._reload_func():
            # We tried to reload, but nothing changed, so we fail anyway
            trace.mutter('_reload_func indicated nothing has changed.'
                         ' Raising original exception.')
            raise exc_type, exc_value, exc_traceback

    def set_sibling_indices(self, sibling_combined_graph_indices):
        """Set the CombinedGraphIndex objects to reorder after reordering self.
        """
        self._sibling_indices = sibling_combined_graph_indices

    def validate(self):
        """Validate that everything in the index can be accessed."""
        while True:
            try:
                for index in self._indices:
                    index.validate()
                return
            except errors.NoSuchFile:
                self._reload_or_raise()


class InMemoryGraphIndex(GraphIndexBuilder):
    """A GraphIndex which operates entirely out of memory and is mutable.

    This is designed to allow the accumulation of GraphIndex entries during a
    single write operation, where the accumulated entries need to be immediately
    available - for example via a CombinedGraphIndex.
    """

    def add_nodes(self, nodes):
        """Add nodes to the index.

        :param nodes: An iterable of (key, value, node_refs) entries to add.
        """
        if self.reference_lists:
            for (key, value, node_refs) in nodes:
                self.add_node(key, value, node_refs)
        else:
            for (key, value) in nodes:
                self.add_node(key, value)
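
    # Minimal usage sketch (assumed, not from the original source):
    #
    #   index = InMemoryGraphIndex(reference_lists=1)
    #   index.add_nodes([(('child',), 'value', ([('parent',)],))])
    #   list(index.iter_entries([('child',)]))
    #
    # yields (index, ('child',), 'value', <reference lists>) tuples without
    # ever serialising to disk.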

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        :return: An iterable of (index, key, reference_lists, value). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (in this case dictionary hash order).
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        if self.reference_lists:
            for key, (absent, references, value) in self._nodes.iteritems():
                if not absent:
                    yield self, key, value, references
        else:
            for key, (absent, references, value) in self._nodes.iteritems():
                if not absent:
                    yield self, key, value

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        # Note: See BTreeBuilder.iter_entries for an explanation of why we
        #       aren't using set().intersection() here
        nodes = self._nodes
        keys = [key for key in keys if key in nodes]
        if self.reference_lists:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2], node[1]
        else:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2]

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        # XXX: Too much duplication with the GraphIndex class; consider finding
        # a good place to pull out the actual common logic.
        keys = set(keys)
        if not keys:
            return
        if self._key_length == 1:
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                node = self._nodes[key]
                if node[0]:
                    continue
                if self.reference_lists:
                    yield self, key, node[2], node[1]
                else:
                    yield self, key, node[2]
            return
        nodes_by_key = self._get_nodes_by_key()
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = nodes_by_key
            elements = list(key)
            # find the subdict to return
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements = elements[1:]
            except KeyError:
                # a non-existent lookup.
                continue
            if len(elements):
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            yield (self, ) + value
            else:
                yield (self, ) + key_dict

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For InMemoryGraphIndex the estimate is exact.
        """
        return len(self._nodes) - len(self._absent_keys)

    def validate(self):
        """In-memory indexes have no known corruption at the moment."""


class GraphIndexPrefixAdapter(object):
    """An adapter between GraphIndex with different key lengths.

    Queries against this will emit queries against the adapted Graph with the
    prefix added, queries for all items use iter_entries_prefix. The returned
    nodes will have their keys and node references adjusted to remove the
    prefix. Finally, an add_nodes_callback can be supplied - when called the
    nodes and references being added will have prefix prepended.
    """

    def __init__(self, adapted, prefix, missing_key_length,
                 add_nodes_callback=None):
        """Construct an adapter against adapted with prefix."""
        self.adapted = adapted
        self.prefix_key = prefix + (None,)*missing_key_length
        self.prefix = prefix
        self.prefix_len = len(prefix)
        self.add_nodes_callback = add_nodes_callback

    def add_nodes(self, nodes):
        """Add nodes to the index.

        :param nodes: An iterable of (key, value, node_refs) entries to add.
        """
        # save nodes in case it's an iterator
        nodes = tuple(nodes)
        translated_nodes = []
        try:
            # Add prefix_key to each reference; node_refs is a tuple of tuples,
            # so split it apart, and add prefix_key to the internal reference
            for (key, value, node_refs) in nodes:
                adjusted_references = (
                    tuple(tuple(self.prefix + ref_node for ref_node in ref_list)
                          for ref_list in node_refs))
                translated_nodes.append((self.prefix + key, value,
                    adjusted_references))
        except ValueError:
            # XXX: TODO add an explicit interface for getting the reference list
            # status, to handle this bit of user-friendliness in the API more
            # explicitly.
            for (key, value) in nodes:
                translated_nodes.append((self.prefix + key, value))
        self.add_nodes_callback(translated_nodes)
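
    # Illustrative translation (an assumption, not from the original source):
    # with prefix ('pfx',), add_nodes([(('k',), 'v', ([('p',)],))]) hands the
    # callback [(('pfx', 'k'), 'v', ((('pfx', 'p'),),))] - the key and every
    # reference gain the prefix.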

    def add_node(self, key, value, references=()):
        """Add a node to the index.

        :param key: The key. keys are non-empty tuples containing
            as many whitespace-free utf8 bytestrings as the key length
            defined for this index.
        :param references: An iterable of iterables of keys. Each is a
            reference to another key.
        :param value: The value to associate with the key. It may be any
            bytes as long as it does not contain \\0 or \\n.
        """
        self.add_nodes(((key, value, references), ))

    def _strip_prefix(self, an_iter):
        """Strip prefix data from nodes and return it."""
        for node in an_iter:
            if node[1][:self.prefix_len] != self.prefix:
                raise errors.BadIndexData(self)
            for ref_list in node[3]:
                for ref_node in ref_list:
                    if ref_node[:self.prefix_len] != self.prefix:
                        raise errors.BadIndexData(self)
            yield node[0], node[1][self.prefix_len:], node[2], (
                tuple(tuple(ref_node[self.prefix_len:] for ref_node in ref_list)
                      for ref_list in node[3]))

    def iter_all_entries(self):
        """Iterate over all keys within the index.

        iter_all_entries is implemented against the adapted index using
        iter_entries_prefix.

        :return: An iterable of (index, key, reference_lists, value). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (in this case dictionary hash order).
        """
        return self._strip_prefix(self.adapted.iter_entries_prefix([self.prefix_key]))

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        return self._strip_prefix(self.adapted.iter_entries(
            self.prefix + key for key in keys))

    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        return self._strip_prefix(self.adapted.iter_entries_prefix(
            self.prefix + key for key in keys))

    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For GraphIndexPrefixAdapter this is relatively expensive - key
        iteration with the prefix is done.
        """
        return len(list(self.iter_all_entries()))

    def validate(self):
        """Call the adapted's validate."""
        self.adapted.validate()