class GraphIndexBuilder(object):
    """A builder that can build a GraphIndex.

    The resulting graph has the structure::

      _SIGNATURE OPTIONS NODES NEWLINE
      _SIGNATURE := 'Bazaar Graph Index 1' NEWLINE
      OPTIONS := 'node_ref_lists=' DIGITS NEWLINE
      NODES := NODE*
      NODE := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
      KEY := Not-whitespace-utf8
      ABSENT := 'a'
      REFERENCES := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
      REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
      REFERENCE := DIGITS  ; digits is the byte offset in the index of the
                           ; referenced key.
      VALUE := no-newline-no-null-bytes
    """
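    # A hand-written illustration of the layout above (byte values are made
    # up): a single-element-key index with one reference list, holding a
    # 'child' row that refers to a 'parent' row, would serialise roughly as
    #
    #   Bazaar Graph Index 1
    #   node_ref_lists=1
    #   child\x00\x00<byte offset of the parent row>\x00child-value
    #   parent\x00\x00\x00parent-value
    #
    # Each row is KEY, the optional 'a' (absent) marker, the reference lists
    # (byte offsets, CR-separated within a list, TAB-separated between lists)
    # and the VALUE, all separated by NULL bytes.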
    def __init__(self, reference_lists=0, key_elements=1):
        """Create a GraphIndex builder.

        :param reference_lists: The number of node references lists for each
            entry.
        :param key_elements: The number of bytestrings in each key.
        """
        self.reference_lists = reference_lists
        # A dict of {key: (absent, ref_lists, value)}
        self._nodes = {}
        # Keys that are referenced but not actually present in this index
        self._absent_keys = set()
        self._nodes_by_key = None
        self._key_length = key_elements
        self._optimize_for_size = False
    def _update_nodes_by_key(self, key, value, node_refs):
        key_dict = self._nodes_by_key
        if self.reference_lists:
            key_value = StaticTuple(key, value, node_refs)
        else:
            key_value = StaticTuple(key, value)
        for subkey in key[:-1]:
            key_dict = key_dict.setdefault(subkey, {})
        key_dict[key[-1]] = key_value
    def _check_key_ref_value(self, key, references, value):
        """Check that 'key', 'references' and 'value' are all valid.

        :param value: The value associated with this key. Must not contain
            newlines or null characters.
        :return: (node_refs, absent_references)

            * node_refs: basically a packed form of 'references' where all
              keys are tuples.
            * absent_references: reference keys that are not in self._nodes.
              This may contain duplicates if the same key is referenced in
              multiple lists.
        """
        as_st = StaticTuple.from_sequence
        self._check_key(key)
        if _newline_null_re.search(value) is not None:
            raise errors.BadIndexValue(value)
        node_refs = []
        absent_references = []
        for reference_list in references:
            for reference in reference_list:
                if reference not in self._nodes:
                    self._check_key(reference)
                    absent_references.append(reference)
            reference_list = as_st([as_st(ref).intern()
                                    for ref in reference_list])
            node_refs.append(reference_list)
        return as_st(node_refs), absent_references
    def add_node(self, key, value, references=()):
        """Add a node to the index."""
        (node_refs,
         absent_references) = self._check_key_ref_value(key, references, value)
        for reference in absent_references:
            # There may be duplicates, but I don't think it is worth worrying
            # about
            self._nodes[reference] = ('a', (), '')
        self._absent_keys.update(absent_references)
        self._absent_keys.discard(key)
        self._nodes[key] = ('', node_refs, value)
        if self._nodes_by_key is not None and self._key_length > 1:
            self._update_nodes_by_key(key, value, node_refs)
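    # Usage sketch (illustrative only): referencing a key that has not been
    # added yet records it as absent rather than failing.
    #
    #   builder = GraphIndexBuilder(reference_lists=1)
    #   builder.add_node(('tip',), 'tip-value', ([('ghost',)],))
    #   builder.add_node(('base',), 'base-value', ([],))
    #
    # At this point builder._nodes holds three entries: ('tip',) and
    # ('base',) plus an ('a', (), '') placeholder for ('ghost',), and
    # builder._absent_keys == set([('ghost',)]).  Adding ('ghost',) later
    # would discard it from _absent_keys again.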
    def finish(self):
        """Finish the index.

        :returns: cStringIO holding the full content of the index as it
            should be written to disk.
        """
        lines = [_SIGNATURE]
        lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
        lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
        key_count = len(self._nodes) - len(self._absent_keys)
        lines.append(_OPTION_LEN + str(key_count) + '\n')
        prefix_length = sum(len(x) for x in lines)
        # references are byte offsets. To avoid having to do nasty
        # polynomial work to resolve offsets (references to later in the
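    # Continuing the sketch above: finish() counts only real rows for the
    # _OPTION_LEN line, so with two added keys and one absent placeholder
    #
    #   key_count = len(builder._nodes) - len(builder._absent_keys)
    #             = 3 - 1 == 2
    #
    # and the header records 2, not 3.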
    suitable for production use. :XXX
    """

    def __init__(self, transport, name, size, unlimited_cache=False, offset=0):
        """Open an index called name on transport.

        :param transport: A bzrlib.transport.Transport.
        """

    def _buffer_all(self, stream=None):
        if self._nodes is not None:
            # We already did this
            return
        if 'index' in debug.debug_flags:
            trace.mutter('Reading entire index %s',
                         self._transport.abspath(self._name))
        if stream is None:
            stream = self._transport.get(self._name)
        if self._base_offset != 0:
            # This is wasteful, but it is better than dealing with
            # adjusting all the offsets, etc.
            stream = StringIO(stream.read()[self._base_offset:])
        self._read_prefix(stream)
        self._expected_elements = 3 + self._key_length
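    # Read-back sketch (illustrative; 'example.cix' is an arbitrary name,
    # 'builder' is the builder from the earlier sketch):
    #
    #   from bzrlib.index import GraphIndex
    #   from bzrlib.transport import get_transport
    #
    #   t = get_transport('.')
    #   content = builder.finish().read()
    #   t.put_bytes('example.cix', content)
    #   index = GraphIndex(t, 'example.cix', len(content))
    #   list(index.iter_entries([('base',)]))
    #
    # The offset= argument shifts every read by a fixed amount, so the same
    # class can also read an index that starts partway into a larger file;
    # _buffer_all() above simply slices that prefix away.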
    def external_references(self, ref_list_num):
        """Return references that are not present in this index."""
        self._buffer_all()
        if ref_list_num + 1 > self.node_ref_lists:
            raise ValueError('No ref list %d, index has %d ref lists'
                % (ref_list_num, self.node_ref_lists))
        refs = set()
        nodes = self._nodes
        for key, (value, ref_lists) in nodes.iteritems():
            ref_list = ref_lists[ref_list_num]
            refs.update([ref for ref in ref_list if ref not in nodes])
        return refs
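    # For the ghost sketch above, external_references(0) on the written-out
    # and re-read index would return set([('ghost',)]): the key is named in
    # reference list 0 of another row but (assuming absent placeholder rows
    # are skipped when the file is parsed into self._nodes) has no entry of
    # its own in 'nodes'.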
    def _get_nodes_by_key(self):
        if self._nodes_by_key is None:
    def _iter_entries_from_total_buffer(self, keys):
        """Iterate over keys when the entire index is parsed."""
        # Note: See the note in BTreeBuilder.iter_entries for why we don't use
        # .intersection() here
        nodes = self._nodes
        keys = [key for key in keys if key in nodes]
        if self.node_ref_lists:
            for key in keys:
                value, node_refs = nodes[key]
                yield self, key, value, node_refs
        else:
            for key in keys:
                yield self, key, nodes[key]

    def iter_entries(self, keys):
        """Iterate over keys within the index."""
    def _read_and_parse(self, readv_ranges):
        """Read the ranges and parse the resulting data.

        :param readv_ranges: A prepared readv range list.
        """
        if not readv_ranges:
            return
        if self._nodes is None and self._bytes_read * 2 >= self._size:
            # We've already read more than 50% of the file and we are about to
            # request more data, just _buffer_all() and be done
            self._buffer_all()
            return

        base_offset = self._base_offset
        if base_offset != 0:
            # Rewrite the ranges for the offset
            readv_ranges = [(start+base_offset, size)
                            for start, size in readv_ranges]
        readv_data = self._transport.readv(self._name, readv_ranges, True,
            self._size + self._base_offset)
        for offset, data in readv_data:
            offset -= base_offset
            self._bytes_read += len(data)
            if offset < 0:
                # transport.readv() expanded to extra data which isn't part of
                # this index
                data = data[-offset:]
                offset = 0
            if offset == 0 and len(data) == self._size:
                # We read the whole range, most likely because the
                # Transport upcast our readv ranges into one long request
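
# A small, self-contained illustration of the offset arithmetic used in
# _read_and_parse above (plain Python, not bzrlib code; the helper name is
# made up): requested ranges are shifted forward by the base offset before
# the readv call, and the offsets that come back are shifted back again
# before parsing.
def _demo_offset_rewrite(readv_ranges, base_offset):
    # What gets sent to transport.readv(): every start is shifted forward.
    shifted = [(start + base_offset, size) for start, size in readv_ranges]
    # What the parser works with afterwards: the returned offsets shifted back.
    recovered = [start - base_offset for start, size in shifted]
    return shifted, recovered

# _demo_offset_rewrite([(0, 200), (4096, 800)], 1234)
# -> ([(1234, 200), (5330, 800)], [0, 4096])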
class CombinedGraphIndex(object):
    """A GraphIndex made up from smaller GraphIndices.

    Queries against the combined index will be made against the first index,
    and then the second and so on. The order of indices can thus influence
    performance significantly. For example, if one index is on local disk and a
    second on a remote server, the local disk index should be before the other
    in the index list.

    Also, queries tend to need results from the same indices as previous
    queries. So the indices will be reordered after every query to put the
    indices that had the result(s) of that query first (while otherwise
    preserving the relative ordering).
    """
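    # Ordering sketch (hypothetical names): put the cheap index first, since
    # earlier indices are queried first.
    #
    #   combined = CombinedGraphIndex([local_pack_index, remote_pack_index])
    #
    # After a query that was answered by remote_pack_index, the reordering
    # described above would move it to the front for subsequent queries.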
    def __init__(self, indices, reload_func=None):
        self._indices = indices
        self._reload_func = reload_func
        # Sibling indices are other CombinedGraphIndex that we should call
        # _move_to_front_by_name on when we auto-reorder ourself.
        self._sibling_indices = []
        # A list of names that corresponds to the instances in self._indices,
        # so _index_names[0] is always the name for _indices[0], etc. Sibling
        # indices must all use the same set of names as each other.
        self._index_names = [None] * len(self._indices)

    def __repr__(self):
        return "%s(%s)" % (
                self.__class__.__name__,
                ', '.join(map(repr, self._indices)))
    def get_parent_map(self, keys):
        """See graph.StackedParentsProvider.get_parent_map"""
        search_keys = set(keys)
        if _mod_revision.NULL_REVISION in search_keys:
            search_keys.discard(_mod_revision.NULL_REVISION)
            found_parents = {_mod_revision.NULL_REVISION:[]}
        else:
            found_parents = {}
        for index, key, value, refs in self.iter_entries(search_keys):
            parents = refs[0]
            if not parents:
                parents = (_mod_revision.NULL_REVISION,)
            found_parents[key] = parents
        return found_parents

    has_key = _has_key_from_parent_map
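    # Behaviour sketch: for a combined revision-graph index, keys whose
    # stored reference list is empty come back mapped to (NULL_REVISION,),
    # and keys not found in any child index are simply missing from the
    # result (callers treat them as ghosts).
    #
    #   parent_map = combined.get_parent_map(wanted_keys)   # hypothetical names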
    def insert_index(self, pos, index, name=None):
        """Insert a new index in the list of indices to query.

        :param pos: The position to insert the index.
        :param index: The index to insert.
        :param name: a name for this index, e.g. a pack name. These names can
            be used to reflect index reorderings to related CombinedGraphIndex
            instances that use the same names. (see set_sibling_indices)
        """
        self._indices.insert(pos, index)
        self._index_names.insert(pos, name)
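    # Sketch (hypothetical names): register a new pack's index under its
    # pack name so that sibling CombinedGraphIndex objects can mirror any
    # reordering by name.
    #
    #   combined.insert_index(0, new_pack_index, name='new-pack')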
    def iter_all_entries(self):
        """Iterate over all keys within the index."""

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, reference_lists, value). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index.
        """
        keys = set(keys)
        hit_indices = []
        while True:
            try:
                for index in self._indices:
                    if not keys:
                        break
                    index_hit = False
                    for node in index.iter_entries(keys):
                        keys.remove(node[1])
                        yield node
                        index_hit = True
                    if index_hit:
                        hit_indices.append(index)
                break
            except errors.NoSuchFile:
                self._reload_or_raise()
        self._move_to_front(hit_indices)
    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching."""
        seen_keys = set()
        hit_indices = []
        while True:
            try:
                for index in self._indices:
                    index_hit = False
                    for node in index.iter_entries_prefix(keys):
                        if node[1] in seen_keys:
                            continue
                        seen_keys.add(node[1])
                        yield node
                        index_hit = True
                    if index_hit:
                        hit_indices.append(index)
                break
            except errors.NoSuchFile:
                self._reload_or_raise()
        self._move_to_front(hit_indices)
    def _move_to_front(self, hit_indices):
        """Rearrange self._indices so that hit_indices are first.

        Order is maintained as much as possible, e.g. the first unhit index
        will be the first index in _indices after the hit_indices, and the
        hit_indices will be present in exactly the order they are passed to
        _move_to_front.

        _move_to_front propagates to all objects in self._sibling_indices by
        calling _move_to_front_by_name.
        """
        if self._indices[:len(hit_indices)] == hit_indices:
            # The 'hit_indices' are already at the front (and in the same
            # order), no need to re-order
            return
        hit_names = self._move_to_front_by_index(hit_indices)
        for sibling_idx in self._sibling_indices:
            sibling_idx._move_to_front_by_name(hit_names)

    def _move_to_front_by_index(self, hit_indices):
        """Core logic for _move_to_front.

        Returns a list of names corresponding to the hit_indices param.
        """
        indices_info = zip(self._index_names, self._indices)
        if 'index' in debug.debug_flags:
            trace.mutter('CombinedGraphIndex reordering: currently %r, '
                         'promoting %r', indices_info, hit_indices)
        hit_names = []
        unhit_names = []
        new_hit_indices = []
        unhit_indices = []
        for offset, (name, idx) in enumerate(indices_info):
            if idx in hit_indices:
                hit_names.append(name)
                new_hit_indices.append(idx)
                if len(new_hit_indices) == len(hit_indices):
                    # We've found all of the hit entries, everything else is
                    # unhit
                    unhit_names.extend(self._index_names[offset+1:])
                    unhit_indices.extend(self._indices[offset+1:])
                    break
            else:
                unhit_names.append(name)
                unhit_indices.append(idx)
        self._indices = new_hit_indices + unhit_indices
        self._index_names = hit_names + unhit_names
        if 'index' in debug.debug_flags:
            trace.mutter('CombinedGraphIndex reordered: %r', self._indices)
        return hit_names

    def _move_to_front_by_name(self, hit_names):
        """Moves indices named by 'hit_names' to front of the search order, as
        described in _move_to_front.
        """
        # Translate names to index instances, and then call
        # _move_to_front_by_index.
        indices_info = zip(self._index_names, self._indices)
        hit_indices = []
        for name, idx in indices_info:
            if name in hit_names:
                hit_indices.append(idx)
        self._move_to_front_by_index(hit_indices)
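    # Worked example of the reordering above: with _index_names currently
    # ['a', 'b', 'c', 'd'] and a query answered only by the index named 'c',
    # _move_to_front_by_index keeps the relative order of everything else:
    #
    #   before: ['a', 'b', 'c', 'd']
    #   after:  ['c', 'a', 'b', 'd']
    #
    # and returns ['c'], which siblings replay via _move_to_front_by_name.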
    def find_ancestry(self, keys, ref_list_num):
        """Find the complete ancestry for the given set of keys."""


class InMemoryGraphIndex(GraphIndexBuilder):

    def iter_entries(self, keys):
        """Iterate over keys within the index.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        # Note: See BTreeBuilder.iter_entries for an explanation of why we
        #       aren't using set().intersection() here
        nodes = self._nodes
        keys = [key for key in keys if key in nodes]
        if self.reference_lists:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2], node[1]
        else:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2]
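
# End-to-end sketch (illustrative, not part of bzrlib/index.py): build a
# small index, write it out through a transport, and query it both directly
# and through a CombinedGraphIndex.  Assumes a writable current directory;
# 'demo.cix' and the helper name are arbitrary.
def _demo_round_trip():
    from bzrlib.index import GraphIndexBuilder, GraphIndex, CombinedGraphIndex
    from bzrlib.transport import get_transport

    builder = GraphIndexBuilder(reference_lists=1)
    builder.add_node(('parent',), 'parent-value', ([],))
    builder.add_node(('child',), 'child-value', ([('parent',)],))
    content = builder.finish().read()

    t = get_transport('.')
    t.put_bytes('demo.cix', content)
    index = GraphIndex(t, 'demo.cix', len(content))
    combined = CombinedGraphIndex([index])
    # Each hit is (index, key, value, reference_lists) because the index was
    # built with one reference list.
    return list(combined.iter_entries([('child',)]))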