# Copyright (C) 2008, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18
"""Tests for btree indices."""
31
from bzrlib.tests import (
32
TestCaseWithTransport,
35
split_suite_by_condition,
37
from bzrlib.transport import get_transport
40
def load_tests(standard_tests, module, loader):
    """Multiply the TestBTreeNodes tests against each available serializer.

    The pure-Python serializer is always tested; the compiled (pyx)
    serializer is added as a second scenario when it is importable.
    """
    # parameterise the TestBTreeNodes tests
    node_tests, others = split_suite_by_condition(standard_tests,
        condition_isinstance(TestBTreeNodes))
    import bzrlib._btree_serializer_py as py_module
    scenarios = [('python', {'parse_btree': py_module})]
    if compiled_btreeparser_feature.available():
        scenarios.append(('C', {'parse_btree':
                                compiled_btreeparser_feature.module}))
    return multiply_tests(node_tests, scenarios, others)
52
# Feature guard for the optional compiled (Pyrex/C) btree serializer; used by
# load_tests above to decide whether the 'C' scenario can run.
compiled_btreeparser_feature = tests.ModuleAvailableFeature(
    'bzrlib._btree_serializer_pyx')
56
class BTreeTestCase(TestCaseWithTransport):
57
# test names here are suffixed by the key length and reference list count
61
TestCaseWithTransport.setUp(self)
62
self.overrideAttr(btree_index, '_RESERVED_HEADER_BYTES', 100)
64
def make_nodes(self, count, key_elements, reference_lists):
65
"""Generate count*key_elements sample nodes."""
67
for prefix_pos in range(key_elements):
69
prefix = (str(prefix_pos) * 40,)
72
for pos in xrange(count):
73
# TODO: This creates odd keys. When count == 100,000, it
74
# creates a 240 byte key
75
key = prefix + (str(pos) * 40,)
76
value = "value:%s" % pos
78
# generate some references
80
for list_pos in range(reference_lists):
81
# as many keys in each list as its index + the key depth
82
# mod 2 - this generates both 0 length lists and
83
# ones slightly longer than the number of lists.
84
# It also ensures we have non homogeneous lists.
86
for ref_pos in range(list_pos + pos % 2):
88
# refer to a nearby key
89
refs[-1].append(prefix + ("ref" + str(pos - 1) * 40,))
91
# serial of this ref in the ref list
92
refs[-1].append(prefix + ("ref" + str(ref_pos) * 40,))
93
refs[-1] = tuple(refs[-1])
97
keys.append((key, value, refs))
100
def shrink_page_size(self):
    """Shrink the default page size so that less fits in a page."""
    # overrideAttr restores the original _PAGE_SIZE at test teardown.
    self.overrideAttr(btree_index, '_PAGE_SIZE')
    btree_index._PAGE_SIZE = 2048
106
class TestBTreeBuilder(BTreeTestCase):
108
def test_clear_cache(self):
    """clear_cache() on a builder is accepted and does nothing."""
    builder = btree_index.BTreeBuilder(reference_lists=0, key_elements=1)
    # This is a no-op, but we need the api to be consistent with other
    # BTreeGraphIndex apis.
    builder.clear_cache()
114
def test_empty_1_0(self):
115
builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
116
# NamedTemporaryFile dies on builder.finish().read(). weird.
117
temp_file = builder.finish()
118
content = temp_file.read()
121
"B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=0\n"
125
def test_empty_2_1(self):
126
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=1)
127
# NamedTemporaryFile dies on builder.finish().read(). weird.
128
temp_file = builder.finish()
129
content = temp_file.read()
132
"B+Tree Graph Index 2\nnode_ref_lists=1\nkey_elements=2\nlen=0\n"
136
def test_root_leaf_1_0(self):
137
builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
138
nodes = self.make_nodes(5, 1, 0)
140
builder.add_node(*node)
141
# NamedTemporaryFile dies on builder.finish().read(). weird.
142
temp_file = builder.finish()
143
content = temp_file.read()
145
self.assertEqual(131, len(content))
147
"B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=5\n"
150
node_content = content[73:]
151
node_bytes = zlib.decompress(node_content)
152
expected_node = ("type=leaf\n"
153
"0000000000000000000000000000000000000000\x00\x00value:0\n"
154
"1111111111111111111111111111111111111111\x00\x00value:1\n"
155
"2222222222222222222222222222222222222222\x00\x00value:2\n"
156
"3333333333333333333333333333333333333333\x00\x00value:3\n"
157
"4444444444444444444444444444444444444444\x00\x00value:4\n")
158
self.assertEqual(expected_node, node_bytes)
160
def test_root_leaf_2_2(self):
161
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
162
nodes = self.make_nodes(5, 2, 2)
164
builder.add_node(*node)
165
# NamedTemporaryFile dies on builder.finish().read(). weird.
166
temp_file = builder.finish()
167
content = temp_file.read()
169
self.assertEqual(238, len(content))
171
"B+Tree Graph Index 2\nnode_ref_lists=2\nkey_elements=2\nlen=10\n"
174
node_content = content[74:]
175
node_bytes = zlib.decompress(node_content)
178
"0000000000000000000000000000000000000000\x000000000000000000000000000000000000000000\x00\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:0\n"
179
"0000000000000000000000000000000000000000\x001111111111111111111111111111111111111111\x000000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\r0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:1\n"
180
"0000000000000000000000000000000000000000\x002222222222222222222222222222222222222222\x00\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:2\n"
181
"0000000000000000000000000000000000000000\x003333333333333333333333333333333333333333\x000000000000000000000000000000000000000000\x00ref2222222222222222222222222222222222222222\t0000000000000000000000000000000000000000\x00ref2222222222222222222222222222222222222222\r0000000000000000000000000000000000000000\x00ref2222222222222222222222222222222222222222\x00value:3\n"
182
"0000000000000000000000000000000000000000\x004444444444444444444444444444444444444444\x00\t0000000000000000000000000000000000000000\x00ref0000000000000000000000000000000000000000\x00value:4\n"
183
"1111111111111111111111111111111111111111\x000000000000000000000000000000000000000000\x00\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:0\n"
184
"1111111111111111111111111111111111111111\x001111111111111111111111111111111111111111\x001111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\r1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:1\n"
185
"1111111111111111111111111111111111111111\x002222222222222222222222222222222222222222\x00\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:2\n"
186
"1111111111111111111111111111111111111111\x003333333333333333333333333333333333333333\x001111111111111111111111111111111111111111\x00ref2222222222222222222222222222222222222222\t1111111111111111111111111111111111111111\x00ref2222222222222222222222222222222222222222\r1111111111111111111111111111111111111111\x00ref2222222222222222222222222222222222222222\x00value:3\n"
187
"1111111111111111111111111111111111111111\x004444444444444444444444444444444444444444\x00\t1111111111111111111111111111111111111111\x00ref0000000000000000000000000000000000000000\x00value:4\n"
190
self.assertEqual(expected_node, node_bytes)
192
def test_2_leaves_1_0(self):
193
builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
194
nodes = self.make_nodes(400, 1, 0)
196
builder.add_node(*node)
197
# NamedTemporaryFile dies on builder.finish().read(). weird.
198
temp_file = builder.finish()
199
content = temp_file.read()
201
self.assertEqual(9283, len(content))
203
"B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=400\n"
206
root = content[77:4096]
207
leaf1 = content[4096:8192]
208
leaf2 = content[8192:]
209
root_bytes = zlib.decompress(root)
213
) + ("307" * 40) + "\n"
214
self.assertEqual(expected_root, root_bytes)
215
# We already know serialisation works for leaves, check key selection:
216
leaf1_bytes = zlib.decompress(leaf1)
217
sorted_node_keys = sorted(node[0] for node in nodes)
218
node = btree_index._LeafNode(leaf1_bytes, 1, 0)
219
self.assertEqual(231, len(node.keys))
220
self.assertEqual(sorted_node_keys[:231], sorted(node.keys))
221
leaf2_bytes = zlib.decompress(leaf2)
222
node = btree_index._LeafNode(leaf2_bytes, 1, 0)
223
self.assertEqual(400 - 231, len(node.keys))
224
self.assertEqual(sorted_node_keys[231:], sorted(node.keys))
226
def test_last_page_rounded_1_layer(self):
227
builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
228
nodes = self.make_nodes(10, 1, 0)
230
builder.add_node(*node)
231
# NamedTemporaryFile dies on builder.finish().read(). weird.
232
temp_file = builder.finish()
233
content = temp_file.read()
235
self.assertEqual(155, len(content))
237
"B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=10\n"
240
# Check thelast page is well formed
242
leaf2_bytes = zlib.decompress(leaf2)
243
node = btree_index._LeafNode(leaf2_bytes, 1, 0)
244
self.assertEqual(10, len(node.keys))
245
sorted_node_keys = sorted(node[0] for node in nodes)
246
self.assertEqual(sorted_node_keys, sorted(node.keys))
248
def test_last_page_not_rounded_2_layer(self):
249
builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
250
nodes = self.make_nodes(400, 1, 0)
252
builder.add_node(*node)
253
# NamedTemporaryFile dies on builder.finish().read(). weird.
254
temp_file = builder.finish()
255
content = temp_file.read()
257
self.assertEqual(9283, len(content))
259
"B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=400\n"
262
# Check the last page is well formed
263
leaf2 = content[8192:]
264
leaf2_bytes = zlib.decompress(leaf2)
265
node = btree_index._LeafNode(leaf2_bytes, 1, 0)
266
self.assertEqual(400 - 231, len(node.keys))
267
sorted_node_keys = sorted(node[0] for node in nodes)
268
self.assertEqual(sorted_node_keys[231:], sorted(node.keys))
270
def test_three_level_tree_details(self):
271
# The left most pointer in the second internal node in a row should
272
# pointer to the second node that the internal node is for, _not_
273
# the first, otherwise the first node overlaps with the last node of
274
# the prior internal node on that row.
275
self.shrink_page_size()
276
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
277
# 40K nodes is enough to create a two internal nodes on the second
278
# level, with a 2K page size
279
nodes = self.make_nodes(20000, 2, 2)
282
builder.add_node(*node)
283
transport = get_transport('trace+' + self.get_url(''))
284
size = transport.put_file('index', self.time(builder.finish))
286
index = btree_index.BTreeGraphIndex(transport, 'index', size)
287
# Seed the metadata, we're using internal calls now.
289
self.assertEqual(3, len(index._row_lengths),
290
"Not enough rows: %r" % index._row_lengths)
291
self.assertEqual(4, len(index._row_offsets))
292
self.assertEqual(sum(index._row_lengths), index._row_offsets[-1])
293
internal_nodes = index._get_internal_nodes([0, 1, 2])
294
root_node = internal_nodes[0]
295
internal_node1 = internal_nodes[1]
296
internal_node2 = internal_nodes[2]
297
# The left most node node2 points at should be one after the right most
298
# node pointed at by node1.
299
self.assertEqual(internal_node2.offset, 1 + len(internal_node1.keys))
300
# The left most key of the second node pointed at by internal_node2
301
# should be its first key. We can check this by looking for its first key
302
# in the second node it points at
303
pos = index._row_offsets[2] + internal_node2.offset + 1
304
leaf = index._get_leaf_nodes([pos])[pos]
305
self.assertTrue(internal_node2.keys[0] in leaf.keys)
307
def test_2_leaves_2_2(self):
308
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
309
nodes = self.make_nodes(100, 2, 2)
311
builder.add_node(*node)
312
# NamedTemporaryFile dies on builder.finish().read(). weird.
313
temp_file = builder.finish()
314
content = temp_file.read()
316
self.assertEqual(12643, len(content))
318
"B+Tree Graph Index 2\nnode_ref_lists=2\nkey_elements=2\nlen=200\n"
321
root = content[77:4096]
322
leaf1 = content[4096:8192]
323
leaf2 = content[8192:12288]
324
leaf3 = content[12288:]
325
root_bytes = zlib.decompress(root)
329
+ ("0" * 40) + "\x00" + ("91" * 40) + "\n"
330
+ ("1" * 40) + "\x00" + ("81" * 40) + "\n"
332
self.assertEqual(expected_root, root_bytes)
333
# We assume the other leaf nodes have been written correctly - layering
336
def test_spill_index_stress_1_1(self):
337
builder = btree_index.BTreeBuilder(key_elements=1, spill_at=2)
338
nodes = [node[0:2] for node in self.make_nodes(16, 1, 0)]
339
builder.add_node(*nodes[0])
340
# Test the parts of the index that take up memory are doing so
342
self.assertEqual(1, len(builder._nodes))
343
self.assertIs(None, builder._nodes_by_key)
344
builder.add_node(*nodes[1])
345
self.assertEqual(0, len(builder._nodes))
346
self.assertIs(None, builder._nodes_by_key)
347
self.assertEqual(1, len(builder._backing_indices))
348
self.assertEqual(2, builder._backing_indices[0].key_count())
350
builder.add_node(*nodes[2])
351
self.assertEqual(1, len(builder._nodes))
352
self.assertIs(None, builder._nodes_by_key)
353
# And spills to a second backing index combing all
354
builder.add_node(*nodes[3])
355
self.assertEqual(0, len(builder._nodes))
356
self.assertIs(None, builder._nodes_by_key)
357
self.assertEqual(2, len(builder._backing_indices))
358
self.assertEqual(None, builder._backing_indices[0])
359
self.assertEqual(4, builder._backing_indices[1].key_count())
360
# The next spills to the 2-len slot
361
builder.add_node(*nodes[4])
362
builder.add_node(*nodes[5])
363
self.assertEqual(0, len(builder._nodes))
364
self.assertIs(None, builder._nodes_by_key)
365
self.assertEqual(2, len(builder._backing_indices))
366
self.assertEqual(2, builder._backing_indices[0].key_count())
367
self.assertEqual(4, builder._backing_indices[1].key_count())
368
# Next spill combines
369
builder.add_node(*nodes[6])
370
builder.add_node(*nodes[7])
371
self.assertEqual(3, len(builder._backing_indices))
372
self.assertEqual(None, builder._backing_indices[0])
373
self.assertEqual(None, builder._backing_indices[1])
374
self.assertEqual(8, builder._backing_indices[2].key_count())
375
# And so forth - counting up in binary.
376
builder.add_node(*nodes[8])
377
builder.add_node(*nodes[9])
378
self.assertEqual(3, len(builder._backing_indices))
379
self.assertEqual(2, builder._backing_indices[0].key_count())
380
self.assertEqual(None, builder._backing_indices[1])
381
self.assertEqual(8, builder._backing_indices[2].key_count())
382
builder.add_node(*nodes[10])
383
builder.add_node(*nodes[11])
384
self.assertEqual(3, len(builder._backing_indices))
385
self.assertEqual(None, builder._backing_indices[0])
386
self.assertEqual(4, builder._backing_indices[1].key_count())
387
self.assertEqual(8, builder._backing_indices[2].key_count())
388
builder.add_node(*nodes[12])
389
# Test that memory and disk are both used for query methods; and that
390
# None is skipped over happily.
391
self.assertEqual([(builder,) + node for node in sorted(nodes[:13])],
392
list(builder.iter_all_entries()))
393
# Two nodes - one memory one disk
394
self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
395
set(builder.iter_entries([nodes[12][0], nodes[11][0]])))
396
self.assertEqual(13, builder.key_count())
397
self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
398
set(builder.iter_entries_prefix([nodes[12][0], nodes[11][0]])))
399
builder.add_node(*nodes[13])
400
self.assertEqual(3, len(builder._backing_indices))
401
self.assertEqual(2, builder._backing_indices[0].key_count())
402
self.assertEqual(4, builder._backing_indices[1].key_count())
403
self.assertEqual(8, builder._backing_indices[2].key_count())
404
builder.add_node(*nodes[14])
405
builder.add_node(*nodes[15])
406
self.assertEqual(4, len(builder._backing_indices))
407
self.assertEqual(None, builder._backing_indices[0])
408
self.assertEqual(None, builder._backing_indices[1])
409
self.assertEqual(None, builder._backing_indices[2])
410
self.assertEqual(16, builder._backing_indices[3].key_count())
411
# Now finish, and check we got a correctly ordered tree
412
transport = self.get_transport('')
413
size = transport.put_file('index', builder.finish())
414
index = btree_index.BTreeGraphIndex(transport, 'index', size)
415
nodes = list(index.iter_all_entries())
416
self.assertEqual(sorted(nodes), nodes)
417
self.assertEqual(16, len(nodes))
419
def test_spill_index_stress_1_1_no_combine(self):
420
builder = btree_index.BTreeBuilder(key_elements=1, spill_at=2)
421
builder.set_optimize(for_size=False, combine_backing_indices=False)
422
nodes = [node[0:2] for node in self.make_nodes(16, 1, 0)]
423
builder.add_node(*nodes[0])
424
# Test the parts of the index that take up memory are doing so
426
self.assertEqual(1, len(builder._nodes))
427
self.assertIs(None, builder._nodes_by_key)
428
builder.add_node(*nodes[1])
429
self.assertEqual(0, len(builder._nodes))
430
self.assertIs(None, builder._nodes_by_key)
431
self.assertEqual(1, len(builder._backing_indices))
432
self.assertEqual(2, builder._backing_indices[0].key_count())
434
builder.add_node(*nodes[2])
435
self.assertEqual(1, len(builder._nodes))
436
self.assertIs(None, builder._nodes_by_key)
437
# And spills to a second backing index but doesn't combine
438
builder.add_node(*nodes[3])
439
self.assertEqual(0, len(builder._nodes))
440
self.assertIs(None, builder._nodes_by_key)
441
self.assertEqual(2, len(builder._backing_indices))
442
for backing_index in builder._backing_indices:
443
self.assertEqual(2, backing_index.key_count())
444
# The next spills to the 3rd slot
445
builder.add_node(*nodes[4])
446
builder.add_node(*nodes[5])
447
self.assertEqual(0, len(builder._nodes))
448
self.assertIs(None, builder._nodes_by_key)
449
self.assertEqual(3, len(builder._backing_indices))
450
for backing_index in builder._backing_indices:
451
self.assertEqual(2, backing_index.key_count())
452
# Now spill a few more, and check that we don't combine
453
builder.add_node(*nodes[6])
454
builder.add_node(*nodes[7])
455
builder.add_node(*nodes[8])
456
builder.add_node(*nodes[9])
457
builder.add_node(*nodes[10])
458
builder.add_node(*nodes[11])
459
builder.add_node(*nodes[12])
460
self.assertEqual(6, len(builder._backing_indices))
461
for backing_index in builder._backing_indices:
462
self.assertEqual(2, backing_index.key_count())
463
# Test that memory and disk are both used for query methods; and that
464
# None is skipped over happily.
465
self.assertEqual([(builder,) + node for node in sorted(nodes[:13])],
466
list(builder.iter_all_entries()))
467
# Two nodes - one memory one disk
468
self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
469
set(builder.iter_entries([nodes[12][0], nodes[11][0]])))
470
self.assertEqual(13, builder.key_count())
471
self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
472
set(builder.iter_entries_prefix([nodes[12][0], nodes[11][0]])))
473
builder.add_node(*nodes[13])
474
builder.add_node(*nodes[14])
475
builder.add_node(*nodes[15])
476
self.assertEqual(8, len(builder._backing_indices))
477
for backing_index in builder._backing_indices:
478
self.assertEqual(2, backing_index.key_count())
479
# Now finish, and check we got a correctly ordered tree
480
transport = self.get_transport('')
481
size = transport.put_file('index', builder.finish())
482
index = btree_index.BTreeGraphIndex(transport, 'index', size)
483
nodes = list(index.iter_all_entries())
484
self.assertEqual(sorted(nodes), nodes)
485
self.assertEqual(16, len(nodes))
487
def test_set_optimize(self):
488
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
489
builder.set_optimize(for_size=True)
490
self.assertTrue(builder._optimize_for_size)
491
builder.set_optimize(for_size=False)
492
self.assertFalse(builder._optimize_for_size)
493
# test that we can set combine_backing_indices without effecting
496
builder._optimize_for_size = obj
497
builder.set_optimize(combine_backing_indices=False)
498
self.assertFalse(builder._combine_backing_indices)
499
self.assertIs(obj, builder._optimize_for_size)
500
builder.set_optimize(combine_backing_indices=True)
501
self.assertTrue(builder._combine_backing_indices)
502
self.assertIs(obj, builder._optimize_for_size)
504
def test_spill_index_stress_2_2(self):
505
# test that references and longer keys don't confuse things.
506
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2,
508
nodes = self.make_nodes(16, 2, 2)
509
builder.add_node(*nodes[0])
510
# Test the parts of the index that take up memory are doing so
512
self.assertEqual(1, len(builder._nodes))
513
self.assertIs(None, builder._nodes_by_key)
514
builder.add_node(*nodes[1])
515
self.assertEqual(0, len(builder._nodes))
516
self.assertIs(None, builder._nodes_by_key)
517
self.assertEqual(1, len(builder._backing_indices))
518
self.assertEqual(2, builder._backing_indices[0].key_count())
520
old = dict(builder._get_nodes_by_key()) #Build up the nodes by key dict
521
builder.add_node(*nodes[2])
522
self.assertEqual(1, len(builder._nodes))
523
self.assertIsNot(None, builder._nodes_by_key)
524
self.assertNotEqual({}, builder._nodes_by_key)
525
# We should have a new entry
526
self.assertNotEqual(old, builder._nodes_by_key)
527
# And spills to a second backing index combing all
528
builder.add_node(*nodes[3])
529
self.assertEqual(0, len(builder._nodes))
530
self.assertIs(None, builder._nodes_by_key)
531
self.assertEqual(2, len(builder._backing_indices))
532
self.assertEqual(None, builder._backing_indices[0])
533
self.assertEqual(4, builder._backing_indices[1].key_count())
534
# The next spills to the 2-len slot
535
builder.add_node(*nodes[4])
536
builder.add_node(*nodes[5])
537
self.assertEqual(0, len(builder._nodes))
538
self.assertIs(None, builder._nodes_by_key)
539
self.assertEqual(2, len(builder._backing_indices))
540
self.assertEqual(2, builder._backing_indices[0].key_count())
541
self.assertEqual(4, builder._backing_indices[1].key_count())
542
# Next spill combines
543
builder.add_node(*nodes[6])
544
builder.add_node(*nodes[7])
545
self.assertEqual(3, len(builder._backing_indices))
546
self.assertEqual(None, builder._backing_indices[0])
547
self.assertEqual(None, builder._backing_indices[1])
548
self.assertEqual(8, builder._backing_indices[2].key_count())
549
# And so forth - counting up in binary.
550
builder.add_node(*nodes[8])
551
builder.add_node(*nodes[9])
552
self.assertEqual(3, len(builder._backing_indices))
553
self.assertEqual(2, builder._backing_indices[0].key_count())
554
self.assertEqual(None, builder._backing_indices[1])
555
self.assertEqual(8, builder._backing_indices[2].key_count())
556
builder.add_node(*nodes[10])
557
builder.add_node(*nodes[11])
558
self.assertEqual(3, len(builder._backing_indices))
559
self.assertEqual(None, builder._backing_indices[0])
560
self.assertEqual(4, builder._backing_indices[1].key_count())
561
self.assertEqual(8, builder._backing_indices[2].key_count())
562
builder.add_node(*nodes[12])
563
# Test that memory and disk are both used for query methods; and that
564
# None is skipped over happily.
565
self.assertEqual([(builder,) + node for node in sorted(nodes[:13])],
566
list(builder.iter_all_entries()))
567
# Two nodes - one memory one disk
568
self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
569
set(builder.iter_entries([nodes[12][0], nodes[11][0]])))
570
self.assertEqual(13, builder.key_count())
571
self.assertEqual(set([(builder,) + node for node in nodes[11:13]]),
572
set(builder.iter_entries_prefix([nodes[12][0], nodes[11][0]])))
573
builder.add_node(*nodes[13])
574
self.assertEqual(3, len(builder._backing_indices))
575
self.assertEqual(2, builder._backing_indices[0].key_count())
576
self.assertEqual(4, builder._backing_indices[1].key_count())
577
self.assertEqual(8, builder._backing_indices[2].key_count())
578
builder.add_node(*nodes[14])
579
builder.add_node(*nodes[15])
580
self.assertEqual(4, len(builder._backing_indices))
581
self.assertEqual(None, builder._backing_indices[0])
582
self.assertEqual(None, builder._backing_indices[1])
583
self.assertEqual(None, builder._backing_indices[2])
584
self.assertEqual(16, builder._backing_indices[3].key_count())
585
# Now finish, and check we got a correctly ordered tree
586
transport = self.get_transport('')
587
size = transport.put_file('index', builder.finish())
588
index = btree_index.BTreeGraphIndex(transport, 'index', size)
589
nodes = list(index.iter_all_entries())
590
self.assertEqual(sorted(nodes), nodes)
591
self.assertEqual(16, len(nodes))
593
def test_spill_index_duplicate_key_caught_on_finish(self):
594
builder = btree_index.BTreeBuilder(key_elements=1, spill_at=2)
595
nodes = [node[0:2] for node in self.make_nodes(16, 1, 0)]
596
builder.add_node(*nodes[0])
597
builder.add_node(*nodes[1])
598
builder.add_node(*nodes[0])
599
self.assertRaises(errors.BadIndexDuplicateKey, builder.finish)
602
class TestBTreeIndex(BTreeTestCase):
604
def make_index(self, ref_lists=0, key_elements=1, nodes=[]):
605
builder = btree_index.BTreeBuilder(reference_lists=ref_lists,
606
key_elements=key_elements)
607
for key, value, references in nodes:
608
builder.add_node(key, value, references)
609
stream = builder.finish()
610
trans = get_transport('trace+' + self.get_url())
611
size = trans.put_file('index', stream)
612
return btree_index.BTreeGraphIndex(trans, 'index', size)
614
def make_index_with_offset(self, ref_lists=1, key_elements=1, nodes=[],
616
builder = btree_index.BTreeBuilder(key_elements=key_elements,
617
reference_lists=ref_lists)
618
builder.add_nodes(nodes)
619
transport = self.get_transport('')
620
# NamedTemporaryFile dies on builder.finish().read(). weird.
621
temp_file = builder.finish()
622
content = temp_file.read()
625
transport.put_bytes('index', (' '*offset)+content)
626
return btree_index.BTreeGraphIndex(transport, 'index', size=size,
629
def test_clear_cache(self):
630
nodes = self.make_nodes(160, 2, 2)
631
index = self.make_index(ref_lists=2, key_elements=2, nodes=nodes)
632
self.assertEqual(1, len(list(index.iter_entries([nodes[30][0]]))))
633
self.assertEqual([1, 4], index._row_lengths)
634
self.assertIsNot(None, index._root_node)
635
internal_node_pre_clear = index._internal_node_cache.keys()
636
self.assertTrue(len(index._leaf_node_cache) > 0)
638
# We don't touch _root_node or _internal_node_cache, both should be
639
# small, and can save a round trip or two
640
self.assertIsNot(None, index._root_node)
641
# NOTE: We don't want to affect the _internal_node_cache, as we expect
642
# it will be small, and if we ever do touch this index again, it
643
# will save round-trips. This assertion isn't very strong,
644
# becuase without a 3-level index, we don't have any internal
646
self.assertEqual(internal_node_pre_clear,
647
index._internal_node_cache.keys())
648
self.assertEqual(0, len(index._leaf_node_cache))
650
def test_trivial_constructor(self):
    """Constructing an index with no size performs no transport I/O."""
    transport = get_transport('trace+' + self.get_url(''))
    index = btree_index.BTreeGraphIndex(transport, 'index', None)
    # Checks the page size at load, but that isn't logged yet.
    self.assertEqual([], transport._activity)
656
def test_with_size_constructor(self):
    """Constructing an index with a size performs no transport I/O."""
    transport = get_transport('trace+' + self.get_url(''))
    index = btree_index.BTreeGraphIndex(transport, 'index', 1)
    # Checks the page size at load, but that isn't logged yet.
    self.assertEqual([], transport._activity)
662
def test_empty_key_count_no_size(self):
    """key_count() on an empty index with unknown size does one full get."""
    builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
    transport = get_transport('trace+' + self.get_url(''))
    transport.put_file('index', builder.finish())
    index = btree_index.BTreeGraphIndex(transport, 'index', None)
    # Discard the writes recorded while creating the index.
    del transport._activity[:]
    self.assertEqual([], transport._activity)
    self.assertEqual(0, index.key_count())
    # The entire index should have been requested (as we generally have the
    # size available, and doing many small readvs is inappropriate).
    # We can't tell how much was actually read here, but - check the code.
    self.assertEqual([('get', 'index')], transport._activity)
675
def test_empty_key_count(self):
676
builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
677
transport = get_transport('trace+' + self.get_url(''))
678
size = transport.put_file('index', builder.finish())
679
self.assertEqual(72, size)
680
index = btree_index.BTreeGraphIndex(transport, 'index', size)
681
del transport._activity[:]
682
self.assertEqual([], transport._activity)
683
self.assertEqual(0, index.key_count())
684
# The entire index should have been read, as 4K > size
685
self.assertEqual([('readv', 'index', [(0, 72)], False, None)],
688
def test_non_empty_key_count_2_2(self):
689
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
690
nodes = self.make_nodes(35, 2, 2)
692
builder.add_node(*node)
693
transport = get_transport('trace+' + self.get_url(''))
694
size = transport.put_file('index', builder.finish())
695
index = btree_index.BTreeGraphIndex(transport, 'index', size)
696
del transport._activity[:]
697
self.assertEqual([], transport._activity)
698
self.assertEqual(70, index.key_count())
699
# The entire index should have been read, as it is one page long.
700
self.assertEqual([('readv', 'index', [(0, size)], False, None)],
702
self.assertEqual(1173, size)
704
def test_with_offset_no_size(self):
705
index = self.make_index_with_offset(key_elements=1, ref_lists=1,
707
nodes=self.make_nodes(200, 1, 1))
708
index._size = None # throw away the size info
709
self.assertEqual(200, index.key_count())
711
def test_with_small_offset(self):
712
index = self.make_index_with_offset(key_elements=1, ref_lists=1,
714
nodes=self.make_nodes(200, 1, 1))
715
self.assertEqual(200, index.key_count())
717
def test_with_large_offset(self):
718
index = self.make_index_with_offset(key_elements=1, ref_lists=1,
720
nodes=self.make_nodes(200, 1, 1))
721
self.assertEqual(200, index.key_count())
723
def test__read_nodes_no_size_one_page_reads_once(self):
724
self.make_index(nodes=[(('key',), 'value', ())])
725
trans = get_transport('trace+' + self.get_url())
726
index = btree_index.BTreeGraphIndex(trans, 'index', None)
727
del trans._activity[:]
728
nodes = dict(index._read_nodes([0]))
729
self.assertEqual([0], nodes.keys())
731
self.assertEqual([('key',)], node.keys.keys())
732
self.assertEqual([('get', 'index')], trans._activity)
734
def test__read_nodes_no_size_multiple_pages(self):
735
index = self.make_index(2, 2, nodes=self.make_nodes(160, 2, 2))
737
num_pages = index._row_offsets[-1]
738
# Reopen with a traced transport and no size
739
trans = get_transport('trace+' + self.get_url())
740
index = btree_index.BTreeGraphIndex(trans, 'index', None)
741
del trans._activity[:]
742
nodes = dict(index._read_nodes([0]))
743
self.assertEqual(range(num_pages), nodes.keys())
745
def test_2_levels_key_count_2_2(self):
746
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
747
nodes = self.make_nodes(160, 2, 2)
749
builder.add_node(*node)
750
transport = get_transport('trace+' + self.get_url(''))
751
size = transport.put_file('index', builder.finish())
752
self.assertEqual(17692, size)
753
index = btree_index.BTreeGraphIndex(transport, 'index', size)
754
del transport._activity[:]
755
self.assertEqual([], transport._activity)
756
self.assertEqual(320, index.key_count())
757
# The entire index should not have been read.
758
self.assertEqual([('readv', 'index', [(0, 4096)], False, None)],
761
def test_validate_one_page(self):
    """validate() on a single-page index reads the file linearly once."""
    builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
    nodes = self.make_nodes(45, 2, 2)
    for node in nodes:
        builder.add_node(*node)
    transport = get_transport('trace+' + self.get_url(''))
    size = transport.put_file('index', builder.finish())
    index = btree_index.BTreeGraphIndex(transport, 'index', size)
    del transport._activity[:]
    self.assertEqual([], transport._activity)
    index.validate()
    # The entire index should have been read linearly.
    self.assertEqual([('readv', 'index', [(0, size)], False, None)],
                     transport._activity)
    self.assertEqual(1488, size)
def test_validate_two_pages(self):
    """validate() on a multi-page index reads root, then the leaves."""
    builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
    nodes = self.make_nodes(80, 2, 2)
    for node in nodes:
        builder.add_node(*node)
    transport = get_transport('trace+' + self.get_url(''))
    size = transport.put_file('index', builder.finish())
    # Root page, 2 leaf pages
    self.assertEqual(9339, size)
    index = btree_index.BTreeGraphIndex(transport, 'index', size)
    del transport._activity[:]
    self.assertEqual([], transport._activity)
    index.validate()
    # The entire index should have been read linearly.
    self.assertEqual([('readv', 'index', [(0, 4096)], False, None),
                      ('readv', 'index', [(4096, 4096), (8192, 1147)],
                       False, None)],
                     transport._activity)
    # XXX: TODO: write some badly-ordered nodes, and some pointers-to-wrong
    # node and make validate find them.
def test_eq_ne(self):
    """__eq__/__ne__ compare transport, name and size."""
    # two indices are equal when constructed with the same parameters:
    transport1 = get_transport('trace+' + self.get_url(''))
    transport2 = get_transport(self.get_url(''))
    self.assertTrue(
        btree_index.BTreeGraphIndex(transport1, 'index', None) ==
        btree_index.BTreeGraphIndex(transport1, 'index', None))
    self.assertTrue(
        btree_index.BTreeGraphIndex(transport1, 'index', 20) ==
        btree_index.BTreeGraphIndex(transport1, 'index', 20))
    self.assertFalse(
        btree_index.BTreeGraphIndex(transport1, 'index', 20) ==
        btree_index.BTreeGraphIndex(transport2, 'index', 20))
    self.assertFalse(
        btree_index.BTreeGraphIndex(transport1, 'inde1', 20) ==
        btree_index.BTreeGraphIndex(transport1, 'inde2', 20))
    self.assertFalse(
        btree_index.BTreeGraphIndex(transport1, 'index', 10) ==
        btree_index.BTreeGraphIndex(transport1, 'index', 20))
    self.assertFalse(
        btree_index.BTreeGraphIndex(transport1, 'index', None) !=
        btree_index.BTreeGraphIndex(transport1, 'index', None))
    self.assertFalse(
        btree_index.BTreeGraphIndex(transport1, 'index', 20) !=
        btree_index.BTreeGraphIndex(transport1, 'index', 20))
    self.assertTrue(
        btree_index.BTreeGraphIndex(transport1, 'index', 20) !=
        btree_index.BTreeGraphIndex(transport2, 'index', 20))
    self.assertTrue(
        btree_index.BTreeGraphIndex(transport1, 'inde1', 20) !=
        btree_index.BTreeGraphIndex(transport1, 'inde2', 20))
    self.assertTrue(
        btree_index.BTreeGraphIndex(transport1, 'index', 10) !=
        btree_index.BTreeGraphIndex(transport1, 'index', 20))
def test_iter_all_only_root_no_size(self):
    """iter_all_entries on a root-only index without a size does one get()."""
    self.make_index(nodes=[(('key',), 'value', ())])
    trans = get_transport('trace+' + self.get_url(''))
    index = btree_index.BTreeGraphIndex(trans, 'index', None)
    del trans._activity[:]
    self.assertEqual([(('key',), 'value')],
                     [x[1:] for x in index.iter_all_entries()])
    self.assertEqual([('get', 'index')], trans._activity)
def test_iter_all_entries_reads(self):
    """iter_all_entries reads the header, then the leaves linearly."""
    # iterating all entries reads the header, then does a linear
    # read.
    self.shrink_page_size()
    builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
    # 20k nodes is enough to create a two internal nodes on the second
    # level, with a 2K page size
    nodes = self.make_nodes(10000, 2, 2)
    for node in nodes:
        builder.add_node(*node)
    transport = get_transport('trace+' + self.get_url(''))
    size = transport.put_file('index', builder.finish())
    self.assertEqual(1303220, size, 'number of expected bytes in the'
                                    ' output changed')
    page_size = btree_index._PAGE_SIZE
    del builder
    index = btree_index.BTreeGraphIndex(transport, 'index', size)
    del transport._activity[:]
    self.assertEqual([], transport._activity)
    found_nodes = self.time(list, index.iter_all_entries())
    bare_nodes = []
    for node in found_nodes:
        self.assertTrue(node[0] is index)
        bare_nodes.append(node[1:])
    self.assertEqual(3, len(index._row_lengths),
                     "Not enough rows: %r" % index._row_lengths)
    # Should be as long as the nodes we supplied
    self.assertEqual(20000, len(found_nodes))
    # Should have the same content
    self.assertEqual(set(nodes), set(bare_nodes))
    # Should have done linear scan IO up the index, ignoring
    # the internal nodes:
    # The entire index should have been read
    total_pages = sum(index._row_lengths)
    self.assertEqual(total_pages, index._row_offsets[-1])
    self.assertEqual(1303220, size)
    # The start of the leaves
    first_byte = index._row_offsets[-2] * page_size
    readv_request = []
    for offset in range(first_byte, size, page_size):
        readv_request.append((offset, page_size))
    # The last page is truncated
    readv_request[-1] = (readv_request[-1][0], 1303220 % page_size)
    expected = [('readv', 'index', [(0, page_size)], False, None),
                ('readv', 'index', readv_request, False, None)]
    if expected != transport._activity:
        self.assertEqualDiff(pprint.pformat(expected),
                             pprint.pformat(transport._activity))
def _test_iter_entries_references_resolved(self):
    """iter_entries returns reference lists resolved to key tuples.

    NOTE(review): leading underscore means this test is currently
    disabled; left as-is deliberately.
    """
    index = self.make_index(1, nodes=[
        (('name', ), 'data', ([('ref', ), ('ref', )], )),
        (('ref', ), 'refdata', ([], ))])
    self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),('ref',)),)),
        (index, ('ref', ), 'refdata', ((), ))]),
        set(index.iter_entries([('name',), ('ref',)])))
def test_iter_entries_references_2_refs_resolved(self):
    """A single-key lookup reads only the root plus one leaf page."""
    # iterating some entries reads just the pages needed. For now, to
    # get it working and start measuring, only 4K pages are read.
    builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
    # 80 nodes is enough to create a two-level index.
    nodes = self.make_nodes(160, 2, 2)
    for node in nodes:
        builder.add_node(*node)
    transport = get_transport('trace+' + self.get_url(''))
    size = transport.put_file('index', builder.finish())
    del builder
    index = btree_index.BTreeGraphIndex(transport, 'index', size)
    del transport._activity[:]
    self.assertEqual([], transport._activity)
    # search for one key
    found_nodes = list(index.iter_entries([nodes[30][0]]))
    bare_nodes = []
    for node in found_nodes:
        self.assertTrue(node[0] is index)
        bare_nodes.append(node[1:])
    # Should be as long as the nodes we supplied
    self.assertEqual(1, len(found_nodes))
    # Should have the same content
    self.assertEqual(nodes[30], bare_nodes[0])
    # Should have read the root node, then one leaf page:
    self.assertEqual([('readv', 'index', [(0, 4096)], False, None),
                      ('readv', 'index', [(8192, 4096), ], False, None)],
                     transport._activity)
def test_iter_key_prefix_1_element_key_None(self):
    """A None key element in a 1-element prefix raises BadIndexKey."""
    index = self.make_index()
    self.assertRaises(errors.BadIndexKey, list,
                      index.iter_entries_prefix([(None, )]))
def test_iter_key_prefix_wrong_length(self):
    """Prefix tuples of the wrong width raise BadIndexKey."""
    index = self.make_index()
    self.assertRaises(errors.BadIndexKey, list,
                      index.iter_entries_prefix([('foo', None)]))
    index = self.make_index(key_elements=2)
    self.assertRaises(errors.BadIndexKey, list,
                      index.iter_entries_prefix([('foo', )]))
    self.assertRaises(errors.BadIndexKey, list,
                      index.iter_entries_prefix([('foo', None, None)]))
def test_iter_key_prefix_1_key_element_no_refs(self):
    """Exact 1-element prefixes match their entries (no reference lists)."""
    index = self.make_index( nodes=[
        (('name', ), 'data', ()),
        (('ref', ), 'refdata', ())])
    self.assertEqual(set([(index, ('name', ), 'data'),
        (index, ('ref', ), 'refdata')]),
        set(index.iter_entries_prefix([('name', ), ('ref', )])))
def test_iter_key_prefix_1_key_element_refs(self):
    """Exact 1-element prefixes match entries with reference lists."""
    index = self.make_index(1, nodes=[
        (('name', ), 'data', ([('ref', )], )),
        (('ref', ), 'refdata', ([], ))])
    self.assertEqual(set([(index, ('name', ), 'data', ((('ref',),),)),
        (index, ('ref', ), 'refdata', ((), ))]),
        set(index.iter_entries_prefix([('name', ), ('ref', )])))
def test_iter_key_prefix_2_key_element_no_refs(self):
    """2-element prefixes: exact match, and None as a wildcard element."""
    index = self.make_index(key_elements=2, nodes=[
        (('name', 'fin1'), 'data', ()),
        (('name', 'fin2'), 'beta', ()),
        (('ref', 'erence'), 'refdata', ())])
    self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
        (index, ('ref', 'erence'), 'refdata')]),
        set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
    self.assertEqual(set([(index, ('name', 'fin1'), 'data'),
        (index, ('name', 'fin2'), 'beta')]),
        set(index.iter_entries_prefix([('name', None)])))
def test_iter_key_prefix_2_key_element_refs(self):
    """2-element prefixes resolve reference lists, wildcards included."""
    index = self.make_index(1, key_elements=2, nodes=[
        (('name', 'fin1'), 'data', ([('ref', 'erence')], )),
        (('name', 'fin2'), 'beta', ([], )),
        (('ref', 'erence'), 'refdata', ([], ))])
    self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
        (index, ('ref', 'erence'), 'refdata', ((), ))]),
        set(index.iter_entries_prefix([('name', 'fin1'), ('ref', 'erence')])))
    self.assertEqual(set([(index, ('name', 'fin1'), 'data', ((('ref', 'erence'),),)),
        (index, ('name', 'fin2'), 'beta', ((), ))]),
        set(index.iter_entries_prefix([('name', None)])))
# XXX: external_references tests are duplicated in test_index. We
# probably should have per_graph_index tests...
def test_external_references_no_refs(self):
    """external_references on an index with no ref lists is an error."""
    index = self.make_index(ref_lists=0, nodes=[])
    self.assertRaises(ValueError, index.external_references, 0)
def test_external_references_no_results(self):
    """No dangling references yields an empty set."""
    index = self.make_index(ref_lists=1, nodes=[
        (('key',), 'value', ([],))])
    self.assertEqual(set(), index.external_references(0))
def test_external_references_missing_ref(self):
    """A reference to a key not in the index is reported as external."""
    missing_key = ('missing',)
    index = self.make_index(ref_lists=1, nodes=[
        (('key',), 'value', ([missing_key],))])
    self.assertEqual(set([missing_key]), index.external_references(0))
def test_external_references_multiple_ref_lists(self):
    """Each reference list is checked independently by index position."""
    missing_key = ('missing',)
    index = self.make_index(ref_lists=2, nodes=[
        (('key',), 'value', ([], [missing_key]))])
    self.assertEqual(set([]), index.external_references(0))
    self.assertEqual(set([missing_key]), index.external_references(1))
def test_external_references_two_records(self):
    """References satisfied within the index are not external."""
    index = self.make_index(ref_lists=1, nodes=[
        (('key-1',), 'value', ([('key-2',)],)),
        (('key-2',), 'value', ([],)),
        ])
    self.assertEqual(set([]), index.external_references(0))
def test__find_ancestors_one_page(self):
    """_find_ancestors resolves a whole chain held on a single page."""
    key1 = ('key-1',)
    key2 = ('key-2',)
    index = self.make_index(ref_lists=1, key_elements=1, nodes=[
        (key1, 'value', ([key2],)),
        (key2, 'value', ([],)),
        ])
    parent_map = {}
    missing_keys = set()
    search_keys = index._find_ancestors([key1], 0, parent_map, missing_keys)
    self.assertEqual({key1: (key2,), key2: ()}, parent_map)
    self.assertEqual(set(), missing_keys)
    self.assertEqual(set(), search_keys)
def test__find_ancestors_one_page_w_missing(self):
    """A key absent from the page it would live on is reported missing."""
    key1 = ('key-1',)
    key2 = ('key-2',)
    key3 = ('key-3',)
    index = self.make_index(ref_lists=1, key_elements=1, nodes=[
        (key1, 'value', ([key2],)),
        (key2, 'value', ([],)),
        ])
    parent_map = {}
    missing_keys = set()
    search_keys = index._find_ancestors([key2, key3], 0, parent_map,
                                        missing_keys)
    self.assertEqual({key2: ()}, parent_map)
    # we know that key3 is missing because we read the page that it would
    # be on
    self.assertEqual(set([key3]), missing_keys)
    self.assertEqual(set(), search_keys)
def test__find_ancestors_one_parent_missing(self):
    """An unresolved parent comes back as a search key, then as missing."""
    key1 = ('key-1',)
    key2 = ('key-2',)
    key3 = ('key-3',)
    index = self.make_index(ref_lists=1, key_elements=1, nodes=[
        (key1, 'value', ([key2],)),
        (key2, 'value', ([key3],)),
        ])
    parent_map = {}
    missing_keys = set()
    search_keys = index._find_ancestors([key1], 0, parent_map,
                                        missing_keys)
    self.assertEqual({key1: (key2,), key2: (key3,)}, parent_map)
    self.assertEqual(set(), missing_keys)
    # all we know is that key3 wasn't present on the page we were reading
    # but if you look, the last key is key2 which comes before key3, so we
    # don't know whether key3 would land on this page or not.
    self.assertEqual(set([key3]), search_keys)
    search_keys = index._find_ancestors(search_keys, 0, parent_map,
                                        missing_keys)
    # passing it back in, we are sure it is 'missing'
    self.assertEqual({key1: (key2,), key2: (key3,)}, parent_map)
    self.assertEqual(set([key3]), missing_keys)
    self.assertEqual(set([]), search_keys)
def test__find_ancestors_dont_search_known(self):
    """Keys already in parent_map are not searched again."""
    key1 = ('key-1',)
    key2 = ('key-2',)
    key3 = ('key-3',)
    index = self.make_index(ref_lists=1, key_elements=1, nodes=[
        (key1, 'value', ([key2],)),
        (key2, 'value', ([key3],)),
        (key3, 'value', ([],)),
        ])
    # We already know about key2, so we won't try to search for key3
    parent_map = {key2: (key3,)}
    missing_keys = set()
    search_keys = index._find_ancestors([key1], 0, parent_map,
                                        missing_keys)
    self.assertEqual({key1: (key2,), key2: (key3,)}, parent_map)
    self.assertEqual(set(), missing_keys)
    self.assertEqual(set(), search_keys)
def test__find_ancestors_multiple_pages(self):
    """Ancestry search stops at a leaf-page boundary and resumes from it."""
    # We need to use enough keys that we actually cause a split
    start_time = 1249671539
    email = "joebob@example.com"
    nodes = []
    ref_lists = ((),)
    rev_keys = []
    for i in xrange(400):
        rev_id = '%s-%s-%s' % (email,
                               osutils.compact_date(start_time + i),
                               osutils.rand_chars(16))
        rev_key = (rev_id,)
        nodes.append((rev_key, 'value', ref_lists))
        # We have a ref 'list' of length 1, with a list of parents, with 1
        # parent which is a key
        ref_lists = ((rev_key,),)
        rev_keys.append(rev_key)
    index = self.make_index(ref_lists=1, key_elements=1, nodes=nodes)
    self.assertEqual(400, index.key_count())
    self.assertEqual(3, len(index._row_offsets))
    nodes = dict(index._read_nodes([1, 2]))
    l1 = nodes[1]
    l2 = nodes[2]
    min_l2_key = l2.min_key
    max_l1_key = l1.max_key
    self.assertTrue(max_l1_key < min_l2_key)
    parents_min_l2_key = l2.keys[min_l2_key][1][0]
    self.assertEqual((l1.max_key,), parents_min_l2_key)
    # Now, whatever key we select that would fall on the second page,
    # should give us all the parents until the page break
    key_idx = rev_keys.index(min_l2_key)
    next_key = rev_keys[key_idx+1]
    # So now when we get the parent map, we should get the key we are
    # looking for, min_l2_key, and then a reference to go look for the
    # parent of that key
    parent_map = {}
    missing_keys = set()
    search_keys = index._find_ancestors([next_key], 0, parent_map,
                                        missing_keys)
    self.assertEqual([min_l2_key, next_key], sorted(parent_map))
    self.assertEqual(set(), missing_keys)
    self.assertEqual(set([max_l1_key]), search_keys)
    parent_map = {}
    search_keys = index._find_ancestors([max_l1_key], 0, parent_map,
                                        missing_keys)
    self.assertEqual(sorted(l1.keys), sorted(parent_map))
    self.assertEqual(set(), missing_keys)
    self.assertEqual(set(), search_keys)
def test__find_ancestors_empty_index(self):
    """Against an empty index every requested key is simply missing."""
    index = self.make_index(ref_lists=1, key_elements=1, nodes=[])
    parent_map = {}
    missing_keys = set()
    search_keys = index._find_ancestors([('one',), ('two',)], 0, parent_map,
                                        missing_keys)
    self.assertEqual(set(), search_keys)
    self.assertEqual({}, parent_map)
    self.assertEqual(set([('one',), ('two',)]), missing_keys)
def test_supports_unlimited_cache(self):
    """unlimited_cache=True swaps the bounded caches for plain dicts."""
    builder = btree_index.BTreeBuilder(reference_lists=0, key_elements=1)
    # We need enough nodes to cause a page split (so we have both an
    # internal node and a couple leaf nodes. 500 seems to be enough.)
    nodes = self.make_nodes(500, 1, 0)
    for node in nodes:
        builder.add_node(*node)
    stream = builder.finish()
    trans = get_transport(self.get_url())
    size = trans.put_file('index', stream)
    index = btree_index.BTreeGraphIndex(trans, 'index', size)
    self.assertEqual(500, index.key_count())
    # We have an internal node
    self.assertEqual(2, len(index._row_lengths))
    # We have at least 2 leaf nodes
    self.assertTrue(index._row_lengths[-1] >= 2)
    self.assertIsInstance(index._leaf_node_cache, lru_cache.LRUCache)
    self.assertEqual(btree_index._NODE_CACHE_SIZE,
                     index._leaf_node_cache._max_cache)
    self.assertIsInstance(index._internal_node_cache, fifo_cache.FIFOCache)
    self.assertEqual(100, index._internal_node_cache._max_cache)
    # No change if unlimited_cache=False is passed
    index = btree_index.BTreeGraphIndex(trans, 'index', size,
                                        unlimited_cache=False)
    self.assertIsInstance(index._leaf_node_cache, lru_cache.LRUCache)
    self.assertEqual(btree_index._NODE_CACHE_SIZE,
                     index._leaf_node_cache._max_cache)
    self.assertIsInstance(index._internal_node_cache, fifo_cache.FIFOCache)
    self.assertEqual(100, index._internal_node_cache._max_cache)
    index = btree_index.BTreeGraphIndex(trans, 'index', size,
                                        unlimited_cache=True)
    self.assertIsInstance(index._leaf_node_cache, dict)
    self.assertIs(type(index._internal_node_cache), dict)
    # Exercise the lookup code
    entries = set(index.iter_entries([n[0] for n in nodes]))
    self.assertEqual(500, len(entries))
class TestBTreeNodes(BTreeTestCase):
    """Tests for the leaf/internal node parsers.

    Parameterised by load_tests: ``self.parse_btree`` is either the pure
    Python or the compiled serializer module.

    NOTE(review): the original file defined ``test_LeafNode_2_2`` twice
    with identical bodies; the second silently shadowed the first, so the
    duplicate has been removed here.
    """

    def setUp(self):
        BTreeTestCase.setUp(self)
        self.overrideAttr(btree_index, '_btree_serializer', self.parse_btree)

    def test_LeafNode_1_0(self):
        node_bytes = ("type=leaf\n"
            "0000000000000000000000000000000000000000\x00\x00value:0\n"
            "1111111111111111111111111111111111111111\x00\x00value:1\n"
            "2222222222222222222222222222222222222222\x00\x00value:2\n"
            "3333333333333333333333333333333333333333\x00\x00value:3\n"
            "4444444444444444444444444444444444444444\x00\x00value:4\n")
        node = btree_index._LeafNode(node_bytes, 1, 0)
        # We do direct access, or don't care about order, to leaf nodes most of
        # the time, so a dict is useful:
        self.assertEqual({
            ("0000000000000000000000000000000000000000",): ("value:0", ()),
            ("1111111111111111111111111111111111111111",): ("value:1", ()),
            ("2222222222222222222222222222222222222222",): ("value:2", ()),
            ("3333333333333333333333333333333333333333",): ("value:3", ()),
            ("4444444444444444444444444444444444444444",): ("value:4", ()),
            }, node.keys)

    def test_LeafNode_2_2(self):
        node_bytes = ("type=leaf\n"
            "00\x0000\x00\t00\x00ref00\x00value:0\n"
            "00\x0011\x0000\x00ref00\t00\x00ref00\r01\x00ref01\x00value:1\n"
            "11\x0033\x0011\x00ref22\t11\x00ref22\r11\x00ref22\x00value:3\n"
            "11\x0044\x00\t11\x00ref00\x00value:4\n"
            ""
            )
        node = btree_index._LeafNode(node_bytes, 2, 2)
        # We do direct access, or don't care about order, to leaf nodes most of
        # the time, so a dict is useful:
        self.assertEqual({
            ('00', '00'): ('value:0', ((), (('00', 'ref00'),))),
            ('00', '11'): ('value:1',
                ((('00', 'ref00'),), (('00', 'ref00'), ('01', 'ref01')))),
            ('11', '33'): ('value:3',
                ((('11', 'ref22'),), (('11', 'ref22'), ('11', 'ref22')))),
            ('11', '44'): ('value:4', ((), (('11', 'ref00'),)))
            }, node.keys)

    def test_InternalNode_1(self):
        node_bytes = ("type=internal\n"
            "offset=1\n"
            "0000000000000000000000000000000000000000\n"
            "1111111111111111111111111111111111111111\n"
            "2222222222222222222222222222222222222222\n"
            "3333333333333333333333333333333333333333\n"
            "4444444444444444444444444444444444444444\n"
            )
        node = btree_index._InternalNode(node_bytes)
        # We want to bisect to find the right children from this node, so a
        # vector is most useful.
        self.assertEqual([
            ("0000000000000000000000000000000000000000",),
            ("1111111111111111111111111111111111111111",),
            ("2222222222222222222222222222222222222222",),
            ("3333333333333333333333333333333333333333",),
            ("4444444444444444444444444444444444444444",),
            ], node.keys)
        self.assertEqual(1, node.offset)

    def assertFlattened(self, expected, key, value, refs):
        flat_key, flat_line = self.parse_btree._flatten_node(
            (None, key, value, refs), bool(refs))
        self.assertEqual('\x00'.join(key), flat_key)
        self.assertEqual(expected, flat_line)

    def test__flatten_node(self):
        self.assertFlattened('key\0\0value\n', ('key',), 'value', [])
        self.assertFlattened('key\0tuple\0\0value str\n',
                             ('key', 'tuple'), 'value str', [])
        self.assertFlattened('key\0tuple\0triple\0\0value str\n',
                             ('key', 'tuple', 'triple'), 'value str', [])
        self.assertFlattened('k\0t\0s\0ref\0value str\n',
                             ('k', 't', 's'), 'value str', [[('ref',)]])
        self.assertFlattened('key\0tuple\0ref\0key\0value str\n',
                             ('key', 'tuple'), 'value str', [[('ref', 'key')]])
        self.assertFlattened("00\x0000\x00\t00\x00ref00\x00value:0\n",
            ('00', '00'), 'value:0', ((), (('00', 'ref00'),)))
        self.assertFlattened(
            "00\x0011\x0000\x00ref00\t00\x00ref00\r01\x00ref01\x00value:1\n",
            ('00', '11'), 'value:1',
            ((('00', 'ref00'),), (('00', 'ref00'), ('01', 'ref01'))))
        self.assertFlattened(
            "11\x0033\x0011\x00ref22\t11\x00ref22\r11\x00ref22\x00value:3\n",
            ('11', '33'), 'value:3',
            ((('11', 'ref22'),), (('11', 'ref22'), ('11', 'ref22'))))
        self.assertFlattened(
            "11\x0044\x00\t11\x00ref00\x00value:4\n",
            ('11', '44'), 'value:4', ((), (('11', 'ref00'),)))
class TestCompiledBtree(tests.TestCase):
    """Smoke test that the compiled btree parser extension is importable."""

    def test_exists(self):
        # This is just to let the user know if they don't have the feature
        # available
        self.requireFeature(compiled_btreeparser_feature)
class TestMultiBisectRight(tests.TestCase):
    """Tests for BTreeGraphIndex._multi_bisect_right.

    The helper groups sorted search keys by the fixed-key slot each one
    bisects into, returning (offset, keys) pairs.
    """

    def assertMultiBisectRight(self, offsets, search_keys, fixed_keys):
        self.assertEqual(offsets,
                         btree_index.BTreeGraphIndex._multi_bisect_right(
                            search_keys, fixed_keys))

    def test_after(self):
        self.assertMultiBisectRight([(1, ['b'])], ['b'], ['a'])
        self.assertMultiBisectRight([(3, ['e', 'f', 'g'])],
                                    ['e', 'f', 'g'], ['a', 'b', 'c'])

    def test_before(self):
        self.assertMultiBisectRight([(0, ['a'])], ['a'], ['b'])
        self.assertMultiBisectRight([(0, ['a', 'b', 'c', 'd'])],
                                    ['a', 'b', 'c', 'd'], ['e', 'f', 'g'])

    def test_exact(self):
        self.assertMultiBisectRight([(1, ['a'])], ['a'], ['a'])
        self.assertMultiBisectRight([(1, ['a']), (2, ['b'])], ['a', 'b'], ['a', 'b'])
        self.assertMultiBisectRight([(1, ['a']), (3, ['c'])],
                                    ['a', 'c'], ['a', 'b', 'c'])

    def test_inbetween(self):
        self.assertMultiBisectRight([(1, ['b'])], ['b'], ['a', 'c'])
        self.assertMultiBisectRight([(1, ['b', 'c', 'd']), (2, ['f', 'g'])],
                                    ['b', 'c', 'd', 'f', 'g'], ['a', 'e', 'h'])

    def test_mixed(self):
        self.assertMultiBisectRight([(0, ['a', 'b']), (2, ['d', 'e']),
                                     (4, ['g', 'h'])],
                                    ['a', 'b', 'd', 'e', 'g', 'h'],
                                    ['c', 'd', 'f', 'g'])
class TestExpandOffsets(tests.TestCase):
1346
def make_index(self, size, recommended_pages=None):
    """Make an index with a generic size.

    This doesn't actually create anything on disk, it just primes a
    BTreeGraphIndex with the recommended information.
    """
    index = btree_index.BTreeGraphIndex(get_transport('memory:///'),
                                        'test-index', size=size)
    if recommended_pages is not None:
        index._recommended_pages = recommended_pages
    return index
def set_cached_offsets(self, index, cached_offsets):
    """Monkeypatch to give a canned answer for _get_offsets_for...()."""
    def _get_offsets_to_cached_pages():
        cached = set(cached_offsets)
        return cached
    index._get_offsets_to_cached_pages = _get_offsets_to_cached_pages
def prepare_index(self, index, node_ref_lists, key_length, key_count,
                  row_lengths, cached_offsets):
    """Setup the BTreeGraphIndex with some pre-canned information."""
    index.node_ref_lists = node_ref_lists
    index._key_length = key_length
    index._key_count = key_count
    index._row_lengths = row_lengths
    index._compute_row_offsets()
    # A trivial root so the index believes it has been opened already.
    index._root_node = btree_index._InternalNode('internal\noffset=0\n')
    self.set_cached_offsets(index, cached_offsets)
def make_100_node_index(self):
    """Return a primed two-row index (1 root + 99 leaves)."""
    index = self.make_index(4096*100, 6)
    # Consider we've already made a single request at the middle
    self.prepare_index(index, node_ref_lists=0, key_length=1,
                       key_count=1000, row_lengths=[1, 99],
                       cached_offsets=[0, 50])
    return index
def make_1000_node_index(self):
    """Return a primed three-row index (1 root + 9 internal + 990 leaves)."""
    index = self.make_index(4096*1000, 6)
    # Pretend we've already made a single request in the middle
    self.prepare_index(index, node_ref_lists=0, key_length=1,
                       key_count=90000, row_lengths=[1, 9, 990],
                       cached_offsets=[0, 5, 500])
    return index
def assertNumPages(self, expected_pages, index, size):
    """Assert the page count computed for an index of ``size`` bytes."""
    index._size = size
    self.assertEqual(expected_pages, index._compute_total_pages_in_index())
def assertExpandOffsets(self, expected, index, offsets):
    """Assert that expanding ``offsets`` yields ``expected``."""
    self.assertEqual(expected, index._expand_offsets(offsets),
                     'We did not get the expected value after expanding'
                     ' %s' % (offsets,))
def test_default_recommended_pages(self):
    """The default read recommendation on a local transport is one page."""
    index = self.make_index(None)
    # local transport recommends 4096 byte reads, which is 1 page
    self.assertEqual(1, index._recommended_pages)
def test__compute_total_pages_in_index(self):
    """Page count is size rounded up to whole 4096-byte pages."""
    index = self.make_index(None)
    self.assertNumPages(1, index, 1024)
    self.assertNumPages(1, index, 4095)
    self.assertNumPages(1, index, 4096)
    self.assertNumPages(2, index, 4097)
    self.assertNumPages(2, index, 8192)
    self.assertNumPages(76, index, 4096*75 + 10)
def test__find_layer_start_and_stop(self):
    """_find_layer_first_and_end maps a page offset to its row's range."""
    index = self.make_1000_node_index()
    self.assertEqual((0, 1), index._find_layer_first_and_end(0))
    self.assertEqual((1, 10), index._find_layer_first_and_end(1))
    self.assertEqual((1, 10), index._find_layer_first_and_end(9))
    self.assertEqual((10, 1000), index._find_layer_first_and_end(10))
    self.assertEqual((10, 1000), index._find_layer_first_and_end(99))
    self.assertEqual((10, 1000), index._find_layer_first_and_end(999))
def test_unknown_size(self):
    """Offsets are returned unexpanded when the file size is unknown."""
    # We should not expand if we don't know the file size
    index = self.make_index(None, 10)
    self.assertExpandOffsets([0], index, [0])
    self.assertExpandOffsets([1, 4, 9], index, [1, 4, 9])
def test_more_than_recommended(self):
    """Requests already at/over the recommended size are left alone."""
    index = self.make_index(4096*100, 2)
    self.assertExpandOffsets([1, 10], index, [1, 10])
    self.assertExpandOffsets([1, 10, 20], index, [1, 10, 20])
def test_read_all_from_root(self):
    """A small index expands a root read into reading every page."""
    index = self.make_index(4096*10, 20)
    self.assertExpandOffsets(range(10), index, [0])
def test_read_all_when_cached(self):
    """When few pages remain uncached, one request grabs all of them."""
    # We've read enough that we can grab all the rest in a single request
    index = self.make_index(4096*10, 5)
    self.prepare_index(index, node_ref_lists=0, key_length=1,
                       key_count=1000, row_lengths=[1, 9],
                       cached_offsets=[0, 1, 2, 5, 6])
    # It should fill the remaining nodes, regardless of the one requested
    self.assertExpandOffsets([3, 4, 7, 8, 9], index, [3])
    self.assertExpandOffsets([3, 4, 7, 8, 9], index, [8])
    self.assertExpandOffsets([3, 4, 7, 8, 9], index, [9])
def test_no_root_node(self):
    """Before the root is read there is nothing to base expansion on."""
    index = self.make_index(4096*10, 5)
    self.assertExpandOffsets([0], index, [0])
def test_include_neighbors(self):
    """Expansion grows around each request up to the recommended size."""
    index = self.make_100_node_index()
    # We expand in both directions, until we have at least 'recommended'
    # pages
    self.assertExpandOffsets([9, 10, 11, 12, 13, 14, 15], index, [12])
    self.assertExpandOffsets([88, 89, 90, 91, 92, 93, 94], index, [91])
    # If we hit an 'edge' we continue in the other direction
    self.assertExpandOffsets([1, 2, 3, 4, 5, 6], index, [2])
    self.assertExpandOffsets([94, 95, 96, 97, 98, 99], index, [98])

    # Requesting many nodes will expand all locations equally
    self.assertExpandOffsets([1, 2, 3, 80, 81, 82], index, [2, 81])
    self.assertExpandOffsets([1, 2, 3, 9, 10, 11, 80, 81, 82], index,
                             [2, 10, 81])
def test_stop_at_cached(self):
    """Expansion never crosses a page that is already cached."""
    index = self.make_100_node_index()
    self.set_cached_offsets(index, [0, 10, 19])
    self.assertExpandOffsets([11, 12, 13, 14, 15, 16], index, [11])
    self.assertExpandOffsets([11, 12, 13, 14, 15, 16], index, [12])
    self.assertExpandOffsets([12, 13, 14, 15, 16, 17, 18], index, [15])
    self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [16])
    self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [17])
    self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [18])
def test_cannot_fully_expand(self):
    """Being boxed in by cached pages terminates expansion cleanly."""
    index = self.make_100_node_index()
    self.set_cached_offsets(index, [0, 10, 12])
    # We don't go into an endless loop if we are bound by cached nodes
    self.assertExpandOffsets([11], index, [11])
def test_overlap(self):
    """Overlapping expansions of nearby requests are merged."""
    index = self.make_100_node_index()
    self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [12, 13])
    self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [11, 14])
def test_stay_within_layer(self):
    """Expansion does not spill across a row (layer) boundary."""
    index = self.make_1000_node_index()
    # When expanding a request, we won't read nodes from the next layer
    self.assertExpandOffsets([1, 2, 3, 4], index, [2])
    self.assertExpandOffsets([6, 7, 8, 9], index, [6])
    self.assertExpandOffsets([6, 7, 8, 9], index, [9])
    self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [10])
    self.assertExpandOffsets([10, 11, 12, 13, 14, 15, 16], index, [13])

    self.set_cached_offsets(index, [0, 4, 12])
    self.assertExpandOffsets([5, 6, 7, 8, 9], index, [7])
    self.assertExpandOffsets([10, 11], index, [11])
def test_small_requests_unexpanded(self):
1504
index = self.make_100_node_index()
1505
self.set_cached_offsets(index, [0])
1506
self.assertExpandOffsets([1], index, [1])
1507
self.assertExpandOffsets([50], index, [50])
1508
# If we request more than one node, then we'll expand
1509
self.assertExpandOffsets([49, 50, 51, 59, 60, 61], index, [50, 60])
1511
# The first pass does not expand
1512
index = self.make_1000_node_index()
1513
self.set_cached_offsets(index, [0])
1514
self.assertExpandOffsets([1], index, [1])
1515
self.set_cached_offsets(index, [0, 1])
1516
self.assertExpandOffsets([100], index, [100])
1517
self.set_cached_offsets(index, [0, 1, 100])
1518
# But after the first depth, we will expand
1519
self.assertExpandOffsets([2, 3, 4, 5, 6, 7], index, [2])
1520
self.assertExpandOffsets([2, 3, 4, 5, 6, 7], index, [4])
1521
self.set_cached_offsets(index, [0, 1, 2, 3, 4, 5, 6, 7, 100])
1522
self.assertExpandOffsets([102, 103, 104, 105, 106, 107, 108], index,