                                     (4, ['g', 'h'])],
                                    ['a', 'b', 'd', 'e', 'g', 'h'],
                                    ['c', 'd', 'f', 'g'])


class TestExpandOffsets(tests.TestCase):
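    """Tests for BTreeGraphIndex._expand_offsets and its helpers.

    These tests never touch disk: the helpers below prime a BTreeGraphIndex
    with canned sizes, row lengths and cached page offsets, then check which
    page offsets a read request is expanded to.
    """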

    def make_index(self, size, recommended_pages=None):
        """Make an index with a generic size.

        This doesn't actually create anything on disk, it just primes a
        BTreeGraphIndex with the recommended information.
        """
        index = btree_index.BTreeGraphIndex(get_transport('memory:///'),
                                            'test-index', size=size)
        if recommended_pages is not None:
            index._recommended_pages = recommended_pages
        return index

    def set_cached_offsets(self, index, cached_offsets):
        """Monkeypatch to give a canned answer for _get_offsets_for...()."""
        def _get_offsets_to_cached_pages():
            cached = set(cached_offsets)
            return cached
        index._get_offsets_to_cached_pages = _get_offsets_to_cached_pages

    def prepare_index(self, index, node_ref_lists, key_length, key_count,
                      row_lengths, cached_offsets):
        """Set up the BTreeGraphIndex with some pre-canned information."""
        index.node_ref_lists = node_ref_lists
        index._key_length = key_length
        index._key_count = key_count
        index._row_lengths = row_lengths
        index._compute_row_offsets()
        index._root_node = btree_index._InternalNode('internal\noffset=0\n')
        self.set_cached_offsets(index, cached_offsets)
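
    # A 100-page index: one root page over 99 leaf pages, with reads of up to
    # 6 pages recommended; pages 0 and 50 start out cached.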
    def make_100_node_index(self):
        index = self.make_index(4096*100, 6)
        # Consider we've already made a single request at the middle
        self.prepare_index(index, node_ref_lists=0, key_length=1,
                           key_count=1000, row_lengths=[1, 99],
                           cached_offsets=[0, 50])
        return index
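
    # A 1000-page index: one root, 9 internal and 990 leaf pages, again with
    # 6-page reads recommended; pages 0, 5 and 500 start out cached.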
    def make_1000_node_index(self):
        index = self.make_index(4096*1000, 6)
        # Pretend we've already made a single request in the middle
        self.prepare_index(index, node_ref_lists=0, key_length=1,
                           key_count=90000, row_lengths=[1, 9, 990],
                           cached_offsets=[0, 5, 500])
        return index
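
    # Prime the index with a raw byte size and check how many 4096-byte pages
    # _compute_total_pages_in_index() reports for it.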
    def assertNumPages(self, expected_pages, index, size):
        index._size = size
        self.assertEqual(expected_pages, index._compute_total_pages_in_index())
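
    # Check which page offsets _expand_offsets() turns a request into.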
    def assertExpandOffsets(self, expected, index, offsets):
        self.assertEqual(expected, index._expand_offsets(offsets),
                         'We did not get the expected value after expanding'
                         ' %s' % (offsets,))

    def test_default_recommended_pages(self):
        index = self.make_index(None)
        # local transport recommends 4096 byte reads, which is 1 page
        self.assertEqual(1, index._recommended_pages)

    def test__compute_total_pages_in_index(self):
        index = self.make_index(None)
        self.assertNumPages(1, index, 1024)
        self.assertNumPages(1, index, 4095)
        self.assertNumPages(1, index, 4096)
        self.assertNumPages(2, index, 4097)
        self.assertNumPages(2, index, 8192)
        self.assertNumPages(76, index, 4096*75 + 10)

    def test__find_layer_start_and_stop(self):
        index = self.make_1000_node_index()
        self.assertEqual((0, 1), index._find_layer_first_and_end(0))
        self.assertEqual((1, 10), index._find_layer_first_and_end(1))
        self.assertEqual((1, 10), index._find_layer_first_and_end(9))
        self.assertEqual((10, 1000), index._find_layer_first_and_end(10))
        self.assertEqual((10, 1000), index._find_layer_first_and_end(99))
        self.assertEqual((10, 1000), index._find_layer_first_and_end(999))

    def test_unknown_size(self):
        # We should not expand if we don't know the file size
        index = self.make_index(None, 10)
        self.assertExpandOffsets([0], index, [0])
        self.assertExpandOffsets([1, 4, 9], index, [1, 4, 9])
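
    # Requests already at or above _recommended_pages are left unexpanded.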
    def test_more_than_recommended(self):
        index = self.make_index(4096*100, 2)
        self.assertExpandOffsets([1, 10], index, [1, 10])
        self.assertExpandOffsets([1, 10, 20], index, [1, 10, 20])
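
    # A 10-page index with 20 pages recommended: one request reads it all.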
    def test_read_all_from_root(self):
        index = self.make_index(4096*10, 20)
        self.assertExpandOffsets(range(10), index, [0])

    def test_read_all_when_cached(self):
        # We've read enough that we can grab all the rest in a single request
        index = self.make_index(4096*10, 5)
        self.prepare_index(index, node_ref_lists=0, key_length=1,
                           key_count=1000, row_lengths=[1, 9],
                           cached_offsets=[0, 1, 2, 5, 6])
        # It should fill the remaining nodes, regardless of the one requested
        self.assertExpandOffsets([3, 4, 7, 8, 9], index, [3])
        self.assertExpandOffsets([3, 4, 7, 8, 9], index, [8])
        self.assertExpandOffsets([3, 4, 7, 8, 9], index, [9])
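
    # The root page hasn't been read yet, so a request for it is not expanded.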
    def test_no_root_node(self):
        index = self.make_index(4096*10, 5)
        self.assertExpandOffsets([0], index, [0])

    def test_include_neighbors(self):
        index = self.make_100_node_index()
        # We expand in both directions, until we have at least 'recommended'
        # pages
        self.assertExpandOffsets([9, 10, 11, 12, 13, 14, 15], index, [12])
        self.assertExpandOffsets([88, 89, 90, 91, 92, 93, 94], index, [91])
        # If we hit an 'edge' we continue in the other direction
        self.assertExpandOffsets([1, 2, 3, 4, 5, 6], index, [2])
        self.assertExpandOffsets([94, 95, 96, 97, 98, 99], index, [98])

        # Requesting many nodes will expand all locations equally
        self.assertExpandOffsets([1, 2, 3, 80, 81, 82], index, [2, 81])
        self.assertExpandOffsets([1, 2, 3, 9, 10, 11, 80, 81, 82], index,
                                 [2, 10, 81])
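
    # Expansion stops when it runs into pages that are already cached.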
    def test_stop_at_cached(self):
        index = self.make_100_node_index()
        self.set_cached_offsets(index, [0, 10, 19])
        self.assertExpandOffsets([11, 12, 13, 14, 15, 16], index, [11])
        self.assertExpandOffsets([11, 12, 13, 14, 15, 16], index, [12])
        self.assertExpandOffsets([12, 13, 14, 15, 16, 17, 18], index, [15])
        self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [16])
        self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [17])
        self.assertExpandOffsets([13, 14, 15, 16, 17, 18], index, [18])

    def test_cannot_fully_expand(self):
        index = self.make_100_node_index()
        self.set_cached_offsets(index, [0, 10, 12])
        # We don't go into an endless loop if we are bound by cached nodes
        self.assertExpandOffsets([11], index, [11])
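
    # Expansions of nearby requests merge rather than duplicating pages.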
    def test_overlap(self):
        index = self.make_100_node_index()
        self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [12, 13])
        self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [11, 14])

    def test_stay_within_layer(self):
        index = self.make_1000_node_index()
        # When expanding a request, we won't read nodes from the next layer
        self.assertExpandOffsets([1, 2, 3, 4], index, [2])
        self.assertExpandOffsets([6, 7, 8, 9], index, [6])
        self.assertExpandOffsets([6, 7, 8, 9], index, [9])
        self.assertExpandOffsets([10, 11, 12, 13, 14, 15], index, [10])
        self.assertExpandOffsets([10, 11, 12, 13, 14, 15, 16], index, [13])

        self.set_cached_offsets(index, [0, 4, 12])
        self.assertExpandOffsets([5, 6, 7, 8, 9], index, [7])
        self.assertExpandOffsets([10, 11], index, [11])

    def test_small_requests_unexpanded(self):
        index = self.make_100_node_index()
        self.set_cached_offsets(index, [0])
        self.assertExpandOffsets([1], index, [1])
        self.assertExpandOffsets([50], index, [50])
        # If we request more than one node, then we'll expand
        self.assertExpandOffsets([49, 50, 51, 59, 60, 61], index, [50, 60])

        # The first pass does not expand
        index = self.make_1000_node_index()
        self.set_cached_offsets(index, [0])
        self.assertExpandOffsets([1], index, [1])
        self.set_cached_offsets(index, [0, 1])
        self.assertExpandOffsets([100], index, [100])
        self.set_cached_offsets(index, [0, 1, 100])
        # But after the first depth, we will expand
        self.assertExpandOffsets([2, 3, 4, 5, 6, 7], index, [2])
        self.assertExpandOffsets([2, 3, 4, 5, 6, 7], index, [4])
        self.set_cached_offsets(index, [0, 1, 2, 3, 4, 5, 6, 7, 100])
        self.assertExpandOffsets([102, 103, 104, 105, 106, 107, 108], index,
                                 [105])