# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Persistent maps from tuple_of_strings->string using CHK stores.

Overview and current status:

The CHKMap class implements a dict from tuple_of_strings->string by using a trie
with internal nodes of 8-bit fan out; the key tuples are mapped to strings by
joining them by \x00, and \x00 padding shorter keys out to the length of the
longest key. Leaf nodes are packed as densely as possible, and internal nodes
are all an additional 8-bits wide leading to a sparse upper tree.

Updates to a CHKMap are done preferentially via the apply_delta method, to
allow optimisation of the update operation; but individual map/unmap calls are
possible and supported. All changes via map/unmap are buffered in memory until
the _save method is called to force serialisation of the tree. apply_delta
performs a _save implicitly.

TODO:
-----

Densely packed upper nodes.

"""

import heapq

from bzrlib import lazy_import
lazy_import.lazy_import(globals(), """
from bzrlib import versionedfile
""")
from bzrlib import (
    lru_cache,
    osutils,
    registry,
    trace,
    )

# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
# out, it takes 3.1MB to cache the layer.
_PAGE_CACHE_SIZE = 4*1024*1024
# We are caching bytes so len(value) is perfectly accurate
_page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)

# If a ChildNode falls below this many bytes, we check for a remap
_INTERESTING_NEW_SIZE = 50
# If a ChildNode shrinks by more than this amount, we check for a remap
_INTERESTING_SHRINKAGE_LIMIT = 20
# If we delete more than this many nodes applying a delta, we check for a remap
_INTERESTING_DELETES_LIMIT = 5


def _search_key_plain(key):
    """Map the key tuple into a search string that just uses the key bytes."""
    return '\x00'.join(key)


search_key_registry = registry.Registry()
search_key_registry.register('plain', _search_key_plain)
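

# For example, the 'plain' search key function maps the key tuple
# ('parent', 'name') to the search string 'parent\x00name'. (Illustrative;
# the hash based search key functions registered at the bottom of this
# module map the same tuple to fixed-width hash strings instead.)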


class CHKMap(object):
    """A persistent map from string to string backed by a CHK store."""

    def __init__(self, store, root_key, search_key_func=None):
        """Create a CHKMap object.

        :param store: The store the CHKMap is stored in.
        :param root_key: The root key of the map. None to create an empty
            CHKMap.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        """
        self._store = store
        if search_key_func is None:
            search_key_func = _search_key_plain
        self._search_key_func = search_key_func
        if root_key is None:
            self._root_node = LeafNode(search_key_func=search_key_func)
        else:
            self._root_node = self._node_key(root_key)

    def apply_delta(self, delta):
        """Apply a delta to the map.

        :param delta: An iterable of old_key, new_key, new_value tuples.
            If new_key is not None, then new_key->new_value is inserted
            into the map; if old_key is not None, then the old mapping
            of old_key is removed.
        """
        delete_count = 0
        for old, new, value in delta:
            if old is not None and old != new:
                self.unmap(old, check_remap=False)
                delete_count += 1
        for old, new, value in delta:
            if new is not None:
                self.map(new, value)
        if delete_count > _INTERESTING_DELETES_LIMIT:
            trace.mutter("checking remap as %d deletions", delete_count)
            self._check_remap()
        return self._save()
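
    # Illustrative delta (hypothetical keys): rename 'old' to 'new' and
    # insert 'fresh' in a single batched update:
    #     chkmap.apply_delta([(('old',), ('new',), 'content of new'),
    #                         (None, ('fresh',), 'content of fresh')])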

    def _ensure_root(self):
        """Ensure that the root node is an object not a key."""
        if type(self._root_node) == tuple:
            # Demand-load the root
            self._root_node = self._get_node(self._root_node)

    def _get_node(self, node):
        """Get a node.

        Note that this does not update the _items dict in objects containing a
        reference to this node. As such it does not prevent subsequent IO being
        performed.

        :param node: A tuple key or node object.
        :return: A node object.
        """
        if type(node) == tuple:
            bytes = self._read_bytes(node)
            return _deserialise(bytes, node,
                search_key_func=self._search_key_func)
        else:
            return node

    def _read_bytes(self, key):
        try:
            return _page_cache[key]
        except KeyError:
            stream = self._store.get_record_stream([key], 'unordered', True)
            bytes = stream.next().get_bytes_as('fulltext')
            _page_cache[key] = bytes
            return bytes

    def _dump_tree(self, include_keys=False):
        """Return the tree in a string representation."""
        self._ensure_root()
        res = self._dump_tree_node(self._root_node, prefix='', indent='',
                                   include_keys=include_keys)
        res.append('') # Give a trailing '\n'
        return '\n'.join(res)

    def _dump_tree_node(self, node, prefix, indent, include_keys=True):
        """For this node and all children, generate a string representation."""
        result = []
        if not include_keys:
            key_str = ''
        else:
            node_key = node.key()
            if node_key is not None:
                key_str = ' %s' % (node_key[0],)
            else:
                key_str = ' None'
        result.append('%s%r %s%s' % (indent, prefix, node.__class__.__name__,
                                     key_str))
        if type(node) is InternalNode:
            # Trigger all child nodes to get loaded
            list(node._iter_nodes(self._store))
            for prefix, sub in sorted(node._items.iteritems()):
                result.extend(self._dump_tree_node(sub, prefix, indent + '  ',
                                                   include_keys=include_keys))
        else:
            for key, value in sorted(node._items.iteritems()):
                # Don't use prefix nor indent here to line up when used in
                # tests in conjunction with assertEqualDiff
                result.append('      %r %r' % (key, value))
        return result
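
    # A dump of a small two-level map looks roughly like this (illustrative
    # values):
    #     '' InternalNode
    #       'a' LeafNode
    #           ('aaa',) 'value1'
    #       'b' LeafNode
    #           ('bbb',) 'value2'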

    @classmethod
    def from_dict(klass, store, initial_value, maximum_size=0, key_width=1,
                  search_key_func=None):
        """Create a CHKMap in store with initial_value as the content.

        :param store: The store to record initial_value in, a VersionedFiles
            object with 1-tuple keys supporting CHK key generation.
        :param initial_value: A dict to store in store. Its keys and values
            must be bytestrings.
        :param maximum_size: The maximum_size rule to apply to nodes. This
            determines the size at which no new data is added to a single node.
        :param key_width: The number of elements in each key_tuple being stored
            in this map.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        :return: The root chk of the resulting CHKMap.
        """
        result = CHKMap(store, None, search_key_func=search_key_func)
        result._root_node.set_maximum_size(maximum_size)
        result._root_node._key_width = key_width
        delta = []
        for key, value in initial_value.items():
            delta.append((None, key, value))
        return result.apply_delta(delta)
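
    # Usage sketch, assuming ``store`` is a CHK-capable VersionedFiles:
    #     root_key = CHKMap.from_dict(store, {('name',): 'data'},
    #                                 maximum_size=4096)
    #     chkmap = CHKMap(store, root_key)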

    def iter_changes(self, basis):
        """Iterate over the changes between basis and self.

        :return: An iterator of tuples: (key, old_value, new_value). Old_value
            is None for keys only in self; new_value is None for keys only in
            basis.
        """
        # Overview:
        # Read both trees in lexicographic, highest-first order.
        # Any identical nodes we skip
        # Any unique prefixes we output immediately.
        # values in a leaf node are treated as single-value nodes in the tree
        # which allows them to be not-special-cased. We know to output them
        # because their value is a string, not a key(tuple) or node.
        # corner cases to beware of when considering this function:
        # *) common references are at different heights.
        #    consider two trees:
        #    {'a': LeafNode={'aaa':'foo', 'aab':'bar'}, 'b': LeafNode={'b'}}
        #    {'a': InternalNode={'aa':LeafNode={'aaa':'foo', 'aab':'bar'},
        #                        'ab':LeafNode={'ab':'bar'}}
        #     'b': LeafNode={'b'}}
        #    the node with aaa/aab will only be encountered in the second tree
        #    after reading the 'a' subtree, but it is encountered in the first
        #    tree immediately. Variations on this may have read internal nodes
        #    like this. we want to cut the entire pending subtree when we
        #    realise we have a common node. For this we use a list of keys -
        #    the path to a node - and check the entire path is clean as we
        #    process each item.
        if self._node_key(self._root_node) == self._node_key(basis._root_node):
            return
        self._ensure_root()
        basis._ensure_root()
        excluded_keys = set()
        self_node = self._root_node
        basis_node = basis._root_node
        # A heap, each element is prefix, node(tuple/NodeObject/string),
        # key_path (a list of tuples, tail-sharing down the tree.)
        self_pending = []
        basis_pending = []
        def process_node(node, path, a_map, pending):
            # take a node and expand it
            node = a_map._get_node(node)
            if type(node) == LeafNode:
                path = (node._key, path)
                for key, value in node._items.items():
                    # For a LeafNode, the key is a serialized_key, rather than
                    # a search_key, but the heap is using search_keys
                    search_key = node._search_key_func(key)
                    heapq.heappush(pending, (search_key, key, value, path))
            else:
                # type(node) == InternalNode
                path = (node._key, path)
                for prefix, child in node._items.items():
                    heapq.heappush(pending, (prefix, None, child, path))
        def process_common_internal_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for prefix, child in self_items - basis_items:
                heapq.heappush(self_pending, (prefix, None, child, path))
            path = (basis_node._key, None)
            for prefix, child in basis_items - self_items:
                heapq.heappush(basis_pending, (prefix, None, child, path))
        def process_common_leaf_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for key, value in self_items - basis_items:
                prefix = self._search_key_func(key)
                heapq.heappush(self_pending, (prefix, key, value, path))
            path = (basis_node._key, None)
            for key, value in basis_items - self_items:
                prefix = basis._search_key_func(key)
                heapq.heappush(basis_pending, (prefix, key, value, path))
        def process_common_prefix_nodes(self_node, self_path,
                                        basis_node, basis_path):
            # Would it be more efficient if we could request both at the same
            # time?
            self_node = self._get_node(self_node)
            basis_node = basis._get_node(basis_node)
            if (type(self_node) == InternalNode
                and type(basis_node) == InternalNode):
                # Matching internal nodes
                process_common_internal_nodes(self_node, basis_node)
            elif (type(self_node) == LeafNode
                  and type(basis_node) == LeafNode):
                process_common_leaf_nodes(self_node, basis_node)
            else:
                process_node(self_node, self_path, self, self_pending)
                process_node(basis_node, basis_path, basis, basis_pending)
        process_common_prefix_nodes(self_node, None, basis_node, None)

        excluded_keys = set()
        def check_excluded(key_path):
            # Note that this is N^2, it depends on us trimming trees
            # aggressively to not become slow.
            # A better implementation would probably have a reverse map
            # back to the children of a node, and jump straight to it when
            # a common node is detected, then proceed to remove the already
            # pending children. bzrlib.graph has a searcher module with a
            # similar problem.
            while key_path is not None:
                key, key_path = key_path
                if key in excluded_keys:
                    return True
            return False

        while self_pending or basis_pending:
            if not self_pending:
                # self is exhausted: output remainder of basis
                for prefix, key, node, path in basis_pending:
                    if check_excluded(path):
                        continue
                    node = basis._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(basis._store):
                            yield (key, value, None)
                return
            elif not basis_pending:
                # basis is exhausted: output remainder of self.
                for prefix, key, node, path in self_pending:
                    if check_excluded(path):
                        continue
                    node = self._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(self._store):
                            yield (key, None, value)
                return
            else:
                # XXX: future optimisation - yield the smaller items
                # immediately rather than pushing everything on/off the
                # heaps. Applies to both internal nodes and leafnodes.
                if self_pending[0][0] < basis_pending[0][0]:
                    # expand self
                    prefix, key, node, path = heapq.heappop(self_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        process_node(node, path, self, self_pending)
                        continue
                elif self_pending[0][0] > basis_pending[0][0]:
                    # expand basis
                    prefix, key, node, path = heapq.heappop(basis_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        process_node(node, path, basis, basis_pending)
                        continue
                else:
                    # common prefix: possibly expand both
                    if self_pending[0][1] is None:
                        # process next self
                        read_self = True
                    else:
                        read_self = False
                    if basis_pending[0][1] is None:
                        # process next basis
                        read_basis = True
                    else:
                        read_basis = False
                    if not read_self and not read_basis:
                        # compare a common value
                        self_details = heapq.heappop(self_pending)
                        basis_details = heapq.heappop(basis_pending)
                        if self_details[2] != basis_details[2]:
                            yield (self_details[1],
                                basis_details[2], self_details[2])
                        continue
                    # At least one side wasn't a simple value
                    if (self._node_key(self_pending[0][2]) ==
                        self._node_key(basis_pending[0][2])):
                        # Identical pointers, skip (and don't bother adding to
                        # excluded, it won't turn up again.)
                        heapq.heappop(self_pending)
                        heapq.heappop(basis_pending)
                        continue
                    # Now we need to expand this node before we can continue
                    if read_self and read_basis:
                        # Both sides start with the same prefix, so process
                        # them in parallel
                        self_prefix, _, self_node, self_path = heapq.heappop(
                            self_pending)
                        basis_prefix, _, basis_node, basis_path = heapq.heappop(
                            basis_pending)
                        if self_prefix != basis_prefix:
                            raise AssertionError(
                                '%r != %r' % (self_prefix, basis_prefix))
                        process_common_prefix_nodes(
                            self_node, self_path,
                            basis_node, basis_path)
                        continue
                    if read_self:
                        prefix, key, node, path = heapq.heappop(self_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, self, self_pending)
                    if read_basis:
                        prefix, key, node, path = heapq.heappop(basis_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, basis, basis_pending)
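
    # iter_changes yields plain triples; e.g. two maps differing in a single
    # key could yield (illustrative): (('name',), 'old content', 'new content')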

    def iteritems(self, key_filter=None):
        """Iterate over the entire CHKMap's contents."""
        self._ensure_root()
        return self._root_node.iteritems(self._store, key_filter=key_filter)

    def key(self):
        """Return the key for this map."""
        if type(self._root_node) is tuple:
            return self._root_node
        else:
            return self._root_node._key

    def __len__(self):
        self._ensure_root()
        return len(self._root_node)

    def map(self, key, value):
        """Map a key tuple to value."""
        # Need a root object.
        self._ensure_root()
        prefix, node_details = self._root_node.map(self._store, key, value)
        if len(node_details) == 1:
            self._root_node = node_details[0][1]
        else:
            self._root_node = InternalNode(prefix,
                                search_key_func=self._search_key_func)
            self._root_node.set_maximum_size(node_details[0][1].maximum_size)
            self._root_node._key_width = node_details[0][1]._key_width
            for split, node in node_details:
                self._root_node.add_node(split, node)

    def _node_key(self, node):
        """Get the key for a node whether it's a tuple or node."""
        if type(node) == tuple:
            return node
        else:
            return node._key

    def unmap(self, key, check_remap=True):
        """Remove key from the map."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            unmapped = self._root_node.unmap(self._store, key,
                check_remap=check_remap)
        else:
            unmapped = self._root_node.unmap(self._store, key)
        self._root_node = unmapped

    def _check_remap(self):
        """Check if nodes can be collapsed."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            self._root_node._check_remap(self._store)

    def _save(self):
        """Save the map completely.

        :return: The key of the root node.
        """
        if type(self._root_node) == tuple:
            # Already saved.
            return self._root_node
        keys = list(self._root_node.serialise(self._store))
        return keys[-1]


class Node(object):
    """Base class defining the protocol for CHK Map nodes.

    :ivar _raw_size: The total size of the serialized key:value data, before
        adding the header bytes, and without prefix compression.
    """

    def __init__(self, key_width=1):
        """Create a node.

        :param key_width: The width of keys for this node.
        """
        self._key = None
        # Current number of elements
        self._len = 0
        self._maximum_size = 0
        self._key_width = key_width
        # current size in bytes
        self._raw_size = 0
        # The pointers/values this node has - meaning defined by child classes.
        self._items = {}
        # The common search prefix
        self._search_prefix = None

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return '%s(key:%s len:%s size:%s max:%s prefix:%s items:%s)' % (
            self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, items_str)

    def key(self):
        return self._key

    def __len__(self):
        return self._len

    @property
    def maximum_size(self):
        """What is the upper limit for adding references to a node."""
        return self._maximum_size

    def set_maximum_size(self, new_size):
        """Set the size threshold for nodes.

        :param new_size: The size at which no data is added to a node. 0 for
            unlimited.
        """
        self._maximum_size = new_size

    @classmethod
    def common_prefix(cls, prefix, key):
        """Given 2 strings, return the longest prefix common to both.

        :param prefix: This has been the common prefix for other keys, so it is
            more likely to be the common prefix in this case as well.
        :param key: Another string to compare to
        """
        if key.startswith(prefix):
            return prefix
        # Is there a better way to do this?
        for pos, (left, right) in enumerate(zip(prefix, key)):
            if left != right:
                pos -= 1
                break
        common = prefix[:pos+1]
        return common
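
    # For instance, common_prefix('abcdef', 'abcxyz') returns 'abc', while
    # common_prefix('abc', 'abcdef') returns 'abc' via the startswith
    # shortcut.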

    @classmethod
    def common_prefix_for_keys(cls, keys):
        """Given a list of keys, find their common prefix.

        :param keys: An iterable of strings.
        :return: The longest common prefix of all keys.
        """
        common_prefix = None
        for key in keys:
            if common_prefix is None:
                common_prefix = key
                continue
            common_prefix = cls.common_prefix(common_prefix, key)
            if not common_prefix:
                # if common_prefix is the empty string, then we know it won't
                # match anything else
                return ''
        return common_prefix


# Singleton indicating we have not computed _search_prefix yet
_unknown = object()


class LeafNode(Node):
    """A node containing actual key:value pairs.

    :ivar _items: A dict of key->value items. The key is in tuple form.
    :ivar _size: The number of bytes that would be used by serializing all of
        the key/value pairs.
    """

    def __init__(self, search_key_func=None):
        Node.__init__(self)
        # All of the keys in this leaf node share this common prefix
        self._common_serialised_prefix = None
        self._serialise_key = '\x00'.join
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return \
            '%s(key:%s len:%s size:%s max:%s prefix:%s keywidth:%s items:%s)' \
            % (self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, self._key_width, items_str)

    def _current_size(self):
        """Answer the current serialised size of this node.

        This differs from self._raw_size in that it includes the bytes used for
        the header.
        """
        if self._common_serialised_prefix is None:
            bytes_for_items = 0
            prefix_len = 0
        else:
            # We will store a single string with the common prefix
            # And then that common prefix will not be stored in any of the
            # entry lines
            prefix_len = len(self._common_serialised_prefix)
            bytes_for_items = (self._raw_size - (prefix_len * self._len))
        return (9 # 'chkleaf:\n'
            + len(str(self._maximum_size)) + 1
            + len(str(self._key_width)) + 1
            + len(str(self._len)) + 1
            + prefix_len + 1
            + bytes_for_items)

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes, with key key, into a LeafNode.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        """
        return _deserialise_leaf_node(bytes, key,
                                      search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        """Iterate over items in the node.

        :param key_filter: A filter to apply to the node. It should be a
            list/set/dict or similar repeatedly iterable container.
        """
        if key_filter is not None:
            # Adjust the filter - short elements go to a prefix filter. All
            # other items are looked up directly.
            # XXX: perhaps defaultdict? Profiling<rinse and repeat>
            filters = {}
            for key in key_filter:
                if len(key) == self._key_width:
                    # This filter is meant to match exactly one key, yield it
                    # if present.
                    try:
                        yield key, self._items[key]
                    except KeyError:
                        # This key is not present in this map, continue
                        pass
                else:
                    # Short items, we need to match based on a prefix
                    length_filter = filters.setdefault(len(key), set())
                    length_filter.add(key)
            if filters:
                filters = filters.items()
                for item in self._items.iteritems():
                    for length, length_filter in filters:
                        if item[0][:length] in length_filter:
                            yield item
                            break
        else:
            for item in self._items.iteritems():
                yield item

    def _key_value_len(self, key, value):
        # TODO: Should probably be done without actually joining the key, but
        #       then that can be done via the C extension
        return (len(self._serialise_key(key)) + 1
                + len(str(value.count('\n'))) + 1
                + len(value) + 1)
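
    # Worked example (illustrative): for key ('foo',) and value
    # 'line1\nline2' this is len('foo') + 1 + len('1') + 1 + len(value) + 1,
    # i.e. 3 + 1 + 1 + 1 + 11 + 1 = 18 bytes.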

    def _search_key(self, key):
        return self._search_key_func(key)

    def _map_no_split(self, key, value):
        """Map a key to a value.

        This assumes either the key does not already exist, or you have already
        removed its size and length from self.

        :return: True if adding this node should cause us to split.
        """
        self._items[key] = value
        self._raw_size += self._key_value_len(key, value)
        self._len += 1
        serialised_key = self._serialise_key(key)
        if self._common_serialised_prefix is None:
            self._common_serialised_prefix = serialised_key
        else:
            self._common_serialised_prefix = self.common_prefix(
                self._common_serialised_prefix, serialised_key)
        search_key = self._search_key(key)
        if self._search_prefix is _unknown:
            self._compute_search_prefix()
        if self._search_prefix is None:
            self._search_prefix = search_key
        else:
            self._search_prefix = self.common_prefix(
                self._search_prefix, search_key)
        if (self._len > 1
            and self._maximum_size
            and self._current_size() > self._maximum_size):
            # Check to see if all of the search_keys for this node are
            # identical. We allow the node to grow under that circumstance
            # (we could track this as common state, but it is infrequent)
            if (search_key != self._search_prefix
                or not self._are_search_keys_identical()):
                return True
        return False

    def _split(self, store):
        """We have overflowed.

        Split this node into multiple LeafNodes, return it up the stack so that
        the next layer creates a new InternalNode and references the new nodes.

        :return: (common_serialised_prefix, [(node_serialised_prefix, node)])
        """
        if self._search_prefix is _unknown:
            raise AssertionError('Search prefix must be known')
        common_prefix = self._search_prefix
        split_at = len(common_prefix) + 1
        result = {}
        for key, value in self._items.iteritems():
            search_key = self._search_key(key)
            prefix = search_key[:split_at]
            # TODO: Generally only 1 key can be exactly the right length,
            #       which means we can only have 1 key in the node pointed
            #       at by the 'prefix\0' key. We might want to consider
            #       folding it into the containing InternalNode rather than
            #       having a fixed length-1 node.
            #       Note this is probably not true for hash keys, as they
            #       may get a '\00' node anywhere, but won't have keys of
            #       different lengths.
            if len(prefix) < split_at:
                prefix += '\x00'*(split_at - len(prefix))
            if prefix not in result:
                node = LeafNode(search_key_func=self._search_key_func)
                node.set_maximum_size(self._maximum_size)
                node._key_width = self._key_width
                result[prefix] = node
            else:
                node = result[prefix]
            node.map(store, key, value)
        return common_prefix, result.items()

    def map(self, store, key, value):
        """Map key to value."""
        if key in self._items:
            self._raw_size -= self._key_value_len(key, self._items[key])
            self._len -= 1
        self._key = None
        if self._map_no_split(key, value):
            return self._split(store)
        else:
            if self._search_prefix is _unknown:
                raise AssertionError('%r must be known' % self._search_prefix)
            return self._search_prefix, [("", self)]

    def serialise(self, store):
        """Serialise the LeafNode to store.

        :param store: A VersionedFiles honouring the CHK extensions.
        :return: An iterable of the keys inserted by this operation.
        """
        lines = ["chkleaf:\n"]
        lines.append("%d\n" % self._maximum_size)
        lines.append("%d\n" % self._key_width)
        lines.append("%d\n" % self._len)
        if self._common_serialised_prefix is None:
            lines.append('\n')
            if len(self._items) != 0:
                raise AssertionError('If _common_serialised_prefix is None'
                    ' we should have no items')
        else:
            lines.append('%s\n' % (self._common_serialised_prefix,))
            prefix_len = len(self._common_serialised_prefix)
        for key, value in sorted(self._items.items()):
            # Always add a final newline
            value_lines = osutils.chunks_to_lines([value + '\n'])
            serialized = "%s\x00%s\n" % (self._serialise_key(key),
                                         len(value_lines))
            if not serialized.startswith(self._common_serialised_prefix):
                raise AssertionError('We thought the common prefix was %r'
                    ' but entry %r does not have it in common'
                    % (self._common_serialised_prefix, serialized))
            lines.append(serialized[prefix_len:])
            lines.extend(value_lines)
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = ("sha1:" + sha1,)
        bytes = ''.join(lines)
        if len(bytes) != self._current_size():
            raise AssertionError('Invalid _current_size')
        _page_cache.add(self._key, bytes)
        return [self._key]
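
    # The serialised form built above is, schematically:
    #     chkleaf:\n<maximum_size>\n<key_width>\n<len>\n<common prefix>\n
    # followed, per entry, by the serialised key with the common prefix
    # stripped, '\x00', the number of value lines, '\n', then the value lines.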

    def refs(self):
        """Return the references to other CHK's held by this node."""
        return []

    def _compute_search_prefix(self):
        """Determine the common search prefix for all keys in this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        search_keys = [self._search_key_func(key) for key in self._items]
        self._search_prefix = self.common_prefix_for_keys(search_keys)
        return self._search_prefix

    def _are_search_keys_identical(self):
        """Check to see if the search keys for all entries are the same.

        When using a hash as the search_key it is possible for non-identical
        keys to collide. If that happens enough, we may try to overflow a
        LeafNode, but as all are collisions, we must not split.
        """
        common_search_key = None
        for key in self._items:
            search_key = self._search_key(key)
            if common_search_key is None:
                common_search_key = search_key
            elif search_key != common_search_key:
                return False
        return True

    def _compute_serialised_prefix(self):
        """Determine the common prefix for serialised keys in this node.

        :return: A bytestring of the longest serialised key prefix that is
            unique within this node.
        """
        serialised_keys = [self._serialise_key(key) for key in self._items]
        self._common_serialised_prefix = self.common_prefix_for_keys(
            serialised_keys)
        return self._common_serialised_prefix

    def unmap(self, store, key):
        """Unmap key from the node."""
        try:
            self._raw_size -= self._key_value_len(key, self._items[key])
        except KeyError:
            trace.mutter("key %s not found in %r", key, self._items)
            raise
        self._len -= 1
        del self._items[key]
        self._key = None
        # Recompute from scratch
        self._compute_search_prefix()
        self._compute_serialised_prefix()
        return self


class InternalNode(Node):
    """A node that contains references to other nodes.

    An InternalNode is responsible for mapping search key prefixes to child
    nodes.

    :ivar _items: serialised_key => node dictionary. node may be a tuple,
        LeafNode or InternalNode.
    """

    def __init__(self, prefix='', search_key_func=None):
        Node.__init__(self)
        # The size of an internalnode with default values and no children.
        # How many octets key prefixes within this node are.
        self._node_width = 0
        self._search_prefix = prefix
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def add_node(self, prefix, node):
        """Add a child node with prefix prefix, and node node.

        :param prefix: The search key prefix for node.
        :param node: The node being added.
        """
        if self._search_prefix is None:
            raise AssertionError("_search_prefix should not be None")
        if not prefix.startswith(self._search_prefix):
            raise AssertionError("prefixes mismatch: %s must start with %s"
                % (prefix, self._search_prefix))
        if len(prefix) != len(self._search_prefix) + 1:
            raise AssertionError("prefix wrong length: len(%s) is not %d" %
                (prefix, len(self._search_prefix) + 1))
        self._len += len(node)
        if not len(self._items):
            self._node_width = len(prefix)
        if self._node_width != len(self._search_prefix) + 1:
            raise AssertionError("node width mismatch: %d is not %d" %
                (self._node_width, len(self._search_prefix) + 1))
        self._items[prefix] = node
        self._key = None

    def _current_size(self):
        """Answer the current serialised size of this node."""
        return (self._raw_size + len(str(self._len)) + len(str(self._key_width)) +
            len(str(self._maximum_size)))

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes to an InternalNode, with key key.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        :return: An InternalNode instance.
        """
        return _deserialise_internal_node(bytes, key,
                                          search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        for node, node_filter in self._iter_nodes(store, key_filter=key_filter):
            for item in node.iteritems(store, key_filter=node_filter):
                yield item

    def _iter_nodes(self, store, key_filter=None, batch_size=None):
        """Iterate over node objects which match key_filter.

        :param store: A store to use for accessing content.
        :param key_filter: A key filter to filter nodes. Only nodes that might
            contain a key in key_filter will be returned.
        :param batch_size: If not None, then we will return the nodes that had
            to be read using get_record_stream in batches, rather than reading
            them all at once.
        :return: An iterable of nodes. This function does not have to be fully
            consumed. (There will be no pending I/O when items are being returned.)
        """
        # Map from chk key ('sha1:...',) to (prefix, key_filter)
        # prefix is the key in self._items to use, key_filter is the key_filter
        # entries that would match this node
        keys = {}
        if key_filter is None:
            for prefix, node in self._items.iteritems():
                if type(node) == tuple:
                    keys[node] = (prefix, None)
                else:
                    yield node, None
        else:
            # XXX defaultdict ?
            prefix_to_keys = {}
            length_filters = {}
            for key in key_filter:
                search_key = self._search_prefix_filter(key)
                length_filter = length_filters.setdefault(
                    len(search_key), set())
                length_filter.add(search_key)
                prefix_to_keys.setdefault(search_key, []).append(key)
            length_filters = length_filters.items()
            for prefix, node in self._items.iteritems():
                node_key_filter = []
                for length, length_filter in length_filters:
                    sub_prefix = prefix[:length]
                    if sub_prefix in length_filter:
                        node_key_filter.extend(prefix_to_keys[sub_prefix])
                if node_key_filter: # this key matched something, yield it
                    if type(node) == tuple:
                        keys[node] = (prefix, node_key_filter)
                    else:
                        yield node, node_key_filter
        if keys:
            # Look in the page cache for some more bytes
            found_keys = set()
            for key in keys:
                try:
                    bytes = _page_cache[key]
                except KeyError:
                    continue
                else:
                    node = _deserialise(bytes, key,
                        search_key_func=self._search_key_func)
                    prefix, node_key_filter = keys[key]
                    self._items[prefix] = node
                    found_keys.add(key)
                    yield node, node_key_filter
            for key in found_keys:
                del keys[key]
        if keys:
            # demand load some pages.
            if batch_size is None:
                # Read all the keys in
                batch_size = len(keys)
            key_order = list(keys)
            for batch_start in range(0, len(key_order), batch_size):
                batch = key_order[batch_start:batch_start + batch_size]
                # We have to fully consume the stream so there is no pending
                # I/O, so we buffer the nodes for now.
                stream = store.get_record_stream(batch, 'unordered', True)
                node_and_filters = []
                for record in stream:
                    bytes = record.get_bytes_as('fulltext')
                    node = _deserialise(bytes, record.key,
                        search_key_func=self._search_key_func)
                    prefix, node_key_filter = keys[record.key]
                    node_and_filters.append((node, node_key_filter))
                    self._items[prefix] = node
                    _page_cache.add(record.key, bytes)
                for info in node_and_filters:
                    yield info

    def map(self, store, key, value):
        """Map key to value."""
        if not len(self._items):
            raise AssertionError("can't map in an empty InternalNode.")
        search_key = self._search_key(key)
        if self._node_width != len(self._search_prefix) + 1:
            raise AssertionError("node width mismatch: %d is not %d" %
                (self._node_width, len(self._search_prefix) + 1))
        if not search_key.startswith(self._search_prefix):
            # This key doesn't fit in this index, so we need to split at the
            # point where it would fit, insert self into that internal node,
            # and then map this key into that node.
            new_prefix = self.common_prefix(self._search_prefix,
                                            search_key)
            new_parent = InternalNode(new_prefix,
                search_key_func=self._search_key_func)
            new_parent.set_maximum_size(self._maximum_size)
            new_parent._key_width = self._key_width
            new_parent.add_node(self._search_prefix[:len(new_prefix)+1],
                                self)
            return new_parent.map(store, key, value)
        children = [node for node, _
                    in self._iter_nodes(store, key_filter=[key])]
        if children:
            child = children[0]
        else:
            child = self._new_child(search_key, LeafNode)
        old_len = len(child)
        if type(child) is LeafNode:
            old_size = child._current_size()
        else:
            old_size = None
        prefix, node_details = child.map(store, key, value)
        if len(node_details) == 1:
            # child may have shrunk, or might be a new node
            child = node_details[0][1]
            self._len = self._len - old_len + len(child)
            self._items[search_key] = child
            self._key = None
            new_node = self
            if type(child) is LeafNode:
                if old_size is None:
                    # The old node was an InternalNode which means it has now
                    # collapsed, so we need to check if it will chain to a
                    # collapse at this level.
                    trace.mutter("checking remap as InternalNode -> LeafNode")
                    new_node = self._check_remap(store)
                else:
                    # If the LeafNode has shrunk in size, we may want to run
                    # a remap check. Checking for a remap is expensive though
                    # and the frequency of a successful remap is very low.
                    # Shrinkage by small amounts is common, so we only do the
                    # remap check if the new_size is low or the shrinkage
                    # amount is over a configurable limit.
                    new_size = child._current_size()
                    shrinkage = old_size - new_size
                    if (shrinkage > 0 and new_size < _INTERESTING_NEW_SIZE
                        or shrinkage > _INTERESTING_SHRINKAGE_LIMIT):
                        trace.mutter(
                            "checking remap as size shrunk by %d to be %d",
                            shrinkage, new_size)
                        new_node = self._check_remap(store)
            if new_node._search_prefix is None:
                raise AssertionError("_search_prefix should not be None")
            return new_node._search_prefix, [('', new_node)]
        # child has overflowed - create a new intermediate node.
        # XXX: This is where we might want to try and expand our depth
        # to refer to more bytes of every child (which would give us
        # multiple pointers to child nodes, but less intermediate nodes)
        child = self._new_child(search_key, InternalNode)
        child._search_prefix = prefix
        for split, node in node_details:
            child.add_node(split, node)
        self._len = self._len - old_len + len(child)
        self._key = None
        return self._search_prefix, [("", self)]

    def _new_child(self, search_key, klass):
        """Create a new child node of type klass."""
        child = klass()
        child.set_maximum_size(self._maximum_size)
        child._key_width = self._key_width
        child._search_key_func = self._search_key_func
        self._items[search_key] = child
        return child

    def serialise(self, store):
        """Serialise the node to store.

        :param store: A VersionedFiles honouring the CHK extensions.
        :return: An iterable of the keys inserted by this operation.
        """
        for node in self._items.itervalues():
            if type(node) == tuple:
                # Never deserialised.
                continue
            if node._key is not None:
                # Never altered
                continue
            for key in node.serialise(store):
                yield key
        lines = ["chknode:\n"]
        lines.append("%d\n" % self._maximum_size)
        lines.append("%d\n" % self._key_width)
        lines.append("%d\n" % self._len)
        if self._search_prefix is None:
            raise AssertionError("_search_prefix should not be None")
        lines.append('%s\n' % (self._search_prefix,))
        prefix_len = len(self._search_prefix)
        for prefix, node in sorted(self._items.items()):
            if type(node) == tuple:
                key = node[0]
            else:
                key = node._key[0]
            serialised = "%s\x00%s\n" % (prefix, key)
            if not serialised.startswith(self._search_prefix):
                raise AssertionError("prefixes mismatch: %s must start with %s"
                    % (serialised, self._search_prefix))
            lines.append(serialised[prefix_len:])
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = ("sha1:" + sha1,)
        _page_cache.add(self._key, ''.join(lines))
        yield self._key

    def _search_key(self, key):
        """Return the serialised key for key in this node."""
        # search keys are fixed width. All will be self._node_width wide, so we
        # pad as necessary.
        return (self._search_key_func(key) + '\x00'*self._node_width)[:self._node_width]
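
    # e.g. with a _node_width of 4, a short search key 'ab' is padded to
    # 'ab\x00\x00', while a longer 'abcdef' is truncated to 'abcd'.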

    def _search_prefix_filter(self, key):
        """Serialise key for use as a prefix filter in iteritems."""
        return self._search_key_func(key)[:self._node_width]

    def _split(self, offset):
        """Split this node into smaller nodes starting at offset.

        :param offset: The offset to start the new child nodes at.
        :return: An iterable of (prefix, node) tuples. prefix is a byte
            prefix for reaching node.
        """
        if offset >= self._node_width:
            for node in self._items.values():
                for result in node._split(offset):
                    yield result
            return
        for key, node in self._items.items():
            pass

    def refs(self):
        """Return the references to other CHK's held by this node."""
        if self._key is None:
            raise AssertionError("unserialised nodes have no refs.")
        refs = []
        for value in self._items.itervalues():
            if type(value) == tuple:
                refs.append(value)
            else:
                refs.append(value.key())
        return refs

    def _compute_search_prefix(self, extra_key=None):
        """Return the unique key prefix for this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        self._search_prefix = self.common_prefix_for_keys(self._items)
        return self._search_prefix

    def unmap(self, store, key, check_remap=True):
        """Remove key from this node and its children."""
        if not len(self._items):
            raise AssertionError("can't unmap in an empty InternalNode.")
        children = [node for node, _
                    in self._iter_nodes(store, key_filter=[key])]
        if children:
            child = children[0]
        else:
            raise KeyError(key)
        self._len -= 1
        unmapped = child.unmap(store, key)
        self._key = None
        search_key = self._search_key(key)
        if len(unmapped) == 0:
            # All child nodes are gone, remove the child:
            del self._items[search_key]
            unmapped = None
        else:
            # Stash the returned node
            self._items[search_key] = unmapped
        if len(self._items) == 1:
            # this node is no longer needed:
            return self._items.values()[0]
        if type(unmapped) is InternalNode:
            return self
        if check_remap:
            return self._check_remap(store)
        else:
            return self

    def _check_remap(self, store):
        """Check if all keys contained by children fit in a single LeafNode.

        :param store: A store to use for reading more nodes
        :return: Either self, or a new LeafNode which should replace self.
        """
        # Logic for how we determine when we need to rebuild
        # 1) Implicitly unmap() is removing a key which means that the child
        #    nodes are going to be shrinking by some extent.
        # 2) If all children are LeafNodes, it is possible that they could be
        #    combined into a single LeafNode, which can then completely replace
        #    this internal node with a single LeafNode
        # 3) If *one* child is an InternalNode, we assume it has already done
        #    all the work to determine that its children cannot collapse, and
        #    we can then assume that those nodes *plus* the current nodes don't
        #    have a chance of collapsing either.
        #    So a very cheap check is to just say if 'unmapped' is an
        #    InternalNode, we don't have to check further.

        # TODO: Another alternative is to check the total size of all known
        #       LeafNodes. If there is some formula we can use to determine the
        #       final size without actually having to read in any more
        #       children, it would be nice to have. However, we have to be
        #       careful with stuff like nodes that pull out the common prefix
        #       of each key, as adding a new key can change the common prefix
        #       and cause size changes greater than the length of one key.
        #       So for now, we just add everything to a new Leaf until it
        #       splits, as we know that will give the right answer
        new_leaf = LeafNode(search_key_func=self._search_key_func)
        new_leaf.set_maximum_size(self._maximum_size)
        new_leaf._key_width = self._key_width
        # A batch_size of 16 was chosen because:
        #  a) In testing, a 4k page held 14 times. So if we have more than 16
        #     leaf nodes we are unlikely to hold them in a single new leaf
        #     node. This still allows for 1 round trip
        #  b) With 16-way fan out, we can still do a single round trip
        #  c) With 255-way fan out, we don't want to read all 255 and destroy
        #     the page cache, just to determine that we really don't need it.
        for node, _ in self._iter_nodes(store, batch_size=16):
            if type(node) is InternalNode:
                # Without looking at any leaf nodes, we are sure
                return self
            for key, value in node._items.iteritems():
                if new_leaf._map_no_split(key, value):
                    return self
        trace.mutter("remap generated a new LeafNode")
        return new_leaf


def _deserialise(bytes, key, search_key_func):
    """Helper for repositorydetails - convert bytes to a node."""
    if bytes.startswith("chkleaf:\n"):
        node = LeafNode.deserialise(bytes, key, search_key_func=search_key_func)
    elif bytes.startswith("chknode:\n"):
        node = InternalNode.deserialise(bytes, key,
            search_key_func=search_key_func)
    else:
        raise AssertionError("Unknown node type.")
    return node


def _find_children_info(store, interesting_keys, uninteresting_keys, pb):
    """Read the associated records, and determine what is interesting."""
    uninteresting_keys = set(uninteresting_keys)
    chks_to_read = uninteresting_keys.union(interesting_keys)
    next_uninteresting = set()
    next_interesting = set()
    uninteresting_items = set()
    interesting_items = set()
    interesting_to_yield = []
    for record in store.get_record_stream(chks_to_read, 'unordered', True):
        # records_read.add(record.key())
        bytes = record.get_bytes_as('fulltext')
        # We don't care about search_key_func for this code, because we only
        # care about external references.
        node = _deserialise(bytes, record.key, search_key_func=None)
        if record.key in uninteresting_keys:
            if type(node) is InternalNode:
                next_uninteresting.update(node.refs())
            else:
                # We know we are at a LeafNode, so we can pass None for the
                # store
                uninteresting_items.update(node.iteritems(None))
        else:
            interesting_to_yield.append(record.key)
            if type(node) is InternalNode:
                next_interesting.update(node.refs())
            else:
                interesting_items.update(node.iteritems(None))
    return (next_uninteresting, uninteresting_items,
            next_interesting, interesting_to_yield, interesting_items)


def _find_all_uninteresting(store, interesting_root_keys,
                            uninteresting_root_keys, pb):
    """Determine the full set of uninteresting keys."""
    # What about duplicates between interesting_root_keys and
    # uninteresting_root_keys?
    if not uninteresting_root_keys:
        # Shortcut case. We know there is nothing uninteresting to filter out
        # So we just let the rest of the algorithm do the work
        # We know there is nothing uninteresting, and we didn't have to read
        # any interesting records yet.
        return (set(), set(), set(interesting_root_keys), [], set())
    all_uninteresting_chks = set(uninteresting_root_keys)
    all_uninteresting_items = set()

    # First step, find the direct children of both the interesting and
    # uninteresting set
    (uninteresting_keys, uninteresting_items,
     interesting_keys, interesting_to_yield,
     interesting_items) = _find_children_info(store, interesting_root_keys,
                                              uninteresting_root_keys,
                                              pb)
    all_uninteresting_chks.update(uninteresting_keys)
    all_uninteresting_items.update(uninteresting_items)
    del uninteresting_items
    # Note: Exact matches between interesting and uninteresting do not need
    #       to be searched further. Non-exact matches need to be searched in
    #       case there is a future exact-match
    uninteresting_keys.difference_update(interesting_keys)

    # Second, find the full set of uninteresting bits reachable by the
    # uninteresting roots
    chks_to_read = uninteresting_keys
    while chks_to_read:
        next_chks = set()
        for record in store.get_record_stream(chks_to_read, 'unordered', False):
            # TODO: Handle 'absent'
            bytes = record.get_bytes_as('fulltext')
            # We don't care about search_key_func for this code, because we
            # only care about external references.
            node = _deserialise(bytes, record.key, search_key_func=None)
            if type(node) is InternalNode:
                # uninteresting_prefix_chks.update(node._items.iteritems())
                chks = node._items.values()
                # TODO: We remove the entries that are already in
                #       uninteresting_chks ?
                next_chks.update(chks)
                all_uninteresting_chks.update(chks)
            else:
                all_uninteresting_items.update(node._items.iteritems())
        chks_to_read = next_chks
    return (all_uninteresting_chks, all_uninteresting_items,
            interesting_keys, interesting_to_yield, interesting_items)


def iter_interesting_nodes(store, interesting_root_keys,
                           uninteresting_root_keys, pb=None):
    """Given root keys, find interesting nodes.

    Evaluate nodes referenced by interesting_root_keys. Ones that are also
    referenced from uninteresting_root_keys are not considered interesting.

    :param interesting_root_keys: keys which should be part of the
        "interesting" nodes (which will be yielded)
    :param uninteresting_root_keys: keys which should be filtered out of the
        result set.
    :return: Yields
        (interesting record, {interesting key:values})
    """
    # TODO: consider that it may be more memory efficient to use the 20-byte
    #       sha1 string, rather than tuples of hexadecimal sha1 strings.
    # TODO: Try to factor out a lot of the get_record_stream() calls into a
    #       helper function similar to _read_bytes. This function should be
    #       able to use nodes from the _page_cache as well as actually
    #       requesting bytes from the store.

    (all_uninteresting_chks, all_uninteresting_items, interesting_keys,
     interesting_to_yield, interesting_items) = _find_all_uninteresting(store,
        interesting_root_keys, uninteresting_root_keys, pb)

    # Now that we know everything uninteresting, we can yield information from
    # our first request
    interesting_items.difference_update(all_uninteresting_items)
    interesting_to_yield = set(interesting_to_yield) - all_uninteresting_chks
    if interesting_items:
        yield None, interesting_items
    if interesting_to_yield:
        # We request these records again, rather than buffering the root
        # records, most likely they are still in the _group_cache anyway.
        for record in store.get_record_stream(interesting_to_yield,
                                              'unordered', False):
            yield record, []
    all_uninteresting_chks.update(interesting_to_yield)
    interesting_keys.difference_update(all_uninteresting_chks)

    chks_to_read = interesting_keys
    counter = 0
    while chks_to_read:
        next_chks = set()
        for record in store.get_record_stream(chks_to_read, 'unordered', False):
            counter += 1
            if pb is not None:
                pb.update('find chk pages', counter)
            # TODO: Handle 'absent'?
            bytes = record.get_bytes_as('fulltext')
            # We don't care about search_key_func for this code, because we
            # only care about external references.
            node = _deserialise(bytes, record.key, search_key_func=None)
            if type(node) is InternalNode:
                # all_uninteresting_chks grows large, as it lists all nodes we
                # don't want to process (including already seen interesting
                # nodes)
                # small.difference_update(large) scales O(large), but
                # small.difference(large) scales O(small).
                # Also, we know we just _deserialised this node, so we can
                # access the dict directly.
                chks = set(node._items.itervalues()).difference(
                    all_uninteresting_chks)
                # Is set() and .difference_update better than:
                # chks = [chk for chk in node.refs()
                #         if chk not in all_uninteresting_chks]
                next_chks.update(chks)
                # These are now uninteresting everywhere else
                all_uninteresting_chks.update(chks)
                interesting_items = []
            else:
                interesting_items = [item for item in node._items.iteritems()
                                     if item not in all_uninteresting_items]
                # TODO: Do we need to filter out items that we have already
                #       seen on other pages? We don't really want to buffer the
                #       whole thing, but it does mean that callers need to
                #       understand they may get duplicate values.
                # all_uninteresting_items.update(interesting_items)
            yield record, interesting_items
        chks_to_read = next_chks
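

# Usage sketch (hypothetical root keys): stream every page and value reachable
# from new_root but not from old_root:
#     for record, items in iter_interesting_nodes(store, [new_root],
#                                                 [old_root]):
#         ...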


try:
    from bzrlib._chk_map_pyx import (
        _search_key_16,
        _search_key_255,
        _deserialise_leaf_node,
        _deserialise_internal_node,
        )
except ImportError:
    from bzrlib._chk_map_py import (
        _search_key_16,
        _search_key_255,
        _deserialise_leaf_node,
        _deserialise_internal_node,
        )
search_key_registry.register('hash-16-way', _search_key_16)
search_key_registry.register('hash-255-way', _search_key_255)
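
# A search key function is normally obtained from the registry, e.g.
# (illustrative):
#     search_key_func = search_key_registry.get('hash-16-way')
#     chkmap = CHKMap(store, root_key, search_key_func=search_key_func)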