# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Persistent maps from tuple_of_strings->string using CHK stores.

Overview and current status:

The CHKMap class implements a dict from tuple_of_strings->string by using a trie
with internal nodes of 8-bit fan out; the key tuples are mapped to strings by
joining them with \x00, and \x00-padding shorter keys out to the length of the
longest key. Leaf nodes are packed as densely as possible, and internal nodes
are all an additional 8-bits wide, leading to a sparse upper tree.

Updates to a CHKMap are done preferentially via the apply_delta method, to
allow optimisation of the update operation; but individual map/unmap calls are
possible and supported. All changes via map/unmap are buffered in memory until
the _save method is called to force serialisation of the tree. apply_delta
performs a _save implicitly.

TODO:
-----

Densely packed upper nodes.

"""

import heapq
import time

from bzrlib import lazy_import
lazy_import.lazy_import(globals(), """
from bzrlib import versionedfile
""")
from bzrlib import (
    errors,
    lru_cache,
    osutils,
    registry,
    trace,
    )

# approx 4MB
# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
# out, it takes 3.1MB to cache the layer.
_PAGE_CACHE_SIZE = 4*1024*1024
# We are caching bytes so len(value) is perfectly accurate
_page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)

# If a ChildNode falls below this many bytes, we check for a remap
_INTERESTING_NEW_SIZE = 50
# If a ChildNode shrinks by more than this amount, we check for a remap
_INTERESTING_SHRINKAGE_LIMIT = 20
# If we delete more than this many nodes applying a delta, we check for a remap
_INTERESTING_DELETES_LIMIT = 5


def _search_key_plain(key):
    """Map the key tuple into a search string that just uses the key bytes."""
    return '\x00'.join(key)


search_key_registry = registry.Registry()
search_key_registry.register('plain', _search_key_plain)
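
# Illustrative example (not in the original source): with the 'plain' search
# key function above, the key tuple ('foo', 'bar') yields the search string
# 'foo\x00bar'.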


class CHKMap(object):
    """A persistent map from string to string backed by a CHK store."""

    def __init__(self, store, root_key, search_key_func=None):
        """Create a CHKMap object.

        :param store: The store the CHKMap is stored in.
        :param root_key: The root key of the map. None to create an empty
            CHKMap.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        """
        self._store = store
        if search_key_func is None:
            search_key_func = _search_key_plain
        self._search_key_func = search_key_func
        if root_key is None:
            self._root_node = LeafNode(search_key_func=search_key_func)
        else:
            self._root_node = self._node_key(root_key)

    def apply_delta(self, delta):
        """Apply a delta to the map.

        :param delta: An iterable of old_key, new_key, new_value tuples.
            If new_key is not None, then new_key->new_value is inserted
            into the map; if old_key is not None, then the old mapping
            of old_key is removed.
        """
        delete_count = 0
        for old, new, value in delta:
            if old is not None and old != new:
                self.unmap(old, check_remap=False)
                delete_count += 1
        for old, new, value in delta:
            if new is not None:
                self.map(new, value)
        if delete_count > _INTERESTING_DELETES_LIMIT:
            trace.mutter("checking remap as %d deletions", delete_count)
            self._check_remap()
        return self._save()
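
    # Illustrative delta (assumed keys and values): renaming one entry and
    # adding another in a single call; apply_delta returns the new root key.
    #
    #   new_root = chkmap.apply_delta([
    #       (('old-name',), ('new-name',), 'content'),   # move old -> new
    #       (None, ('added-name',), 'other content'),    # plain insert
    #   ])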

    def _ensure_root(self):
        """Ensure that the root node is an object not a key."""
        if type(self._root_node) == tuple:
            # Demand-load the root
            self._root_node = self._get_node(self._root_node)

    def _get_node(self, node):
        """Get a node.

        Note that this does not update the _items dict in objects containing a
        reference to this node. As such it does not prevent subsequent IO being
        performed.

        :param node: A tuple key or node object.
        :return: A node object.
        """
        if type(node) == tuple:
            bytes = self._read_bytes(node)
            return _deserialise(bytes, node,
                search_key_func=self._search_key_func)
        else:
            return node

    def _read_bytes(self, key):
        """Read the bytes for key, preferring the page cache."""
        try:
            return _page_cache[key]
        except KeyError:
            stream = self._store.get_record_stream([key], 'unordered', True)
            bytes = stream.next().get_bytes_as('fulltext')
            _page_cache[key] = bytes
            return bytes

    def _dump_tree(self, include_keys=False):
        """Return the tree in a string representation."""
        self._ensure_root()
        res = self._dump_tree_node(self._root_node, prefix='', indent='',
                                   include_keys=include_keys)
        res.append('') # Give a trailing '\n'
        return '\n'.join(res)

    def _dump_tree_node(self, node, prefix, indent, include_keys=True):
        """For this node and all children, generate a string representation."""
        result = []
        if not include_keys:
            key_str = ''
        else:
            node_key = node.key()
            if node_key is not None:
                key_str = ' %s' % (node_key[0],)
            else:
                key_str = ' None'
        result.append('%s%r %s%s' % (indent, prefix, node.__class__.__name__,
                                     key_str))
        if type(node) is InternalNode:
            # Trigger all child nodes to get loaded
            list(node._iter_nodes(self._store))
            for prefix, sub in sorted(node._items.iteritems()):
                result.extend(self._dump_tree_node(sub, prefix, indent + '  ',
                                                   include_keys=include_keys))
        else:
            for key, value in sorted(node._items.iteritems()):
                # Don't use prefix or indent here, to line up when used in
                # tests in conjunction with assertEqualDiff
                result.append('      %r %r' % (key, value))
        return result

    @classmethod
    def from_dict(klass, store, initial_value, maximum_size=0, key_width=1,
        search_key_func=None):
        """Create a CHKMap in store with initial_value as the content.

        :param store: The store to record initial_value in, a VersionedFiles
            object with 1-tuple keys supporting CHK key generation.
        :param initial_value: A dict to store in store. Its keys and values
            must be bytestrings.
        :param maximum_size: The maximum_size rule to apply to nodes. This
            determines the size at which no new data is added to a single node.
        :param key_width: The number of elements in each key_tuple being stored
            in this map.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        :return: The root chk of the resulting CHKMap.
        """
        result = CHKMap(store, None, search_key_func=search_key_func)
        result._root_node.set_maximum_size(maximum_size)
        result._root_node._key_width = key_width
        delta = []
        for key, value in initial_value.items():
            delta.append((None, key, value))
        return result.apply_delta(delta)

    def iter_changes(self, basis):
        """Iterate over the changes between basis and self.

        :return: An iterator of tuples: (key, old_value, new_value). Old_value
            is None for keys only in self; new_value is None for keys only in
            basis.
        """
        # Overview:
        # Read both trees in lexicographic, highest-first order.
        # Any identical nodes we skip.
        # Any unique prefixes we output immediately.
        # Values in a leaf node are treated as single-value nodes in the tree
        # which allows them to be not-special-cased. We know to output them
        # because their value is a string, not a key(tuple) or node.
        #
        # Corner cases to beware of when considering this function:
        # *) common references are at different heights.
        #    consider two trees:
        #    {'a': LeafNode={'aaa':'foo', 'aab':'bar'}, 'b': LeafNode={'b'}}
        #    {'a': InternalNode={'aa':LeafNode={'aaa':'foo', 'aab':'bar'},
        #                        'ab':LeafNode={'ab':'bar'}}
        #     'b': LeafNode={'b'}}
        #    the node with aaa/aab will only be encountered in the second tree
        #    after reading the 'a' subtree, but it is encountered in the first
        #    tree immediately. Variations on this may have read internal nodes
        #    like this. We want to cut the entire pending subtree when we
        #    realise we have a common node. For this we use a list of keys -
        #    the path to a node - and check the entire path is clean as we
        #    process each item.
        if self._node_key(self._root_node) == self._node_key(basis._root_node):
            return
        self._ensure_root()
        basis._ensure_root()
        excluded_keys = set()
        self_node = self._root_node
        basis_node = basis._root_node
        # A heap, each element is prefix, node(tuple/NodeObject/string),
        # key_path (a list of tuples, tail-sharing down the tree.)
        self_pending = []
        basis_pending = []
        def process_node(node, path, a_map, pending):
            # take a node and expand it
            node = a_map._get_node(node)
            if type(node) == LeafNode:
                path = (node._key, path)
                for key, value in node._items.items():
                    # For a LeafNode, the key is a serialized_key, rather than
                    # a search_key, but the heap is using search_keys
                    search_key = node._search_key_func(key)
                    heapq.heappush(pending, (search_key, key, value, path))
            else:
                # type(node) == InternalNode
                path = (node._key, path)
                for prefix, child in node._items.items():
                    heapq.heappush(pending, (prefix, None, child, path))
        def process_common_internal_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for prefix, child in self_items - basis_items:
                heapq.heappush(self_pending, (prefix, None, child, path))
            path = (basis_node._key, None)
            for prefix, child in basis_items - self_items:
                heapq.heappush(basis_pending, (prefix, None, child, path))
        def process_common_leaf_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for key, value in self_items - basis_items:
                prefix = self._search_key_func(key)
                heapq.heappush(self_pending, (prefix, key, value, path))
            path = (basis_node._key, None)
            for key, value in basis_items - self_items:
                prefix = basis._search_key_func(key)
                heapq.heappush(basis_pending, (prefix, key, value, path))
        def process_common_prefix_nodes(self_node, self_path,
                                        basis_node, basis_path):
            # Would it be more efficient if we could request both at the same
            # time?
            self_node = self._get_node(self_node)
            basis_node = basis._get_node(basis_node)
            if (type(self_node) == InternalNode
                and type(basis_node) == InternalNode):
                # Matching internal nodes
                process_common_internal_nodes(self_node, basis_node)
            elif (type(self_node) == LeafNode
                  and type(basis_node) == LeafNode):
                process_common_leaf_nodes(self_node, basis_node)
            else:
                process_node(self_node, self_path, self, self_pending)
                process_node(basis_node, basis_path, basis, basis_pending)
        process_common_prefix_nodes(self_node, None, basis_node, None)
        self_seen = set()
        basis_seen = set()
        excluded_keys = set()
        def check_excluded(key_path):
            # Note that this is N^2, it depends on us trimming trees
            # aggressively to not become slow.
            # A better implementation would probably have a reverse map
            # back to the children of a node, and jump straight to it when
            # a common node is detected, then proceed to remove the already
            # pending children. bzrlib.graph has a searcher module with a
            # similar problem.
            while key_path is not None:
                key, key_path = key_path
                if key in excluded_keys:
                    return True
            return False

        loop_counter = 0
        while self_pending or basis_pending:
            loop_counter += 1
            if not self_pending:
                # self is exhausted: output remainder of basis
                for prefix, key, node, path in basis_pending:
                    if check_excluded(path):
                        continue
                    node = basis._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(basis._store):
                            yield (key, value, None)
                return
            elif not basis_pending:
                # basis is exhausted: output remainder of self.
                for prefix, key, node, path in self_pending:
                    if check_excluded(path):
                        continue
                    node = self._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(self._store):
                            yield (key, None, value)
                return
            else:
                # XXX: future optimisation - yield the smaller items
                # immediately rather than pushing everything on/off the
                # heaps. Applies to both internal nodes and leafnodes.
                if self_pending[0][0] < basis_pending[0][0]:
                    # expand self
                    prefix, key, node, path = heapq.heappop(self_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        process_node(node, path, self, self_pending)
                        continue
                elif self_pending[0][0] > basis_pending[0][0]:
                    # expand basis
                    prefix, key, node, path = heapq.heappop(basis_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        process_node(node, path, basis, basis_pending)
                        continue
                else:
                    # common prefix: possibly expand both
                    if self_pending[0][1] is None:
                        # process next self
                        read_self = True
                    else:
                        read_self = False
                    if basis_pending[0][1] is None:
                        # process next basis
                        read_basis = True
                    else:
                        read_basis = False
                    if not read_self and not read_basis:
                        # compare a common value
                        self_details = heapq.heappop(self_pending)
                        basis_details = heapq.heappop(basis_pending)
                        if self_details[2] != basis_details[2]:
                            yield (self_details[1],
                                basis_details[2], self_details[2])
                        continue
                    # At least one side wasn't a simple value
                    if (self._node_key(self_pending[0][2]) ==
                        self._node_key(basis_pending[0][2])):
                        # Identical pointers, skip (and don't bother adding to
                        # excluded, it won't turn up again.)
                        heapq.heappop(self_pending)
                        heapq.heappop(basis_pending)
                        continue
                    # Now we need to expand this node before we can continue
                    if read_self and read_basis:
                        # Both sides start with the same prefix, so process
                        # them in parallel
                        self_prefix, _, self_node, self_path = heapq.heappop(
                            self_pending)
                        basis_prefix, _, basis_node, basis_path = heapq.heappop(
                            basis_pending)
                        if self_prefix != basis_prefix:
                            raise AssertionError(
                                '%r != %r' % (self_prefix, basis_prefix))
                        process_common_prefix_nodes(
                            self_node, self_path,
                            basis_node, basis_path)
                        continue
                    if read_self:
                        prefix, key, node, path = heapq.heappop(self_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, self, self_pending)
                    if read_basis:
                        prefix, key, node, path = heapq.heappop(basis_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, basis, basis_pending)
        # print loop_counter
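
    # Illustrative output (assumed map contents): if self adds ('a',), drops
    # ('b',) and changes ('c',) relative to basis, iter_changes yields
    #   (('a',), None, 'new'), (('b',), 'old', None), (('c',), 'v1', 'v2')
    # in some order determined by the search keys.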

    def iteritems(self, key_filter=None):
        """Iterate over the entire CHKMap's contents."""
        self._ensure_root()
        return self._root_node.iteritems(self._store, key_filter=key_filter)

    def key(self):
        """Return the key for this map."""
        if type(self._root_node) is tuple:
            return self._root_node
        else:
            return self._root_node._key

    def __len__(self):
        self._ensure_root()
        return len(self._root_node)

    def map(self, key, value):
        """Map a key tuple to value."""
        # Need a root object.
        self._ensure_root()
        prefix, node_details = self._root_node.map(self._store, key, value)
        if len(node_details) == 1:
            self._root_node = node_details[0][1]
        else:
            self._root_node = InternalNode(prefix,
                                search_key_func=self._search_key_func)
            self._root_node.set_maximum_size(node_details[0][1].maximum_size)
            self._root_node._key_width = node_details[0][1]._key_width
            for split, node in node_details:
                self._root_node.add_node(split, node)

    def _node_key(self, node):
        """Get the key for a node, whether it's a tuple or a node object."""
        if type(node) == tuple:
            return node
        else:
            return node._key

    def unmap(self, key, check_remap=True):
        """Remove key from the map."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            unmapped = self._root_node.unmap(self._store, key,
                check_remap=check_remap)
        else:
            unmapped = self._root_node.unmap(self._store, key)
        self._root_node = unmapped

    def _check_remap(self):
        """Check if nodes can be collapsed."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            self._root_node._check_remap(self._store)

    def _save(self):
        """Save the map completely.

        :return: The key of the root node.
        """
        if type(self._root_node) == tuple:
            # Already saved.
            return self._root_node
        keys = list(self._root_node.serialise(self._store))
        return keys[-1]


class Node(object):
    """Base class defining the protocol for CHK Map nodes.

    :ivar _raw_size: The total size of the serialized key:value data, before
        adding the header bytes, and without prefix compression.
    """

    def __init__(self, key_width=1):
        """Create a node.

        :param key_width: The width of keys for this node.
        """
        self._key = None
        # Current number of elements
        self._len = 0
        self._maximum_size = 0
        self._key_width = key_width
        # current size in bytes
        self._raw_size = 0
        # The pointers/values this node has - meaning defined by child classes.
        self._items = {}
        # The common search prefix
        self._search_prefix = None

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return '%s(key:%s len:%s size:%s max:%s prefix:%s items:%s)' % (
            self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, items_str)

    def key(self):
        return self._key

    def __len__(self):
        return self._len

    @property
    def maximum_size(self):
        """What is the upper limit for adding references to a node."""
        return self._maximum_size

    def set_maximum_size(self, new_size):
        """Set the size threshold for nodes.

        :param new_size: The size at which no data is added to a node. 0 for
            unlimited.
        """
        self._maximum_size = new_size

    @classmethod
    def common_prefix(cls, prefix, key):
        """Given 2 strings, return the longest prefix common to both.

        :param prefix: This has been the common prefix for other keys, so it is
            more likely to be the common prefix in this case as well.
        :param key: Another string to compare to.
        """
        if key.startswith(prefix):
            return prefix
        # Is there a better way to do this?
        for pos, (left, right) in enumerate(zip(prefix, key)):
            if left != right:
                pos -= 1
                break
        common = prefix[:pos+1]
        return common
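
    # Illustrative examples (not in the original source):
    #   Node.common_prefix('abcd', 'abce') -> 'abc'
    #   Node.common_prefix('abc', 'abcdef') -> 'abc'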

    @classmethod
    def common_prefix_for_keys(cls, keys):
        """Given a list of keys, find their common prefix.

        :param keys: An iterable of strings.
        :return: The longest common prefix of all keys.
        """
        common_prefix = None
        for key in keys:
            if common_prefix is None:
                common_prefix = key
                continue
            common_prefix = cls.common_prefix(common_prefix, key)
            if not common_prefix:
                # if common_prefix is the empty string, then we know it won't
                # change further
                return ''
        return common_prefix
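
    # Illustrative example: Node.common_prefix_for_keys(['abc', 'abd', 'ab'])
    # evaluates to 'ab'.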


# Singleton indicating we have not computed _search_prefix yet
_unknown = object()

class LeafNode(Node):
    """A node containing actual key:value pairs.

    :ivar _items: A dict of key->value items. The key is in tuple form.
    :ivar _size: The number of bytes that would be used by serializing all of
        the key/value pairs.
    """

    def __init__(self, search_key_func=None):
        Node.__init__(self)
        # All of the keys in this leaf node share this common prefix
        self._common_serialised_prefix = None
        self._serialise_key = '\x00'.join
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return \
            '%s(key:%s len:%s size:%s max:%s prefix:%s keywidth:%s items:%s)' \
            % (self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, self._key_width, items_str)

    def _current_size(self):
        """Answer the current serialised size of this node.

        This differs from self._raw_size in that it includes the bytes used for
        the header.
        """
        if self._common_serialised_prefix is None:
            bytes_for_items = 0
            prefix_len = 0
        else:
            # We will store a single string with the common prefix
            # And then that common prefix will not be stored in any of the
            # entry lines
            prefix_len = len(self._common_serialised_prefix)
            bytes_for_items = (self._raw_size - (prefix_len * self._len))
        return (9 # 'chkleaf:\n'
            + len(str(self._maximum_size)) + 1
            + len(str(self._key_width)) + 1
            + len(str(self._len)) + 1
            + prefix_len + 1
            + bytes_for_items)

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes, with key key, into a LeafNode.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        """
        return _deserialise_leaf_node(bytes, key,
                                      search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        """Iterate over items in the node.

        :param key_filter: A filter to apply to the node. It should be a
            list/set/dict or similar repeatedly iterable container.
        """
        if key_filter is not None:
            # Adjust the filter - short elements go to a prefix filter. All
            # other items are looked up directly.
            # XXX: perhaps defaultdict? Profiling<rinse and repeat>
            filters = {}
            for key in key_filter:
                if len(key) == self._key_width:
                    # This filter is meant to match exactly one key, yield it
                    # if we have it.
                    try:
                        yield key, self._items[key]
                    except KeyError:
                        # This key is not present in this map, continue
                        pass
                else:
                    # Short items, we need to match based on a prefix
                    length_filter = filters.setdefault(len(key), set())
                    length_filter.add(key)
            if filters:
                filters = filters.items()
                for item in self._items.iteritems():
                    for length, length_filter in filters:
                        if item[0][:length] in length_filter:
                            yield item
                            break
        else:
            for item in self._items.iteritems():
                yield item

    def _key_value_len(self, key, value):
        # TODO: Should probably be done without actually joining the key, but
        #       then that can be done via the C extension
        return (len(self._serialise_key(key)) + 1
                + len(str(value.count('\n'))) + 1
                + len(value) + 1)
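
    # Illustrative arithmetic (assumed key and value): for key ('foo',) and
    # value 'bar' this is 3 + 1 (key plus \x00) + 1 + 1 ('0' newline count
    # plus \n) + 3 + 1 (value plus \n) = 10 bytes.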

    def _search_key(self, key):
        return self._search_key_func(key)

    def _map_no_split(self, key, value):
        """Map a key to a value.

        This assumes either the key does not already exist, or you have already
        removed its size and length from self.

        :return: True if adding this node should cause us to split.
        """
        self._items[key] = value
        self._raw_size += self._key_value_len(key, value)
        self._len += 1
        serialised_key = self._serialise_key(key)
        if self._common_serialised_prefix is None:
            self._common_serialised_prefix = serialised_key
        else:
            self._common_serialised_prefix = self.common_prefix(
                self._common_serialised_prefix, serialised_key)
        search_key = self._search_key(key)
        if self._search_prefix is _unknown:
            self._compute_search_prefix()
        if self._search_prefix is None:
            self._search_prefix = search_key
        else:
            self._search_prefix = self.common_prefix(
                self._search_prefix, search_key)
        if (self._len > 1
            and self._maximum_size
            and self._current_size() > self._maximum_size):
            # Check to see if all of the search_keys for this node are
            # identical. We allow the node to grow under that circumstance
            # (we could track this as common state, but it is infrequent)
            if (search_key != self._search_prefix
                or not self._are_search_keys_identical()):
                return True
        return False

    def _split(self, store):
        """We have overflowed.

        Split this node into multiple LeafNodes, return it up the stack so that
        the next layer creates a new InternalNode and references the new nodes.

        :return: (common_serialised_prefix, [(node_serialised_prefix, node)])
        """
        if self._search_prefix is _unknown:
            raise AssertionError('Search prefix must be known')
        common_prefix = self._search_prefix
        split_at = len(common_prefix) + 1
        result = {}
        for key, value in self._items.iteritems():
            search_key = self._search_key(key)
            prefix = search_key[:split_at]
            # TODO: Generally only 1 key can be exactly the right length,
            #       which means we can only have 1 key in the node pointed
            #       at by the 'prefix\0' key. We might want to consider
            #       folding it into the containing InternalNode rather than
            #       having a fixed length-1 node.
            #       Note this is probably not true for hash keys, as they
            #       may get a '\00' node anywhere, but won't have keys of
            #       different lengths.
            if len(prefix) < split_at:
                prefix += '\x00'*(split_at - len(prefix))
            if prefix not in result:
                node = LeafNode(search_key_func=self._search_key_func)
                node.set_maximum_size(self._maximum_size)
                node._key_width = self._key_width
                result[prefix] = node
            else:
                node = result[prefix]
            node.map(store, key, value)
        return common_prefix, result.items()
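
    # Illustrative split (assumed plain search keys 'aaa', 'aab' and 'abc'):
    # the common search prefix is 'a', so split_at is 2 and the items fan out
    # into one new LeafNode under prefix 'aa' and another under 'ab'; the
    # caller then builds an InternalNode over those prefixes.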

    def map(self, store, key, value):
        """Map key to value."""
        if key in self._items:
            self._raw_size -= self._key_value_len(key, self._items[key])
            self._len -= 1
        self._key = None
        if self._map_no_split(key, value):
            return self._split(store)
        else:
            if self._search_prefix is _unknown:
                raise AssertionError('%r must be known' % self._search_prefix)
            return self._search_prefix, [("", self)]

    def serialise(self, store):
        """Serialise the LeafNode to store.

        :param store: A VersionedFiles honouring the CHK extensions.
        :return: An iterable of the keys inserted by this operation.
        """
        lines = ["chkleaf:\n"]
        lines.append("%d\n" % self._maximum_size)
        lines.append("%d\n" % self._key_width)
        lines.append("%d\n" % self._len)
        if self._common_serialised_prefix is None:
            lines.append('\n')
            if len(self._items) != 0:
                raise AssertionError('If _common_serialised_prefix is None'
                    ' we should have no items')
        else:
            lines.append('%s\n' % (self._common_serialised_prefix,))
            prefix_len = len(self._common_serialised_prefix)
        for key, value in sorted(self._items.items()):
            # Always add a final newline
            value_lines = osutils.chunks_to_lines([value + '\n'])
            serialized = "%s\x00%s\n" % (self._serialise_key(key),
                                         len(value_lines))
            if not serialized.startswith(self._common_serialised_prefix):
                raise AssertionError('We thought the common prefix was %r'
                    ' but entry %r does not have it in common'
                    % (self._common_serialised_prefix, serialized))
            lines.append(serialized[prefix_len:])
            lines.extend(value_lines)
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = ("sha1:" + sha1,)
        bytes = ''.join(lines)
        if len(bytes) != self._current_size():
            raise AssertionError('Invalid _current_size')
        _page_cache.add(self._key, bytes)
        return [self._key]
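
    # Illustrative serialised layout (values assumed): a 'chkleaf:' header,
    # then maximum_size, key_width and len each on their own line, the common
    # serialised prefix, and per item a '<key>\x00<line count>' line (with the
    # common prefix stripped) followed by that item's value lines.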

    def refs(self):
        """Return the references to other CHKs held by this node."""
        return []

    def _compute_search_prefix(self):
        """Determine the common search prefix for all keys in this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        search_keys = [self._search_key_func(key) for key in self._items]
        self._search_prefix = self.common_prefix_for_keys(search_keys)
        return self._search_prefix

    def _are_search_keys_identical(self):
        """Check to see if the search keys for all entries are the same.

        When using a hash as the search_key it is possible for non-identical
        keys to collide. If that happens enough, we may try to overflow a
        LeafNode, but as all are collisions, we must not split.
        """
        common_search_key = None
        for key in self._items:
            search_key = self._search_key(key)
            if common_search_key is None:
                common_search_key = search_key
            elif search_key != common_search_key:
                return False
        return True

    def _compute_serialised_prefix(self):
        """Determine the common prefix for serialised keys in this node.

        :return: A bytestring of the longest serialised key prefix that is
            unique within this node.
        """
        serialised_keys = [self._serialise_key(key) for key in self._items]
        self._common_serialised_prefix = self.common_prefix_for_keys(
            serialised_keys)
        return self._common_serialised_prefix

    def unmap(self, store, key):
        """Unmap key from the node."""
        try:
            self._raw_size -= self._key_value_len(key, self._items[key])
        except KeyError:
            trace.mutter("key %s not found in %r", key, self._items)
            raise
        self._len -= 1
        del self._items[key]
        self._key = None
        # Recompute from scratch
        self._compute_search_prefix()
        self._compute_serialised_prefix()
        return self


class InternalNode(Node):
    """A node that contains references to other nodes.

    An InternalNode is responsible for mapping search key prefixes to child
    nodes.

    :ivar _items: serialised_key => node dictionary. node may be a tuple,
        LeafNode or InternalNode.
    """

    def __init__(self, prefix='', search_key_func=None):
        Node.__init__(self)
        # The size of an internalnode with default values and no children.
        # How many octets key prefixes within this node are.
        self._node_width = 0
        self._search_prefix = prefix
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def add_node(self, prefix, node):
        """Add a child node with prefix prefix, and node node.

        :param prefix: The search key prefix for node.
        :param node: The node being added.
        """
        if self._search_prefix is None:
            raise AssertionError("_search_prefix should not be None")
        if not prefix.startswith(self._search_prefix):
            raise AssertionError("prefixes mismatch: %s must start with %s"
                % (prefix, self._search_prefix))
        if len(prefix) != len(self._search_prefix) + 1:
            raise AssertionError("prefix wrong length: len(%s) is not %d" %
                (prefix, len(self._search_prefix) + 1))
        self._len += len(node)
        if not len(self._items):
            self._node_width = len(prefix)
        if self._node_width != len(self._search_prefix) + 1:
            raise AssertionError("node width mismatch: %d is not %d" %
                (self._node_width, len(self._search_prefix) + 1))
        self._items[prefix] = node
        self._key = None

    def _current_size(self):
        """Answer the current serialised size of this node."""
        return (self._raw_size + len(str(self._len)) + len(str(self._key_width)) +
            len(str(self._maximum_size)))

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes to an InternalNode, with key key.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        :return: An InternalNode instance.
        """
        return _deserialise_internal_node(bytes, key,
                                          search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        """Iterate over the items in this node's children."""
        for node, node_filter in self._iter_nodes(store, key_filter=key_filter):
            for item in node.iteritems(store, key_filter=node_filter):
                yield item

    def _iter_nodes(self, store, key_filter=None, batch_size=None):
        """Iterate over node objects which match key_filter.

        :param store: A store to use for accessing content.
        :param key_filter: A key filter to filter nodes. Only nodes that might
            contain a key in key_filter will be returned.
        :param batch_size: If not None, then we will return the nodes that had
            to be read using get_record_stream in batches, rather than reading
            them all at once.
        :return: An iterable of nodes. This function does not have to be fully
            consumed.  (There will be no pending I/O when items are being returned.)
        """
        # Map from chk key ('sha1:...',) to (prefix, key_filter)
        # prefix is the key in self._items to use, key_filter is the key_filter
        # entries that would match this node
        keys = {}
        if key_filter is None:
            for prefix, node in self._items.iteritems():
                if type(node) == tuple:
                    keys[node] = (prefix, None)
                else:
                    yield node, None
        else:
            # XXX defaultdict ?
            prefix_to_keys = {}
            length_filters = {}
            for key in key_filter:
                search_key = self._search_prefix_filter(key)
                length_filter = length_filters.setdefault(
                                    len(search_key), set())
                length_filter.add(search_key)
                prefix_to_keys.setdefault(search_key, []).append(key)
            length_filters = length_filters.items()
            for prefix, node in self._items.iteritems():
                node_key_filter = []
                for length, length_filter in length_filters:
                    sub_prefix = prefix[:length]
                    if sub_prefix in length_filter:
                        node_key_filter.extend(prefix_to_keys[sub_prefix])
                if node_key_filter: # this key matched something, yield it
                    if type(node) == tuple:
                        keys[node] = (prefix, node_key_filter)
                    else:
                        yield node, node_key_filter
        if keys:
            # Look in the page cache for some more bytes
            found_keys = set()
            for key in keys:
                try:
                    bytes = _page_cache[key]
                except KeyError:
                    continue
                else:
                    node = _deserialise(bytes, key,
                        search_key_func=self._search_key_func)
                    prefix, node_key_filter = keys[key]
                    self._items[prefix] = node
                    found_keys.add(key)
                    yield node, node_key_filter
            for key in found_keys:
                del keys[key]
        if keys:
            # demand load some pages.
            if batch_size is None:
                # Read all the keys in
                batch_size = len(keys)
            key_order = list(keys)
            for batch_start in range(0, len(key_order), batch_size):
                batch = key_order[batch_start:batch_start + batch_size]
                # We have to fully consume the stream so there is no pending
                # I/O, so we buffer the nodes for now.
                stream = store.get_record_stream(batch, 'unordered', True)
                node_and_filters = []
                for record in stream:
                    bytes = record.get_bytes_as('fulltext')
                    node = _deserialise(bytes, record.key,
                        search_key_func=self._search_key_func)
                    prefix, node_key_filter = keys[record.key]
                    node_and_filters.append((node, node_key_filter))
                    self._items[prefix] = node
                    _page_cache.add(record.key, bytes)
                for info in node_and_filters:
                    yield info

    def map(self, store, key, value):
        """Map key to value."""
        if not len(self._items):
            raise AssertionError("can't map in an empty InternalNode.")
        search_key = self._search_key(key)
        if self._node_width != len(self._search_prefix) + 1:
            raise AssertionError("node width mismatch: %d is not %d" %
                (self._node_width, len(self._search_prefix) + 1))
        if not search_key.startswith(self._search_prefix):
            # This key doesn't fit in this index, so we need to split at the
            # point where it would fit, insert self into that internal node,
            # and then map this key into that node.
            new_prefix = self.common_prefix(self._search_prefix,
                                            search_key)
            new_parent = InternalNode(new_prefix,
                search_key_func=self._search_key_func)
            new_parent.set_maximum_size(self._maximum_size)
            new_parent._key_width = self._key_width
            new_parent.add_node(self._search_prefix[:len(new_prefix)+1],
                                self)
            return new_parent.map(store, key, value)
        children = [node for node, _
                          in self._iter_nodes(store, key_filter=[key])]
        if children:
            child = children[0]
        else:
            # new child needed:
            child = self._new_child(search_key, LeafNode)
        old_len = len(child)
        if type(child) is LeafNode:
            old_size = child._current_size()
        else:
            old_size = None
        prefix, node_details = child.map(store, key, value)
        if len(node_details) == 1:
            # child may have shrunk, or might be a new node
            child = node_details[0][1]
            self._len = self._len - old_len + len(child)
            self._items[search_key] = child
            self._key = None
            new_node = self
            if type(child) is LeafNode:
                if old_size is None:
                    # The old node was an InternalNode which means it has now
                    # collapsed, so we need to check if it will chain to a
                    # collapse at this level.
                    trace.mutter("checking remap as InternalNode -> LeafNode")
                    new_node = self._check_remap(store)
                else:
                    # If the LeafNode has shrunk in size, we may want to run
                    # a remap check. Checking for a remap is expensive though
                    # and the frequency of a successful remap is very low.
                    # Shrinkage by small amounts is common, so we only do the
                    # remap check if the new_size is low or the shrinkage
                    # amount is over a configurable limit.
                    new_size = child._current_size()
                    shrinkage = old_size - new_size
                    if (shrinkage > 0 and new_size < _INTERESTING_NEW_SIZE
                        or shrinkage > _INTERESTING_SHRINKAGE_LIMIT):
                        trace.mutter(
                            "checking remap as size shrunk by %d to be %d",
                            shrinkage, new_size)
                        new_node = self._check_remap(store)
            if new_node._search_prefix is None:
                raise AssertionError("_search_prefix should not be None")
            return new_node._search_prefix, [('', new_node)]
        # child has overflowed - create a new intermediate node.
        # XXX: This is where we might want to try and expand our depth
        # to refer to more bytes of every child (which would give us
        # multiple pointers to child nodes, but fewer intermediate nodes)
        child = self._new_child(search_key, InternalNode)
        child._search_prefix = prefix
        for split, node in node_details:
            child.add_node(split, node)
        self._len = self._len - old_len + len(child)
        self._key = None
        return self._search_prefix, [("", self)]

    def _new_child(self, search_key, klass):
        """Create a new child node of type klass."""
        child = klass()
        child.set_maximum_size(self._maximum_size)
        child._key_width = self._key_width
        child._search_key_func = self._search_key_func
        self._items[search_key] = child
        return child

1112
    def serialise(self, store):
1113
        """Serialise the node to store.
1114
1115
        :param store: A VersionedFiles honouring the CHK extensions.
1116
        :return: An iterable of the keys inserted by this operation.
1117
        """
1118
        for node in self._items.itervalues():
1119
            if type(node) == tuple:
1120
                # Never deserialised.
1121
                continue
1122
            if node._key is not None:
1123
                # Never altered
1124
                continue
1125
            for key in node.serialise(store):
1126
                yield key
1127
        lines = ["chknode:\n"]
1128
        lines.append("%d\n" % self._maximum_size)
1129
        lines.append("%d\n" % self._key_width)
1130
        lines.append("%d\n" % self._len)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1131
        if self._search_prefix is None:
1132
            raise AssertionError("_search_prefix should not be None")
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1133
        lines.append('%s\n' % (self._search_prefix,))
1134
        prefix_len = len(self._search_prefix)
1135
        for prefix, node in sorted(self._items.items()):
1136
            if type(node) == tuple:
1137
                key = node[0]
1138
            else:
1139
                key = node._key[0]
1140
            serialised = "%s\x00%s\n" % (prefix, key)
3735.2.126 by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors
1141
            if not serialised.startswith(self._search_prefix):
1142
                raise AssertionError("prefixes mismatch: %s must start with %s"
1143
                    % (serialised, self._search_prefix))
4241.6.1 by Ian Clatworthy
chk_map code from brisbane-core
1144
            lines.append(serialised[prefix_len:])
1145
        sha1, _, _ = store.add_lines((None,), (), lines)
1146
        self._key = ("sha1:" + sha1,)
1147
        _page_cache.add(self._key, ''.join(lines))
1148
        yield self._key
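
    # Editor's note, not in the original source: an example of the
    # serialised form built above. For a hypothetical node with
    # _maximum_size=4096, _key_width=1, _len=2, _search_prefix='f' and two
    # children at search prefixes 'far' and 'foo', the stored lines are:
    #
    #   chknode:
    #   4096
    #   1
    #   2
    #   f
    #   ar\x00sha1:...   (child CHK key; hash elided)
    #   oo\x00sha1:...
    #
    # i.e. each child line is '<prefix>\x00<child key>' with the common
    # _search_prefix stripped from the front, since it is stored once above.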

    def _search_key(self, key):
        """Return the serialised key for key in this node."""
        # search keys are fixed width. All will be self._node_width wide, so we
        # pad as necessary.
        return (self._search_key_func(key) + '\x00'*self._node_width)[:self._node_width]
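        # Worked example (values hypothetical, not in the original source):
        # with the 'plain' search_key_func and _node_width 9, the key
        # ('foo', 'bar') maps to 'foo\x00bar' (7 bytes) and is padded to
        # 'foo\x00bar\x00\x00'; a longer search key would instead be
        # truncated to 9 bytes.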

    def _search_prefix_filter(self, key):
        """Serialise key for use as a prefix filter in iteritems."""
        return self._search_key_func(key)[:self._node_width]

    def _split(self, offset):
        """Split this node into smaller nodes starting at offset.

        :param offset: The offset to start the new child nodes at.
        :return: An iterable of (prefix, node) tuples. prefix is a byte
            prefix for reaching node.
        """
        if offset >= self._node_width:
            for node in self._items.values():
                for result in node._split(offset):
                    yield result
            return
        # Splitting at an offset inside this node's width was never
        # implemented (the original loop body was empty); raise rather than
        # silently yielding nothing.
        raise NotImplementedError(self._split)

    def refs(self):
        """Return the references to other CHKs held by this node."""
        if self._key is None:
            raise AssertionError("unserialised nodes have no refs.")
        refs = []
        for value in self._items.itervalues():
            if type(value) == tuple:
                refs.append(value)
            else:
                refs.append(value.key())
        return refs

    def _compute_search_prefix(self, extra_key=None):
        """Return the unique key prefix for this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        self._search_prefix = self.common_prefix_for_keys(self._items)
        return self._search_prefix

    def unmap(self, store, key, check_remap=True):
        """Remove key from this node and its children."""
        if not len(self._items):
            raise AssertionError("can't unmap in an empty InternalNode.")
        children = [node for node, _
                          in self._iter_nodes(store, key_filter=[key])]
        if children:
            child = children[0]
        else:
            raise KeyError(key)
        self._len -= 1
        unmapped = child.unmap(store, key)
        self._key = None
        search_key = self._search_key(key)
        if len(unmapped) == 0:
            # All child nodes are gone, remove the child:
            del self._items[search_key]
            unmapped = None
        else:
            # Stash the returned node
            self._items[search_key] = unmapped
        if len(self._items) == 1:
            # this node is no longer needed:
            return self._items.values()[0]
        if type(unmapped) is InternalNode:
            return self
        if check_remap:
            return self._check_remap(store)
        else:
            return self

    def _check_remap(self, store):
        """Check if all keys contained by children fit in a single LeafNode.

        :param store: A store to use for reading more nodes
        :return: Either self, or a new LeafNode which should replace self.
        """
        # Logic for how we determine when we need to rebuild
        # 1) Implicitly, unmap() is removing a key, which means the child
        #    nodes are going to be shrinking by some extent.
        # 2) If all children are LeafNodes, it is possible that they could be
        #    combined into a single LeafNode, which can then completely
        #    replace this internal node.
        # 3) If *one* child is an InternalNode, we assume it has already done
        #    all the work to determine that its children cannot collapse, and
        #    we can then assume that those nodes *plus* the current nodes don't
        #    have a chance of collapsing either.
        #    So a very cheap check is to just say if 'unmapped' is an
        #    InternalNode, we don't have to check further.

        # TODO: Another alternative is to check the total size of all known
        #       LeafNodes. If there is some formula we can use to determine the
        #       final size without actually having to read in any more
        #       children, it would be nice to have. However, we have to be
        #       careful with stuff like nodes that pull out the common prefix
        #       of each key, as adding a new key can change the common prefix
        #       and cause size changes greater than the length of one key.
        #       So for now, we just add everything to a new Leaf until it
        #       splits, as we know that will give the right answer
        new_leaf = LeafNode(search_key_func=self._search_key_func)
        new_leaf.set_maximum_size(self._maximum_size)
        new_leaf._key_width = self._key_width
        # A batch_size of 16 was chosen because:
        #   a) In testing, a 4k page held about 14 entries, so if we have more
        #      than 16 leaf nodes we are unlikely to fit them all in a single
        #      new leaf node. This still allows for 1 round trip
        #   b) With 16-way fan out, we can still do a single round trip
        #   c) With 255-way fan out, we don't want to read all 255 and destroy
        #      the page cache, just to determine that we really don't need it.
        for node, _ in self._iter_nodes(store, batch_size=16):
            if type(node) is InternalNode:
                # Without looking at any leaf nodes, we are sure the children
                # cannot collapse into a single LeafNode (point 3 above).
                return self
            for key, value in node._items.iteritems():
                if new_leaf._map_no_split(key, value):
                    return self
        trace.mutter("remap generated a new LeafNode")
        return new_leaf
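

# Illustrative sketch (editor's addition, not part of the original module):
# how the unmap()/_check_remap() collapse is expected to behave end to end.
# The helper name is hypothetical; `store` must be a VersionedFiles instance
# honouring the CHK extensions, supplied by the caller.
def _demo_unmap_collapse(store):
    # Build a map with a small maximum node size so it splits into an
    # InternalNode with several LeafNode children.
    chkmap = CHKMap(store, None)
    chkmap._root_node.set_maximum_size(100)
    for i in range(10):
        chkmap.map(('key%d' % i,), 'value%d' % i)
    root_key = chkmap._save()
    # Unmapping most keys shrinks the children; each unmap() may call
    # _check_remap(), which tries to fold the surviving items back into a
    # single LeafNode once they fit under the maximum size again.
    chkmap = CHKMap(store, root_key)
    for i in range(9):
        chkmap.unmap(('key%d' % i,))
    return chkmap._save()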


def _deserialise(bytes, key, search_key_func):
    """Helper for repository details - convert bytes to a node."""
    if bytes.startswith("chkleaf:\n"):
        node = LeafNode.deserialise(bytes, key, search_key_func=search_key_func)
    elif bytes.startswith("chknode:\n"):
        node = InternalNode.deserialise(bytes, key,
            search_key_func=search_key_func)
    else:
        raise AssertionError("Unknown node type.")
    return node


def _find_children_info(store, interesting_keys, uninteresting_keys, pb):
    """Read the associated records, and determine what is interesting."""
    uninteresting_keys = set(uninteresting_keys)
    chks_to_read = uninteresting_keys.union(interesting_keys)
    next_uninteresting = set()
    next_interesting = set()
    uninteresting_items = set()
    interesting_items = set()
    interesting_to_yield = []
    for record in store.get_record_stream(chks_to_read, 'unordered', True):
        # records_read.add(record.key())
        if pb is not None:
            pb.tick()
        bytes = record.get_bytes_as('fulltext')
        # We don't care about search_key_func for this code, because we only
        # care about external references.
        node = _deserialise(bytes, record.key, search_key_func=None)
        if record.key in uninteresting_keys:
            if type(node) is InternalNode:
                next_uninteresting.update(node.refs())
            else:
                # We know we are at a LeafNode, so we can pass None for the
                # store
                uninteresting_items.update(node.iteritems(None))
        else:
            interesting_to_yield.append(record.key)
            if type(node) is InternalNode:
                next_interesting.update(node.refs())
            else:
                interesting_items.update(node.iteritems(None))
    return (next_uninteresting, uninteresting_items,
            next_interesting, interesting_to_yield, interesting_items)


def _find_all_uninteresting(store, interesting_root_keys,
                            uninteresting_root_keys, pb):
    """Determine the full set of uninteresting keys."""
    # What about duplicates between interesting_root_keys and
    # uninteresting_root_keys?
    if not uninteresting_root_keys:
        # Shortcut case. We know there is nothing uninteresting to filter
        # out, so we just let the rest of the algorithm do the work.
        # We know there is nothing uninteresting, and we didn't have to read
        # any interesting records yet.
        return (set(), set(), set(interesting_root_keys), [], set())
    all_uninteresting_chks = set(uninteresting_root_keys)
    all_uninteresting_items = set()

    # First step, find the direct children of both the interesting and
    # uninteresting set
    (uninteresting_keys, uninteresting_items,
     interesting_keys, interesting_to_yield,
     interesting_items) = _find_children_info(store, interesting_root_keys,
                                              uninteresting_root_keys,
                                              pb=pb)
    all_uninteresting_chks.update(uninteresting_keys)
    all_uninteresting_items.update(uninteresting_items)
    del uninteresting_items
    # Note: Exact matches between interesting and uninteresting do not need
    #       to be searched further. Non-exact matches need to be searched in
    #       case there is a future exact-match.
    uninteresting_keys.difference_update(interesting_keys)

    # Second, find the full set of uninteresting bits reachable by the
    # uninteresting roots
    chks_to_read = uninteresting_keys
    while chks_to_read:
        next_chks = set()
        for record in store.get_record_stream(chks_to_read, 'unordered', False):
            # TODO: Handle 'absent'
            if pb is not None:
                pb.tick()
            bytes = record.get_bytes_as('fulltext')
            # We don't care about search_key_func for this code, because we
            # only care about external references.
            node = _deserialise(bytes, record.key, search_key_func=None)
            if type(node) is InternalNode:
                # uninteresting_prefix_chks.update(node._items.iteritems())
                chks = node._items.values()
                # TODO: Should we remove the entries that are already in
                #       all_uninteresting_chks?
                next_chks.update(chks)
                all_uninteresting_chks.update(chks)
            else:
                all_uninteresting_items.update(node._items.iteritems())
        chks_to_read = next_chks
    return (all_uninteresting_chks, all_uninteresting_items,
            interesting_keys, interesting_to_yield, interesting_items)


def iter_interesting_nodes(store, interesting_root_keys,
                           uninteresting_root_keys, pb=None):
    """Given root keys, find interesting nodes.

    Evaluate nodes referenced by interesting_root_keys. Ones that are also
    referenced from uninteresting_root_keys are not considered interesting.

    :param interesting_root_keys: keys which should be part of the
        "interesting" nodes (which will be yielded)
    :param uninteresting_root_keys: keys which should be filtered out of the
        result set.
    :return: Yield
        (interesting record, {interesting key:values})
    """
    # TODO: consider that it may be more memory efficient to use the 20-byte
    #       sha1 string, rather than tuples of hexadecimal sha1 strings.
    # TODO: Try to factor out a lot of the get_record_stream() calls into a
    #       helper function similar to _read_bytes. This function should be
    #       able to use nodes from the _page_cache as well as actually
    #       requesting bytes from the store.

    (all_uninteresting_chks, all_uninteresting_items, interesting_keys,
     interesting_to_yield, interesting_items) = _find_all_uninteresting(store,
        interesting_root_keys, uninteresting_root_keys, pb)

    # Now that we know everything uninteresting, we can yield information from
    # our first request
    interesting_items.difference_update(all_uninteresting_items)
    interesting_to_yield = set(interesting_to_yield) - all_uninteresting_chks
    if interesting_items:
        yield None, interesting_items
    if interesting_to_yield:
        # We request these records again, rather than buffering the root
        # records, most likely they are still in the _group_cache anyway.
        for record in store.get_record_stream(interesting_to_yield,
                                              'unordered', False):
            yield record, []
    all_uninteresting_chks.update(interesting_to_yield)
    interesting_keys.difference_update(all_uninteresting_chks)

    chks_to_read = interesting_keys
    counter = 0
    while chks_to_read:
        next_chks = set()
        for record in store.get_record_stream(chks_to_read, 'unordered', False):
            counter += 1
            if pb is not None:
                pb.update('find chk pages', counter)
            # TODO: Handle 'absent'?
            bytes = record.get_bytes_as('fulltext')
            # We don't care about search_key_func for this code, because we
            # only care about external references.
            node = _deserialise(bytes, record.key, search_key_func=None)
            if type(node) is InternalNode:
                # all_uninteresting_chks grows large, as it lists all nodes we
                # don't want to process (including already seen interesting
                # nodes).
                # small.difference_update(large) scales O(large), but
                # small.difference(large) scales O(small).
                # Also, we know we just _deserialised this node, so we can
                # access the dict directly.
                chks = set(node._items.itervalues()).difference(
                            all_uninteresting_chks)
                # Is set() and .difference_update better than:
                # chks = [chk for chk in node.refs()
                #              if chk not in all_uninteresting_chks]
                next_chks.update(chks)
                # These are now uninteresting everywhere else
                all_uninteresting_chks.update(chks)
                interesting_items = []
            else:
                interesting_items = [item for item in node._items.iteritems()
                                     if item not in all_uninteresting_items]
                # TODO: Do we need to filter out items that we have already
                #       seen on other pages? We don't really want to buffer the
                #       whole thing, but it does mean that callers need to
                #       understand they may get duplicate values.
                # all_uninteresting_items.update(interesting_items)
            yield record, interesting_items
        chks_to_read = next_chks
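

# Illustrative sketch (editor's addition, not part of the original module):
# typical use of iter_interesting_nodes() to collect the key/value pairs
# reachable from new_root_key but not from old_root_key. The helper name is
# hypothetical; `store` must honour the CHK extensions.
def _demo_new_items(store, new_root_key, old_root_key):
    new_items = {}
    for record, items in iter_interesting_nodes(
            store, [new_root_key], [old_root_key]):
        # record is a store record for an interesting CHK page (or None for
        # items recovered from the root pages themselves); items holds the
        # (key, value) tuples found on that page. Duplicates are possible
        # (see the TODO above), but a dict absorbs them.
        for key, value in items:
            new_items[key] = value
    return new_items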


try:
    from bzrlib._chk_map_pyx import (
        _search_key_16,
        _search_key_255,
        _deserialise_leaf_node,
        _deserialise_internal_node,
        )
except ImportError:
    from bzrlib._chk_map_py import (
        _search_key_16,
        _search_key_255,
        _deserialise_leaf_node,
        _deserialise_internal_node,
        )
search_key_registry.register('hash-16-way', _search_key_16)
search_key_registry.register('hash-255-way', _search_key_255)
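

# Illustrative sketch (editor's addition, not part of the original module):
# search key functions are looked up by name from search_key_registry.
# 'plain' joins the key bits with NULs, while the hashed variants return a
# fixed-width hex digest per bit (e.g. a CRC-based value such as '8C736521'
# for 'foo'; the exact value depends on the registered implementation).
def _demo_search_key(name='hash-16-way', key=('foo',)):
    search_key_func = search_key_registry.get(name)
    return search_key_func(key)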