# Copyright (C) 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Persistent maps from tuple_of_strings->string using CHK stores.

Overview and current status:

The CHKMap class implements a dict from tuple_of_strings->string by using a
trie with internal nodes of 8-bit fan out; the key tuples are mapped to
strings by joining them with \x00, and \x00 padding shorter keys out to the
length of the longest key. Leaf nodes are packed as densely as possible, and
internal nodes are all an additional 8-bits wide leading to a sparse upper
tree.

Updates to a CHKMap are done preferentially via the apply_delta method, to
allow optimisation of the update operation; but individual map/unmap calls
are possible and supported. Individual changes via map/unmap are buffered in
memory until the _save method is called to force serialisation of the tree.
apply_delta records its changes immediately by performing an implicit _save.

TODO:
-----

Densely packed upper nodes.

"""

from __future__ import absolute_import

import heapq
import threading

from bzrlib import lazy_import
lazy_import.lazy_import(globals(), """
from bzrlib import (
    errors,
    )
""")
from bzrlib import (
    errors,
    lru_cache,
    osutils,
    registry,
    static_tuple,
    trace,
    )
from bzrlib.static_tuple import StaticTuple

# approx 4MB
# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
# out, it takes 3.1MB to cache the layer.
_PAGE_CACHE_SIZE = 4*1024*1024
# Per thread caches for 2 reasons:
# - in the server we may be serving very different content, so we get less
#   cache thrashing.
# - we avoid locking on every cache lookup.
_thread_caches = threading.local()
# The page cache.
_thread_caches.page_cache = None

def _get_cache():
    """Get the per-thread page cache.

    We need a function to do this because in a new thread the _thread_caches
    threading.local object does not have the cache initialized yet.
    """
    page_cache = getattr(_thread_caches, 'page_cache', None)
    if page_cache is None:
        # We are caching bytes so len(value) is perfectly accurate
        page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)
        _thread_caches.page_cache = page_cache
    return page_cache


def clear_cache():
    _get_cache().clear()
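

# The sketch below is illustrative only and not part of the original module:
# it demonstrates the point of the per-thread cache comments above, namely
# that _get_cache() lazily builds one LRUSizeCache per thread, so threads
# never share (or need to lock) the page cache.
def _example_per_thread_cache():
    """Illustrative sketch only; not part of the original API."""
    caches = []
    def record_cache():
        # Each new thread sees page_cache unset and builds its own.
        caches.append(_get_cache())
    workers = [threading.Thread(target=record_cache) for _ in range(2)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # The two threads constructed distinct cache objects.
    return caches[0] is not caches[1]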


# If a ChildNode falls below this many bytes, we check for a remap
_INTERESTING_NEW_SIZE = 50
# If a ChildNode shrinks by more than this amount, we check for a remap
_INTERESTING_SHRINKAGE_LIMIT = 20


def _search_key_plain(key):
    """Map the key tuple into a search string that just uses the key bytes."""
    return '\x00'.join(key)


search_key_registry = registry.Registry()
search_key_registry.register('plain', _search_key_plain)
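

# Illustrative sketch only; not part of the original module. Shows how the
# 'plain' strategy flattens a key tuple with NUL separators, and how the
# registered function can be retrieved from search_key_registry by name.
def _example_search_key_lookup():
    """Illustrative sketch only; not part of the original API."""
    flat = _search_key_plain(('foo', 'bar'))
    if flat != 'foo\x00bar':
        raise AssertionError('unexpected search key: %r' % (flat,))
    # The registry maps strategy names to search key functions.
    if search_key_registry.get('plain') is not _search_key_plain:
        raise AssertionError('registry lookup did not round-trip')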


class CHKMap(object):
    """A persistent map from string to string backed by a CHK store."""

    __slots__ = ('_store', '_root_node', '_search_key_func')

    def __init__(self, store, root_key, search_key_func=None):
        """Create a CHKMap object.

        :param store: The store the CHKMap is stored in.
        :param root_key: The root key of the map. None to create an empty
            CHKMap.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        """
        self._store = store
        if search_key_func is None:
            search_key_func = _search_key_plain
        self._search_key_func = search_key_func
        if root_key is None:
            self._root_node = LeafNode(search_key_func=search_key_func)
        else:
            self._root_node = self._node_key(root_key)

    def apply_delta(self, delta):
        """Apply a delta to the map.

        :param delta: An iterable of old_key, new_key, new_value tuples.
            If new_key is not None, then new_key->new_value is inserted
            into the map; if old_key is not None, then the old mapping
            of old_key is removed.
        """
        has_deletes = False
        # Check preconditions first.
        as_st = StaticTuple.from_sequence
        new_items = set([as_st(key) for (old, key, value) in delta
                         if key is not None and old is None])
        existing_new = list(self.iteritems(key_filter=new_items))
        if existing_new:
            raise errors.InconsistentDeltaDelta(delta,
                "New items are already in the map %r." % existing_new)
        # Now apply changes.
        for old, new, value in delta:
            if old is not None and old != new:
                self.unmap(old, check_remap=False)
                has_deletes = True
        for old, new, value in delta:
            if new is not None:
                self.map(new, value)
        if has_deletes:
            self._check_remap()
        return self._save()

    def _ensure_root(self):
        """Ensure that the root node is an object not a key."""
        if type(self._root_node) is StaticTuple:
            # Demand-load the root
            self._root_node = self._get_node(self._root_node)

    def _get_node(self, node):
        """Get a node.

        Note that this does not update the _items dict in objects containing a
        reference to this node. As such it does not prevent subsequent IO being
        performed.

        :param node: A tuple key or node object.
        :return: A node object.
        """
        if type(node) is StaticTuple:
            bytes = self._read_bytes(node)
            return _deserialise(bytes, node,
                search_key_func=self._search_key_func)
        else:
            return node

    def _read_bytes(self, key):
        try:
            return _get_cache()[key]
        except KeyError:
            stream = self._store.get_record_stream([key], 'unordered', True)
            bytes = stream.next().get_bytes_as('fulltext')
            _get_cache()[key] = bytes
            return bytes

    def _dump_tree(self, include_keys=False):
        """Return the tree in a string representation."""
        self._ensure_root()
        res = self._dump_tree_node(self._root_node, prefix='', indent='',
                                   include_keys=include_keys)
        res.append('') # Give a trailing '\n'
        return '\n'.join(res)

    def _dump_tree_node(self, node, prefix, indent, include_keys=True):
        """For this node and all children, generate a string representation."""
        result = []
        if not include_keys:
            key_str = ''
        else:
            node_key = node.key()
            if node_key is not None:
                key_str = ' %s' % (node_key[0],)
            else:
                key_str = ' None'
        result.append('%s%r %s%s' % (indent, prefix, node.__class__.__name__,
                                     key_str))
        if type(node) is InternalNode:
            # Trigger all child nodes to get loaded
            list(node._iter_nodes(self._store))
            for prefix, sub in sorted(node._items.iteritems()):
                result.extend(self._dump_tree_node(sub, prefix, indent + '  ',
                                                   include_keys=include_keys))
        else:
            for key, value in sorted(node._items.iteritems()):
                # Don't use prefix nor indent here to line up when used in
                # tests in conjunction with assertEqualDiff
                result.append('      %r %r' % (tuple(key), value))
        return result
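
    # Example of _dump_tree output (derived from the code above): a map whose
    # root is a single LeafNode holding ('a',)->'va', dumped with
    # include_keys=False, renders as
    #   '' LeafNode
    #         ('a',) 'va'
    # followed by a trailing blank line; internal nodes recurse with two more
    # spaces of indent per level.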

    @classmethod
    def from_dict(klass, store, initial_value, maximum_size=0, key_width=1,
                  search_key_func=None):
        """Create a CHKMap in store with initial_value as the content.

        :param store: The store to record initial_value in, a VersionedFiles
            object with 1-tuple keys supporting CHK key generation.
        :param initial_value: A dict to store in store. Its keys and values
            must be bytestrings.
        :param maximum_size: The maximum_size rule to apply to nodes. This
            determines the size at which no new data is added to a single node.
        :param key_width: The number of elements in each key_tuple being stored
            in this map.
        :param search_key_func: A function mapping a key => bytes. These bytes
            are then used by the internal nodes to split up leaf nodes into
            multiple pages.
        :return: The root chk of the resulting CHKMap.
        """
        root_key = klass._create_directly(store, initial_value,
            maximum_size=maximum_size, key_width=key_width,
            search_key_func=search_key_func)
        if type(root_key) is not StaticTuple:
            raise AssertionError('we got a %s instead of a StaticTuple'
                                 % (type(root_key),))
        return root_key

    @classmethod
    def _create_via_map(klass, store, initial_value, maximum_size=0,
                        key_width=1, search_key_func=None):
        result = klass(store, None, search_key_func=search_key_func)
        result._root_node.set_maximum_size(maximum_size)
        result._root_node._key_width = key_width
        delta = []
        for key, value in initial_value.items():
            delta.append((None, key, value))
        root_key = result.apply_delta(delta)
        return root_key

    @classmethod
    def _create_directly(klass, store, initial_value, maximum_size=0,
                         key_width=1, search_key_func=None):
        node = LeafNode(search_key_func=search_key_func)
        node.set_maximum_size(maximum_size)
        node._key_width = key_width
        as_st = StaticTuple.from_sequence
        node._items = dict([(as_st(key), val) for key, val
                            in initial_value.iteritems()])
        node._raw_size = sum([node._key_value_len(key, value)
                              for key, value in node._items.iteritems()])
        node._len = len(node._items)
        node._compute_search_prefix()
        node._compute_serialised_prefix()
        if (node._len > 1
            and maximum_size
            and node._current_size() > maximum_size):
            prefix, node_details = node._split(store)
            if len(node_details) == 1:
                raise AssertionError('Failed to split using node._split')
            node = InternalNode(prefix, search_key_func=search_key_func)
            node.set_maximum_size(maximum_size)
            node._key_width = key_width
            for split, subnode in node_details:
                node.add_node(split, subnode)
        keys = list(node.serialise(store))
        return keys[-1]

    def iter_changes(self, basis):
        """Iterate over the changes between basis and self.

        :return: An iterator of tuples: (key, old_value, new_value). Old_value
            is None for keys only in self; new_value is None for keys only in
            basis.
        """
        # Overview:
        # Read both trees in lexicographic, highest-first order.
        # Any identical nodes we skip.
        # Any unique prefixes we output immediately.
        # Values in a leaf node are treated as single-value nodes in the tree
        # which allows them to be not-special-cased. We know to output them
        # because their value is a string, not a key(tuple) or node.
        #
        # Corner cases to beware of when considering this function:
        # *) common references are at different heights.
        #    consider two trees:
        #    {'a': LeafNode={'aaa':'foo', 'aab':'bar'}, 'b': LeafNode={'b'}}
        #    {'a': InternalNode={'aa':LeafNode={'aaa':'foo', 'aab':'bar'},
        #                        'ab':LeafNode={'ab':'bar'}}
        #     'b': LeafNode={'b'}}
        #    the node with aaa/aab will only be encountered in the second tree
        #    after reading the 'a' subtree, but it is encountered in the first
        #    tree immediately. Variations on this may have read internal nodes
        #    like this. We want to cut the entire pending subtree when we
        #    realise we have a common node. For this we use a list of keys -
        #    the path to a node - and check the entire path is clean as we
        #    process each item.
        if self._node_key(self._root_node) == self._node_key(basis._root_node):
            return
        self._ensure_root()
        basis._ensure_root()
        excluded_keys = set()
        self_node = self._root_node
        basis_node = basis._root_node
        # A heap, each element is prefix, node(tuple/NodeObject/string),
        # key_path (a list of tuples, tail-sharing down the tree.)
        self_pending = []
        basis_pending = []
        def process_node(node, path, a_map, pending):
            # take a node and expand it
            node = a_map._get_node(node)
            if type(node) == LeafNode:
                path = (node._key, path)
                for key, value in node._items.items():
                    # For a LeafNode, the key is a serialized_key, rather than
                    # a search_key, but the heap is using search_keys
                    search_key = node._search_key_func(key)
                    heapq.heappush(pending, (search_key, key, value, path))
            else:
                # type(node) == InternalNode
                path = (node._key, path)
                for prefix, child in node._items.items():
                    heapq.heappush(pending, (prefix, None, child, path))
        def process_common_internal_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for prefix, child in self_items - basis_items:
                heapq.heappush(self_pending, (prefix, None, child, path))
            path = (basis_node._key, None)
            for prefix, child in basis_items - self_items:
                heapq.heappush(basis_pending, (prefix, None, child, path))
        def process_common_leaf_nodes(self_node, basis_node):
            self_items = set(self_node._items.items())
            basis_items = set(basis_node._items.items())
            path = (self_node._key, None)
            for key, value in self_items - basis_items:
                prefix = self._search_key_func(key)
                heapq.heappush(self_pending, (prefix, key, value, path))
            path = (basis_node._key, None)
            for key, value in basis_items - self_items:
                prefix = basis._search_key_func(key)
                heapq.heappush(basis_pending, (prefix, key, value, path))
        def process_common_prefix_nodes(self_node, self_path,
                                        basis_node, basis_path):
            # Would it be more efficient if we could request both at the same
            # time?
            self_node = self._get_node(self_node)
            basis_node = basis._get_node(basis_node)
            if (type(self_node) == InternalNode
                and type(basis_node) == InternalNode):
                # Matching internal nodes
                process_common_internal_nodes(self_node, basis_node)
            elif (type(self_node) == LeafNode
                  and type(basis_node) == LeafNode):
                process_common_leaf_nodes(self_node, basis_node)
            else:
                process_node(self_node, self_path, self, self_pending)
                process_node(basis_node, basis_path, basis, basis_pending)
        process_common_prefix_nodes(self_node, None, basis_node, None)
        self_seen = set()
        basis_seen = set()
        excluded_keys = set()
        def check_excluded(key_path):
            # Note that this is N^2, it depends on us trimming trees
            # aggressively to not become slow.
            # A better implementation would probably have a reverse map
            # back to the children of a node, and jump straight to it when
            # a common node is detected, then proceed to remove the already
            # pending children. bzrlib.graph has a searcher module with a
            # similar problem.
            while key_path is not None:
                key, key_path = key_path
                if key in excluded_keys:
                    return True
            return False

        loop_counter = 0
        while self_pending or basis_pending:
            loop_counter += 1
            if not self_pending:
                # self is exhausted: output remainder of basis
                for prefix, key, node, path in basis_pending:
                    if check_excluded(path):
                        continue
                    node = basis._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(basis._store):
                            yield (key, value, None)
                return
            elif not basis_pending:
                # basis is exhausted: output remainder of self.
                for prefix, key, node, path in self_pending:
                    if check_excluded(path):
                        continue
                    node = self._get_node(node)
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        # subtree - fastpath the entire thing.
                        for key, value in node.iteritems(self._store):
                            yield (key, None, value)
                return
            else:
                # XXX: future optimisation - yield the smaller items
                #      immediately rather than pushing everything on/off the
                #      heaps. Applies to both internal nodes and leafnodes.
                if self_pending[0][0] < basis_pending[0][0]:
                    # expand self
                    prefix, key, node, path = heapq.heappop(self_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, None, node)
                    else:
                        process_node(node, path, self, self_pending)
                        continue
                elif self_pending[0][0] > basis_pending[0][0]:
                    # expand basis
                    prefix, key, node, path = heapq.heappop(basis_pending)
                    if check_excluded(path):
                        continue
                    if key is not None:
                        # a value
                        yield (key, node, None)
                    else:
                        process_node(node, path, basis, basis_pending)
                        continue
                else:
                    # common prefix: possibly expand both
                    if self_pending[0][1] is None:
                        # process next self
                        read_self = True
                    else:
                        read_self = False
                    if basis_pending[0][1] is None:
                        # process next basis
                        read_basis = True
                    else:
                        read_basis = False
                    if not read_self and not read_basis:
                        # compare a common value
                        self_details = heapq.heappop(self_pending)
                        basis_details = heapq.heappop(basis_pending)
                        if self_details[2] != basis_details[2]:
                            yield (self_details[1],
                                   basis_details[2], self_details[2])
                        continue
                    # At least one side wasn't a simple value
                    if (self._node_key(self_pending[0][2]) ==
                        self._node_key(basis_pending[0][2])):
                        # Identical pointers, skip (and don't bother adding to
                        # excluded, it won't turn up again).
                        heapq.heappop(self_pending)
                        heapq.heappop(basis_pending)
                        continue
                    # Now we need to expand this node before we can continue
                    if read_self and read_basis:
                        # Both sides start with the same prefix, so process
                        # them in parallel
                        self_prefix, _, self_node, self_path = heapq.heappop(
                            self_pending)
                        basis_prefix, _, basis_node, basis_path = heapq.heappop(
                            basis_pending)
                        if self_prefix != basis_prefix:
                            raise AssertionError(
                                '%r != %r' % (self_prefix, basis_prefix))
                        process_common_prefix_nodes(
                            self_node, self_path,
                            basis_node, basis_path)
                        continue
                    if read_self:
                        prefix, key, node, path = heapq.heappop(self_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, self, self_pending)
                    if read_basis:
                        prefix, key, node, path = heapq.heappop(basis_pending)
                        if check_excluded(path):
                            continue
                        process_node(node, path, basis, basis_pending)
        # print loop_counter

    def iteritems(self, key_filter=None):
        """Iterate over the CHKMap's contents, limited to key_filter if given."""
        self._ensure_root()
        if key_filter is not None:
            as_st = StaticTuple.from_sequence
            key_filter = [as_st(key) for key in key_filter]
        return self._root_node.iteritems(self._store, key_filter=key_filter)

    def key(self):
        """Return the key for this map."""
        if type(self._root_node) is StaticTuple:
            return self._root_node
        else:
            return self._root_node._key

    def __len__(self):
        self._ensure_root()
        return len(self._root_node)

    def map(self, key, value):
        """Map a key tuple to value.

        :param key: A key to map.
        :param value: The value to assign to key.
        """
        key = StaticTuple.from_sequence(key)
        # Need a root object.
        self._ensure_root()
        prefix, node_details = self._root_node.map(self._store, key, value)
        if len(node_details) == 1:
            self._root_node = node_details[0][1]
        else:
            self._root_node = InternalNode(prefix,
                                search_key_func=self._search_key_func)
            self._root_node.set_maximum_size(node_details[0][1].maximum_size)
            self._root_node._key_width = node_details[0][1]._key_width
            for split, node in node_details:
                self._root_node.add_node(split, node)

    def _node_key(self, node):
        """Get the key for a node whether it's a tuple or node."""
        if type(node) is tuple:
            node = StaticTuple.from_sequence(node)
        if type(node) is StaticTuple:
            return node
        else:
            return node._key

    def unmap(self, key, check_remap=True):
        """Remove key from the map."""
        key = StaticTuple.from_sequence(key)
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            unmapped = self._root_node.unmap(self._store, key,
                check_remap=check_remap)
        else:
            unmapped = self._root_node.unmap(self._store, key)
        self._root_node = unmapped

    def _check_remap(self):
        """Check if nodes can be collapsed."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            self._root_node = self._root_node._check_remap(self._store)

    def _save(self):
        """Save the map completely.

        :return: The key of the root node.
        """
        if type(self._root_node) is StaticTuple:
            # Already saved.
            return self._root_node
        keys = list(self._root_node.serialise(self._store))
        return keys[-1]
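

# Illustrative sketch only; not part of the original module. It exercises the
# public CHKMap API (from_dict, apply_delta, iter_changes, iteritems) against
# a hypothetical in-memory stand-in for the store. The stand-in is an
# assumption: it implements only the two calls this module makes on a store
# (add_lines and get_record_stream), not the full CHK-capable VersionedFiles
# interface real users must supply; reading back also relies on _deserialise,
# defined later in this module.
def _example_chkmap_roundtrip():
    """Illustrative sketch only; not part of the original API."""
    class _FakeRecord(object):
        def __init__(self, bytes):
            self._bytes = bytes
        def get_bytes_as(self, storage_kind):
            return self._bytes

    class _FakeStore(object):
        def __init__(self):
            self._content = {}
        def add_lines(self, key, parents, lines):
            # Mirrors the call in LeafNode.serialise: content is addressed by
            # the sha1 of its fulltext.
            bytes = ''.join(lines)
            sha1 = osutils.sha_string(bytes)
            self._content[StaticTuple('sha1:' + sha1,)] = bytes
            return sha1, len(bytes), None
        def get_record_stream(self, keys, ordering, include_delta_closure):
            for key in keys:
                yield _FakeRecord(self._content[key])

    store = _FakeStore()
    root_key = CHKMap.from_dict(store, {('a',): 'content-a'})
    chkmap = CHKMap(store, root_key)
    # apply_delta saves implicitly and returns the new root key.
    new_root = chkmap.apply_delta([(None, ('b',), 'content-b')])
    new_map = CHKMap(store, new_root)
    # iter_changes against the old root reports the single insertion.
    changes = list(new_map.iter_changes(CHKMap(store, root_key)))
    return sorted(new_map.iteritems()), changes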


class Node(object):
    """Base class defining the protocol for CHK Map nodes.

    :ivar _raw_size: The total size of the serialized key:value data, before
        adding the header bytes, and without prefix compression.
    """

    __slots__ = ('_key', '_len', '_maximum_size', '_key_width',
                 '_raw_size', '_items', '_search_prefix', '_search_key_func'
                )

    def __init__(self, key_width=1):
        """Create a node.

        :param key_width: The width of keys for this node.
        """
        self._key = None
        # Current number of elements
        self._len = 0
        self._maximum_size = 0
        self._key_width = key_width
        # current size in bytes
        self._raw_size = 0
        # The pointers/values this node has - meaning defined by child classes.
        self._items = {}
        # The common search prefix
        self._search_prefix = None

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return '%s(key:%s len:%s size:%s max:%s prefix:%s items:%s)' % (
            self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, items_str)

    def key(self):
        return self._key

    def __len__(self):
        return self._len

    @property
    def maximum_size(self):
        """What is the upper limit for adding references to a node."""
        return self._maximum_size

    def set_maximum_size(self, new_size):
        """Set the size threshold for nodes.

        :param new_size: The size at which no data is added to a node. 0 for
            unlimited.
        """
        self._maximum_size = new_size

    @classmethod
    def common_prefix(cls, prefix, key):
        """Given 2 strings, return the longest prefix common to both.

        :param prefix: This has been the common prefix for other keys, so it is
            more likely to be the common prefix in this case as well.
        :param key: Another string to compare to.
        """
        if key.startswith(prefix):
            return prefix
        pos = -1
        # Is there a better way to do this?
        for pos, (left, right) in enumerate(zip(prefix, key)):
            if left != right:
                pos -= 1
                break
        common = prefix[:pos+1]
        return common

    @classmethod
    def common_prefix_for_keys(cls, keys):
        """Given a list of keys, find their common prefix.

        :param keys: An iterable of strings.
        :return: The longest common prefix of all keys.
        """
        common_prefix = None
        for key in keys:
            if common_prefix is None:
                common_prefix = key
                continue
            common_prefix = cls.common_prefix(common_prefix, key)
            if not common_prefix:
                # if common_prefix is the empty string, then we know it won't
                # change further
                return ''
        return common_prefix
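

# Illustrative sketch only; not part of the original module. Node.common_prefix
# trims the running prefix only as far as the first mismatching byte, which is
# what keeps incremental prefix maintenance in the nodes cheap.
def _example_common_prefix():
    """Illustrative sketch only; not part of the original API."""
    if Node.common_prefix('abcd', 'abce') != 'abc':
        raise AssertionError('expected common prefix abc')
    # A key that extends the current prefix leaves it unchanged.
    if Node.common_prefix('ab', 'abcd') != 'ab':
        raise AssertionError('expected common prefix ab')
    # Folding a whole list of keys short-circuits to '' once nothing is
    # shared any more.
    if Node.common_prefix_for_keys(['abc', 'abd', 'xyz']) != '':
        raise AssertionError('expected empty common prefix')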


# Singleton indicating we have not computed _search_prefix yet
_unknown = object()

class LeafNode(Node):
    """A node containing actual key:value pairs.

    :ivar _items: A dict of key->value items. The key is in tuple form.
    :ivar _size: The number of bytes that would be used by serializing all of
        the key/value pairs.
    """

    __slots__ = ('_common_serialised_prefix',)

    def __init__(self, search_key_func=None):
        Node.__init__(self)
        # All of the keys in this leaf node share this common prefix
        self._common_serialised_prefix = None
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    def __repr__(self):
        items_str = str(sorted(self._items))
        if len(items_str) > 20:
            items_str = items_str[:16] + '...]'
        return \
            '%s(key:%s len:%s size:%s max:%s prefix:%s keywidth:%s items:%s)' \
            % (self.__class__.__name__, self._key, self._len, self._raw_size,
            self._maximum_size, self._search_prefix, self._key_width, items_str)

    def _current_size(self):
        """Answer the current serialised size of this node.

        This differs from self._raw_size in that it includes the bytes used for
        the header.
        """
        if self._common_serialised_prefix is None:
            bytes_for_items = 0
            prefix_len = 0
        else:
            # We will store a single string with the common prefix
            # And then that common prefix will not be stored in any of the
            # entry lines
            prefix_len = len(self._common_serialised_prefix)
            bytes_for_items = (self._raw_size - (prefix_len * self._len))
        return (9 # 'chkleaf:\n'
            + len(str(self._maximum_size)) + 1
            + len(str(self._key_width)) + 1
            + len(str(self._len)) + 1
            + prefix_len + 1
            + bytes_for_items)

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes, with key key, into a LeafNode.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        """
        key = static_tuple.expect_static_tuple(key)
        return _deserialise_leaf_node(bytes, key,
                                      search_key_func=search_key_func)

    def iteritems(self, store, key_filter=None):
        """Iterate over items in the node.

        :param key_filter: A filter to apply to the node. It should be a
            list/set/dict or similar repeatedly iterable container.
        """
        if key_filter is not None:
            # Adjust the filter - short elements go to a prefix filter. All
            # other items are looked up directly.
            # XXX: perhaps defaultdict? Profiling<rinse and repeat>
            filters = {}
            for key in key_filter:
                if len(key) == self._key_width:
                    # This filter is meant to match exactly one key, yield it
                    # if we have it.
                    try:
                        yield key, self._items[key]
                    except KeyError:
                        # This key is not present in this map, continue
                        pass
                else:
                    # Short items, we need to match based on a prefix
                    length_filter = filters.setdefault(len(key), set())
                    length_filter.add(key)
            if filters:
                filters = filters.items()
                for item in self._items.iteritems():
                    for length, length_filter in filters:
                        if item[0][:length] in length_filter:
                            yield item
                            break
        else:
            for item in self._items.iteritems():
                yield item

    def _key_value_len(self, key, value):
        # TODO: Should probably be done without actually joining the key, but
        #       then that can be done via the C extension
        return (len(self._serialise_key(key)) + 1
                + len(str(value.count('\n'))) + 1
                + len(value) + 1)

    def _search_key(self, key):
        return self._search_key_func(key)

    def _map_no_split(self, key, value):
        """Map a key to a value.

        This assumes either the key does not already exist, or you have already
        removed its size and length from self.

        :return: True if adding this node should cause us to split.
        """
        self._items[key] = value
        self._raw_size += self._key_value_len(key, value)
        self._len += 1
        serialised_key = self._serialise_key(key)
        if self._common_serialised_prefix is None:
            self._common_serialised_prefix = serialised_key
        else:
            self._common_serialised_prefix = self.common_prefix(
                self._common_serialised_prefix, serialised_key)
        search_key = self._search_key(key)
        if self._search_prefix is _unknown:
            self._compute_search_prefix()
        if self._search_prefix is None:
            self._search_prefix = search_key
        else:
            self._search_prefix = self.common_prefix(
                self._search_prefix, search_key)
        if (self._len > 1
            and self._maximum_size
            and self._current_size() > self._maximum_size):
            # Check to see if all of the search_keys for this node are
            # identical. We allow the node to grow under that circumstance
            # (we could track this as common state, but it is infrequent)
            if (search_key != self._search_prefix
                or not self._are_search_keys_identical()):
                return True
        return False

    def _split(self, store):
        """We have overflowed.

        Split this node into multiple LeafNodes, return it up the stack so that
        the next layer creates a new InternalNode and references the new nodes.

        :return: (common_serialised_prefix, [(node_serialised_prefix, node)])
        """
        if self._search_prefix is _unknown:
            raise AssertionError('Search prefix must be known')
        common_prefix = self._search_prefix
        split_at = len(common_prefix) + 1
        result = {}
        for key, value in self._items.iteritems():
            search_key = self._search_key(key)
            prefix = search_key[:split_at]
            # TODO: Generally only 1 key can be exactly the right length,
            #       which means we can only have 1 key in the node pointed
            #       at by the 'prefix\0' key. We might want to consider
            #       folding it into the containing InternalNode rather than
            #       having a fixed length-1 node.
            #       Note this is probably not true for hash keys, as they
            #       may get a '\00' node anywhere, but won't have keys of
            #       different lengths.
            if len(prefix) < split_at:
                prefix += '\x00'*(split_at - len(prefix))
            if prefix not in result:
                node = LeafNode(search_key_func=self._search_key_func)
                node.set_maximum_size(self._maximum_size)
                node._key_width = self._key_width
                result[prefix] = node
            else:
                node = result[prefix]
            sub_prefix, node_details = node.map(store, key, value)
            if len(node_details) > 1:
                if prefix != sub_prefix:
                    # This node has been split and is now found via a different
                    # path
                    result.pop(prefix)
                new_node = InternalNode(sub_prefix,
                    search_key_func=self._search_key_func)
                new_node.set_maximum_size(self._maximum_size)
                new_node._key_width = self._key_width
                for split, node in node_details:
                    new_node.add_node(split, node)
                result[prefix] = new_node
        return common_prefix, result.items()

    def map(self, store, key, value):
        """Map key to value."""
        if key in self._items:
            self._raw_size -= self._key_value_len(key, self._items[key])
            self._len -= 1
        self._key = None
        if self._map_no_split(key, value):
            return self._split(store)
        else:
            if self._search_prefix is _unknown:
                raise AssertionError('%r must be known' % self._search_prefix)
            return self._search_prefix, [("", self)]

    _serialise_key = '\x00'.join

    def serialise(self, store):
        """Serialise the LeafNode to store.

        :param store: A VersionedFiles honouring the CHK extensions.
        :return: An iterable of the keys inserted by this operation.
        """
        lines = ["chkleaf:\n"]
        lines.append("%d\n" % self._maximum_size)
        lines.append("%d\n" % self._key_width)
        lines.append("%d\n" % self._len)
        if self._common_serialised_prefix is None:
            lines.append('\n')
            if len(self._items) != 0:
                raise AssertionError('If _common_serialised_prefix is None'
                    ' we should have no items')
        else:
            lines.append('%s\n' % (self._common_serialised_prefix,))
            prefix_len = len(self._common_serialised_prefix)
        for key, value in sorted(self._items.items()):
            # Always add a final newline
            value_lines = osutils.chunks_to_lines([value + '\n'])
            serialized = "%s\x00%s\n" % (self._serialise_key(key),
                                         len(value_lines))
            if not serialized.startswith(self._common_serialised_prefix):
                raise AssertionError('We thought the common prefix was %r'
                    ' but entry %r does not have it in common'
                    % (self._common_serialised_prefix, serialized))
            lines.append(serialized[prefix_len:])
            lines.extend(value_lines)
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = StaticTuple("sha1:" + sha1,).intern()
        bytes = ''.join(lines)
        if len(bytes) != self._current_size():
            raise AssertionError('Invalid _current_size')
        _get_cache()[self._key] = bytes
        return [self._key]
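
    # Serialised layout sketch (derived from serialise() above): a leaf with
    # maximum_size 10, key_width 1 and two entries ('foo',)->'v1' and
    # ('fox',)->'v2' (common serialised prefix 'fo') is stored as the lines
    #   'chkleaf:\n', '10\n', '1\n', '2\n', 'fo\n',
    #   'o\x001\n', 'v1\n', 'x\x001\n', 'v2\n'
    # i.e. each entry carries the key remainder after the common prefix, the
    # number of value lines, and then the value lines themselves.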

    def refs(self):
        """Return the references to other CHK's held by this node."""
        return []

    def _compute_search_prefix(self):
        """Determine the common search prefix for all keys in this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        search_keys = [self._search_key_func(key) for key in self._items]
        self._search_prefix = self.common_prefix_for_keys(search_keys)
        return self._search_prefix

    def _are_search_keys_identical(self):
        """Check to see if the search keys for all entries are the same.

        When using a hash as the search_key it is possible for non-identical
        keys to collide. If that happens enough, we may try to overflow a
        LeafNode, but as all are collisions, we must not split.
        """
        common_search_key = None
        for key in self._items:
            search_key = self._search_key(key)
            if common_search_key is None:
                common_search_key = search_key
            elif search_key != common_search_key:
                return False
        return True

    def _compute_serialised_prefix(self):
        """Determine the common prefix for serialised keys in this node.

        :return: A bytestring of the longest serialised key prefix that is
            unique within this node.
        """
        serialised_keys = [self._serialise_key(key) for key in self._items]
        self._common_serialised_prefix = self.common_prefix_for_keys(
            serialised_keys)
        return self._common_serialised_prefix

    def unmap(self, store, key):
        """Unmap key from the node."""
        try:
            self._raw_size -= self._key_value_len(key, self._items[key])
        except KeyError:
            trace.mutter("key %s not found in %r", key, self._items)
            raise
        self._len -= 1
        del self._items[key]
        self._key = None
        # Recompute from scratch
        self._compute_search_prefix()
        self._compute_serialised_prefix()
        return self
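

# Illustrative sketch only; not part of the original module. With
# _maximum_size left at 0 (unlimited) a LeafNode never splits, so the basic
# map/iteritems protocol can be exercised without any store: map() only
# consults the store when a split occurs, so passing None is safe here.
def _example_leaf_node_in_memory():
    """Illustrative sketch only; not part of the original API."""
    node = LeafNode()
    prefix, node_details = node.map(None, StaticTuple('foo',), 'value')
    # No split: the node comes back unchanged, keyed by the empty prefix.
    if node_details != [("", node)]:
        raise AssertionError('expected the unsplit node back')
    if list(node.iteritems(None)) != [(StaticTuple('foo',), 'value')]:
        raise AssertionError('expected the single mapped item')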
985 |
class InternalNode(Node): |
|
986 |
"""A node that contains references to other nodes.
|
|
987 |
||
988 |
An InternalNode is responsible for mapping search key prefixes to child
|
|
989 |
nodes.
|
|
990 |
||
991 |
:ivar _items: serialised_key => node dictionary. node may be a tuple,
|
|
992 |
LeafNode or InternalNode.
|
|
993 |
"""
|
|
994 |
||
4759.1.2
by John Arbash Meinel
Change CHKMap to use __slots__ |
995 |
__slots__ = ('_node_width',) |
996 |
||
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
997 |
def __init__(self, prefix='', search_key_func=None): |
998 |
Node.__init__(self) |
|
999 |
# The size of an internalnode with default values and no children.
|
|
1000 |
# How many octets key prefixes within this node are.
|
|
1001 |
self._node_width = 0 |
|
1002 |
self._search_prefix = prefix |
|
1003 |
if search_key_func is None: |
|
1004 |
self._search_key_func = _search_key_plain |
|
1005 |
else: |
|
1006 |
self._search_key_func = search_key_func |
|
1007 |
||
1008 |
def add_node(self, prefix, node): |
|
1009 |
"""Add a child node with prefix prefix, and node node.
|
|
1010 |
||
1011 |
:param prefix: The search key prefix for node.
|
|
1012 |
:param node: The node being added.
|
|
1013 |
"""
|
|
3735.2.126
by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors |
1014 |
if self._search_prefix is None: |
1015 |
raise AssertionError("_search_prefix should not be None") |
|
1016 |
if not prefix.startswith(self._search_prefix): |
|
1017 |
raise AssertionError("prefixes mismatch: %s must start with %s" |
|
1018 |
% (prefix,self._search_prefix)) |
|
1019 |
if len(prefix) != len(self._search_prefix) + 1: |
|
1020 |
raise AssertionError("prefix wrong length: len(%s) is not %d" % |
|
1021 |
(prefix, len(self._search_prefix) + 1)) |
|
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1022 |
self._len += len(node) |
1023 |
if not len(self._items): |
|
1024 |
self._node_width = len(prefix) |
|
3735.2.126
by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors |
1025 |
if self._node_width != len(self._search_prefix) + 1: |
1026 |
raise AssertionError("node width mismatch: %d is not %d" % |
|
1027 |
(self._node_width, len(self._search_prefix) + 1)) |
|
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1028 |
self._items[prefix] = node |
1029 |
self._key = None |
|
1030 |
||
1031 |
def _current_size(self): |
|
1032 |
"""Answer the current serialised size of this node."""
|
|
1033 |
return (self._raw_size + len(str(self._len)) + len(str(self._key_width)) + |
|
1034 |
len(str(self._maximum_size))) |
|
1035 |
||
1036 |
@classmethod
|
|
1037 |
def deserialise(klass, bytes, key, search_key_func=None): |
|
1038 |
"""Deserialise bytes to an InternalNode, with key key.
|
|
1039 |
||
1040 |
:param bytes: The bytes of the node.
|
|
1041 |
:param key: The key that the serialised node has.
|
|
1042 |
:return: An InternalNode instance.
|
|
1043 |
"""
|
|
4668.3.1
by John Arbash Meinel
Fix bug #471193, allow tuples into the CHK code. |
1044 |
key = static_tuple.expect_static_tuple(key) |
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1045 |
return _deserialise_internal_node(bytes, key, |
1046 |
search_key_func=search_key_func) |
|
1047 |
||
1048 |
def iteritems(self, store, key_filter=None): |
|
1049 |
for node, node_filter in self._iter_nodes(store, key_filter=key_filter): |
|
1050 |
for item in node.iteritems(store, key_filter=node_filter): |
|
1051 |
yield item |
|
1052 |
||
1053 |
def _iter_nodes(self, store, key_filter=None, batch_size=None): |
|
1054 |
"""Iterate over node objects which match key_filter.
|
|
1055 |
||
1056 |
:param store: A store to use for accessing content.
|
|
1057 |
:param key_filter: A key filter to filter nodes. Only nodes that might
|
|
1058 |
contain a key in key_filter will be returned.
|
|
1059 |
:param batch_size: If not None, then we will return the nodes that had
|
|
1060 |
to be read using get_record_stream in batches, rather than reading
|
|
1061 |
them all at once.
|
|
1062 |
:return: An iterable of nodes. This function does not have to be fully
|
|
1063 |
consumed. (There will be no pending I/O when items are being returned.)
|
|
1064 |
"""
|
|
1065 |
# Map from chk key ('sha1:...',) to (prefix, key_filter)
|
|
1066 |
# prefix is the key in self._items to use, key_filter is the key_filter
|
|
1067 |
# entries that would match this node
|
|
1068 |
keys = {} |
|
4413.4.1
by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key. |
1069 |
shortcut = False |
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1070 |
if key_filter is None: |
4413.4.1
by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key. |
1071 |
# yielding all nodes, yield whatever we have, and queue up a read
|
1072 |
# for whatever we are missing
|
|
1073 |
shortcut = True |
|
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1074 |
for prefix, node in self._items.iteritems(): |
4679.9.4
by John Arbash Meinel
A bit broken, but getting there. |
1075 |
if node.__class__ is StaticTuple: |
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1076 |
keys[node] = (prefix, None) |
1077 |
else: |
|
1078 |
yield node, None |
|
4413.4.1
by John Arbash Meinel
Add a shortcut for the case when we are searching for a single full-width key. |
1079 |
elif len(key_filter) == 1: |
4413.4.2
by John Arbash Meinel
Rewrite the shortcuts. |
1080 |
# Technically, this path could also be handled by the first check
|
1081 |
# in 'self._node_width' in length_filters. However, we can handle
|
|
1082 |
# this case without spending any time building up the
|
|
1083 |
# prefix_to_keys, etc state.
|
|
1084 |
||
1085 |
# This is a bit ugly, but TIMEIT showed it to be by far the fastest
|
|
1086 |
# 0.626us list(key_filter)[0]
|
|
1087 |
# is a func() for list(), 2 mallocs, and a getitem
|
|
1088 |
# 0.489us [k for k in key_filter][0]
|
|
1089 |
# still has the mallocs, avoids the func() call
|
|
1090 |
# 0.350us iter(key_filter).next()
|
|
1091 |
# has a func() call, and mallocs an iterator
|
|
1092 |
# 0.125us for key in key_filter: pass
|
|
1093 |
# no func() overhead, might malloc an iterator
|
|
1094 |
# 0.105us for key in key_filter: break
|
|
1095 |
# no func() overhead, might malloc an iterator, probably
|
|
1096 |
# avoids checking an 'else' clause as part of the for
|
|
1097 |
for key in key_filter: |
|
1098 |
break
|
|
            search_prefix = self._search_prefix_filter(key)
            if len(search_prefix) == self._node_width:
                # This item will match exactly, so just do a dict lookup, and
                # see what we can return
                shortcut = True
                try:
                    node = self._items[search_prefix]
                except KeyError:
                    # A given key can only match 1 child node; if it isn't
                    # there, then we can just return nothing
                    return
                if node.__class__ is StaticTuple:
                    keys[node] = (search_prefix, [key])
                else:
                    # This is loaded, and the only thing that can match,
                    # return
                    yield node, [key]
                    return
        if not shortcut:
            # First, convert all keys into a list of search prefixes
            # Aggregate common prefixes, and track the keys they come from
            prefix_to_keys = {}
            length_filters = {}
            for key in key_filter:
                search_prefix = self._search_prefix_filter(key)
                length_filter = length_filters.setdefault(
                                    len(search_prefix), set())
                length_filter.add(search_prefix)
                prefix_to_keys.setdefault(search_prefix, []).append(key)

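            # Illustrative sketch (hypothetical data): with a node width of 2
            # and keys whose search prefixes are 'aa', 'aa' and 'ab', the
            # state built above would be roughly
            #   length_filters = {2: set(['aa', 'ab'])}
            #   prefix_to_keys = {'aa': [key1, key2], 'ab': [key3]}
            # which lets the exact-width branch below use plain dict lookups.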
            if (self._node_width in length_filters
                and len(length_filters) == 1):
                # all of the search prefixes match exactly _node_width. This
                # means that everything is an exact match, and we can do a
                # lookup into self._items, rather than iterating over the items
                # dict.
                search_prefixes = length_filters[self._node_width]
                for search_prefix in search_prefixes:
                    try:
                        node = self._items[search_prefix]
                    except KeyError:
                        # We can ignore this one
                        continue
                    node_key_filter = prefix_to_keys[search_prefix]
                    if node.__class__ is StaticTuple:
                        keys[node] = (search_prefix, node_key_filter)
                    else:
                        yield node, node_key_filter
            else:
                # The slow way. We walk every item in self._items, and check to
                # see if there are any matches
                length_filters = length_filters.items()
                for prefix, node in self._items.iteritems():
                    node_key_filter = []
                    for length, length_filter in length_filters:
                        sub_prefix = prefix[:length]
                        if sub_prefix in length_filter:
                            node_key_filter.extend(prefix_to_keys[sub_prefix])
                    if node_key_filter: # this key matched something, yield it
                        if node.__class__ is StaticTuple:
                            keys[node] = (prefix, node_key_filter)
                        else:
                            yield node, node_key_filter
        if keys:
            # Look in the page cache for some more bytes
            found_keys = set()
            for key in keys:
                try:
                    bytes = _get_cache()[key]
                except KeyError:
                    continue
                else:
                    node = _deserialise(bytes, key,
                        search_key_func=self._search_key_func)
                    prefix, node_key_filter = keys[key]
                    self._items[prefix] = node
                    found_keys.add(key)
                    yield node, node_key_filter
            for key in found_keys:
                del keys[key]
        if keys:
            # demand load some pages.
            if batch_size is None:
                # Read all the keys in
                batch_size = len(keys)
            key_order = list(keys)
            for batch_start in range(0, len(key_order), batch_size):
                batch = key_order[batch_start:batch_start + batch_size]
                # We have to fully consume the stream so there is no pending
                # I/O, so we buffer the nodes for now.
                stream = store.get_record_stream(batch, 'unordered', True)
                node_and_filters = []
                for record in stream:
                    bytes = record.get_bytes_as('fulltext')
                    node = _deserialise(bytes, record.key,
                        search_key_func=self._search_key_func)
                    prefix, node_key_filter = keys[record.key]
                    node_and_filters.append((node, node_key_filter))
                    self._items[prefix] = node
                    _get_cache()[record.key] = bytes
                for info in node_and_filters:
                    yield info

    def map(self, store, key, value):
        """Map key to value."""
        if not len(self._items):
            raise AssertionError("can't map in an empty InternalNode.")
        search_key = self._search_key(key)
        if self._node_width != len(self._search_prefix) + 1:
            raise AssertionError("node width mismatch: %d is not %d" %
                (self._node_width, len(self._search_prefix) + 1))
        if not search_key.startswith(self._search_prefix):
            # This key doesn't fit in this index, so we need to split at the
            # point where it would fit, insert self into that internal node,
            # and then map this key into that node.
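            # Illustrative sketch (hypothetical prefixes): if this node's
            # _search_prefix is 'aab' and the new search_key starts with 'ax',
            # common_prefix('aab', 'ax') is 'a'; we then create a parent for
            # 'a', add self under 'aa' (one byte deeper), and re-dispatch the
            # map() call through that new parent.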
            new_prefix = self.common_prefix(self._search_prefix,
                                            search_key)
            new_parent = InternalNode(new_prefix,
                search_key_func=self._search_key_func)
            new_parent.set_maximum_size(self._maximum_size)
            new_parent._key_width = self._key_width
            new_parent.add_node(self._search_prefix[:len(new_prefix)+1],
                                self)
            return new_parent.map(store, key, value)
        children = [node for node, _
                          in self._iter_nodes(store, key_filter=[key])]
        if children:
            child = children[0]
        else:
            # new child needed:
            child = self._new_child(search_key, LeafNode)
        old_len = len(child)
        if type(child) is LeafNode:
            old_size = child._current_size()
        else:
            old_size = None
        prefix, node_details = child.map(store, key, value)
        if len(node_details) == 1:
            # child may have shrunk, or might be a new node
            child = node_details[0][1]
            self._len = self._len - old_len + len(child)
            self._items[search_key] = child
            self._key = None
            new_node = self
            if type(child) is LeafNode:
                if old_size is None:
                    # The old node was an InternalNode which means it has now
                    # collapsed, so we need to check if it will chain to a
                    # collapse at this level.
                    trace.mutter("checking remap as InternalNode -> LeafNode")
                    new_node = self._check_remap(store)
                else:
                    # If the LeafNode has shrunk in size, we may want to run
                    # a remap check. Checking for a remap is expensive though
                    # and the frequency of a successful remap is very low.
                    # Shrinkage by small amounts is common, so we only do the
                    # remap check if the new_size is low or the shrinkage
                    # amount is over a configurable limit.
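                    # Illustrative arithmetic (assuming the module-level
                    # defaults _INTERESTING_NEW_SIZE == 50 and
                    # _INTERESTING_SHRINKAGE_LIMIT == 20, defined elsewhere
                    # in this module): a child shrinking from 180 to 45 bytes
                    # triggers a check (new_size < 50), as does 180 to 155
                    # (shrinkage 25 > 20), while 180 to 170 (shrinkage 10,
                    # new_size 170) does not.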
|
1257 |
new_size = child._current_size() |
|
1258 |
shrinkage = old_size - new_size |
|
1259 |
if (shrinkage > 0 and new_size < _INTERESTING_NEW_SIZE |
|
1260 |
or shrinkage > _INTERESTING_SHRINKAGE_LIMIT): |
|
1261 |
trace.mutter( |
|
1262 |
"checking remap as size shrunk by %d to be %d", |
|
1263 |
shrinkage, new_size) |
|
1264 |
new_node = self._check_remap(store) |
|
3735.2.126
by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors |
1265 |
if new_node._search_prefix is None: |
1266 |
raise AssertionError("_search_prefix should not be None") |
|
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1267 |
return new_node._search_prefix, [('', new_node)] |
1268 |
# child has overflown - create a new intermediate node.
|
|
1269 |
# XXX: This is where we might want to try and expand our depth
|
|
1270 |
# to refer to more bytes of every child (which would give us
|
|
1271 |
# multiple pointers to child nodes, but less intermediate nodes)
|
|
1272 |
child = self._new_child(search_key, InternalNode) |
|
1273 |
child._search_prefix = prefix |
|
1274 |
for split, node in node_details: |
|
1275 |
child.add_node(split, node) |
|
1276 |
self._len = self._len - old_len + len(child) |
|
1277 |
self._key = None |
|
1278 |
return self._search_prefix, [("", self)] |
|
1279 |
||
1280 |
def _new_child(self, search_key, klass): |
|
1281 |
"""Create a new child node of type klass."""
|
|
1282 |
child = klass() |
|
1283 |
child.set_maximum_size(self._maximum_size) |
|
1284 |
child._key_width = self._key_width |
|
1285 |
child._search_key_func = self._search_key_func |
|
1286 |
self._items[search_key] = child |
|
1287 |
return child |
|
1288 |
||
1289 |
def serialise(self, store): |
|
1290 |
"""Serialise the node to store.
|
|
1291 |
||
1292 |
:param store: A VersionedFiles honouring the CHK extensions.
|
|
1293 |
:return: An iterable of the keys inserted by this operation.
|
|
1294 |
"""
|
|
1295 |
for node in self._items.itervalues(): |
|
4679.9.4
by John Arbash Meinel
A bit broken, but getting there. |
1296 |
if type(node) is StaticTuple: |
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1297 |
# Never deserialised.
|
1298 |
continue
|
|
1299 |
if node._key is not None: |
|
1300 |
# Never altered
|
|
1301 |
continue
|
|
1302 |
for key in node.serialise(store): |
|
1303 |
yield key |
|
1304 |
lines = ["chknode:\n"] |
|
1305 |
lines.append("%d\n" % self._maximum_size) |
|
1306 |
lines.append("%d\n" % self._key_width) |
|
1307 |
lines.append("%d\n" % self._len) |
|
3735.2.126
by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors |
1308 |
if self._search_prefix is None: |
1309 |
raise AssertionError("_search_prefix should not be None") |
|
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1310 |
lines.append('%s\n' % (self._search_prefix,)) |
1311 |
prefix_len = len(self._search_prefix) |
|
1312 |
for prefix, node in sorted(self._items.items()): |
|
4679.9.4
by John Arbash Meinel
A bit broken, but getting there. |
1313 |
if type(node) is StaticTuple: |
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1314 |
key = node[0] |
1315 |
else: |
|
1316 |
key = node._key[0] |
|
1317 |
serialised = "%s\x00%s\n" % (prefix, key) |
|
3735.2.126
by Ian Clatworthy
replace asserts in chk_map.py with AssertionErrors |
1318 |
if not serialised.startswith(self._search_prefix): |
1319 |
raise AssertionError("prefixes mismatch: %s must start with %s" |
|
1320 |
% (serialised, self._search_prefix)) |
|
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1321 |
lines.append(serialised[prefix_len:]) |
1322 |
sha1, _, _ = store.add_lines((None,), (), lines) |
|
4679.9.4
by John Arbash Meinel
A bit broken, but getting there. |
1323 |
self._key = StaticTuple("sha1:" + sha1,).intern() |
6215.1.5
by Martin Packman
Stop going through LRUCache.add for some cache stores in chk_map |
1324 |
_get_cache()[self._key] = ''.join(lines) |
4241.6.1
by Ian Clatworthy
chk_map code from brisbane-core |
1325 |
yield self._key |
1326 |
||
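    # Illustrative sketch of the page produced by serialise() (hypothetical
    # values):
    #
    #   chknode:
    #   4096
    #   1
    #   42
    #   ab
    #   c\x00sha1:1111...
    #   d\x00sha1:2222...
    #
    # The common search prefix ('ab') is written once; each child line then
    # carries only the remainder of its prefix plus the child's CHK key.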
    def _search_key(self, key):
        """Return the serialised key for key in this node."""
        # search keys are fixed width. All will be self._node_width wide, so we
        # pad as necessary.
        return (self._search_key_func(key) + '\x00'*self._node_width)[:self._node_width]

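    # Illustrative sketch (hypothetical width): with _node_width == 4, a raw
    # search key of 'ab' is padded to 'ab\x00\x00' and one of 'abcdef' is
    # truncated to 'abcd', so every prefix in self._items is exactly
    # _node_width bytes wide.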
    def _search_prefix_filter(self, key):
        """Serialise key for use as a prefix filter in iteritems."""
        return self._search_key_func(key)[:self._node_width]

    def _split(self, offset):
        """Split this node into smaller nodes starting at offset.

        :param offset: The offset to start the new child nodes at.
        :return: An iterable of (prefix, node) tuples. prefix is a byte
            prefix for reaching node.
        """
        if offset >= self._node_width:
            for node in self._items.values():
                for result in node._split(offset):
                    yield result
            return
        # Splitting at an offset inside this node's own width is not
        # implemented yet; this loop is a placeholder.
        for key, node in self._items.items():
            pass

    def refs(self):
        """Return the references to other CHKs held by this node."""
        if self._key is None:
            raise AssertionError("unserialised nodes have no refs.")
        refs = []
        for value in self._items.itervalues():
            if type(value) is StaticTuple:
                refs.append(value)
            else:
                refs.append(value.key())
        return refs

    def _compute_search_prefix(self, extra_key=None):
        """Return the unique key prefix for this node.

        :return: A bytestring of the longest search key prefix that is
            unique within this node.
        """
        self._search_prefix = self.common_prefix_for_keys(self._items)
        return self._search_prefix

    def unmap(self, store, key, check_remap=True):
        """Remove key from this node and its children."""
        if not len(self._items):
            raise AssertionError("can't unmap in an empty InternalNode.")
        children = [node for node, _
                          in self._iter_nodes(store, key_filter=[key])]
        if children:
            child = children[0]
        else:
            raise KeyError(key)
        self._len -= 1
        unmapped = child.unmap(store, key)
        self._key = None
        search_key = self._search_key(key)
        if len(unmapped) == 0:
            # All child nodes are gone, remove the child:
            del self._items[search_key]
            unmapped = None
        else:
            # Stash the returned node
            self._items[search_key] = unmapped
        if len(self._items) == 1:
            # this node is no longer needed:
            return self._items.values()[0]
        if type(unmapped) is InternalNode:
            return self
        if check_remap:
            return self._check_remap(store)
        else:
            return self

    def _check_remap(self, store):
        """Check if all keys contained by children fit in a single LeafNode.

        :param store: A store to use for reading more nodes
        :return: Either self, or a new LeafNode which should replace self.
        """
        # Logic for how we determine when we need to rebuild
        # 1) unmap() is implicitly removing a key, which means that the child
        #    nodes are going to be shrinking by some extent.
        # 2) If all children are LeafNodes, it is possible that they could be
        #    combined into a single LeafNode, which can then completely replace
        #    this internal node with a single LeafNode
        # 3) If *one* child is an InternalNode, we assume it has already done
        #    all the work to determine that its children cannot collapse, and
        #    we can then assume that those nodes *plus* the current nodes don't
        #    have a chance of collapsing either.
        #    So a very cheap check is to just say if 'unmapped' is an
        #    InternalNode, we don't have to check further.

        # TODO: Another alternative is to check the total size of all known
        #       LeafNodes. If there is some formula we can use to determine the
        #       final size without actually having to read in any more
        #       children, it would be nice to have. However, we have to be
        #       careful with stuff like nodes that pull out the common prefix
        #       of each key, as adding a new key can change the common prefix
        #       and cause size changes greater than the length of one key.
        #       So for now, we just add everything to a new Leaf until it
        #       splits, as we know that will give the right answer
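        # Illustrative sketch (hypothetical sizes): with _maximum_size == 4096
        # and two leaf children whose items total ~3k when serialised, the
        # loop below packs everything into new_leaf without _map_no_split()
        # ever reporting a needed split, so the caller replaces this
        # InternalNode with that single LeafNode.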
        new_leaf = LeafNode(search_key_func=self._search_key_func)
        new_leaf.set_maximum_size(self._maximum_size)
        new_leaf._key_width = self._key_width
        # A batch_size of 16 was chosen because:
        #   a) In testing, a 4k page held 14 items. So if we have more than 16
        #      leaf nodes we are unlikely to hold them in a single new leaf
        #      node. This still allows for 1 round trip
        #   b) With 16-way fan out, we can still do a single round trip
        #   c) With 255-way fan out, we don't want to read all 255 and destroy
        #      the page cache, just to determine that we really don't need it.
        for node, _ in self._iter_nodes(store, batch_size=16):
            if type(node) is InternalNode:
                # Without looking at any leaf nodes, we are sure a remap is
                # not possible
                return self
            for key, value in node._items.iteritems():
                if new_leaf._map_no_split(key, value):
                    return self
        trace.mutter("remap generated a new LeafNode")
        return new_leaf


def _deserialise(bytes, key, search_key_func):
    """Helper for repository details - convert bytes to a node."""
    if bytes.startswith("chkleaf:\n"):
        node = LeafNode.deserialise(bytes, key, search_key_func=search_key_func)
    elif bytes.startswith("chknode:\n"):
        node = InternalNode.deserialise(bytes, key,
                                        search_key_func=search_key_func)
    else:
        raise AssertionError("Unknown node type.")
    return node

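# Illustrative sketch (hypothetical bytes): _deserialise() dispatches purely
# on the page header, e.g.
#   _deserialise('chkleaf:\n...', ('sha1:abcd...',), search_key_func)
# hands the bytes to LeafNode.deserialise, while a 'chknode:\n' header goes
# to InternalNode.deserialise; anything else is an error.
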
class CHKMapDifference(object):
    """Iterate the stored pages and key,value pairs for (new - old).

    This class provides a generator over the stored CHK pages and the
    (key, value) pairs that are in any of the new maps and not in any of the
    old maps.

    Note that it may yield chk pages that are common (especially root nodes),
    but it won't yield (key,value) pairs that are common.
    """

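    # Minimal usage sketch (hypothetical keys; the store and search_key_func
    # would normally come from the owning repository, e.g. the module's
    # _search_key_plain):
    #
    #   diff = CHKMapDifference(store,
    #       [StaticTuple('sha1:new-root...',)],
    #       [StaticTuple('sha1:old-root...',)],
    #       search_key_func=_search_key_plain)
    #   for record, items in diff.process():
    #       ...  # record is a page to stream, items are new (key, value)s
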
    def __init__(self, store, new_root_keys, old_root_keys,
                 search_key_func, pb=None):
        # TODO: Should we add a StaticTuple barrier here? It would be nice to
        #       force callers to use StaticTuple, because there will often be
        #       lots of keys passed in here. And even if we cast it locally,
        #       that just means that we will have *both* a StaticTuple and a
        #       tuple() in memory, referring to the same object. (so a net
        #       increase in memory, not a decrease.)
        self._store = store
        self._new_root_keys = new_root_keys
        self._old_root_keys = old_root_keys
        self._pb = pb
        # All uninteresting chks that we have seen. By the time they are added
        # here, they should be either fully ignored, or queued up for
        # processing
        # TODO: This might grow to a large size if there are lots of merge
        #       parents, etc. However, it probably doesn't scale to O(history)
        #       like _processed_new_refs does.
        self._all_old_chks = set(self._old_root_keys)
        # All items that we have seen from the old_root_keys
        self._all_old_items = set()
        # These are interesting items which were either read, or already in the
        # interesting queue (so we don't need to walk them again)
        # TODO: processed_new_refs becomes O(all_chks), consider switching to
        #       SimpleSet here.
        self._processed_new_refs = set()
        self._search_key_func = search_key_func

        # The uninteresting and interesting nodes to be searched
        self._old_queue = []
        self._new_queue = []
        # Holds the (key, value) items found when processing the root nodes,
        # waiting for the uninteresting nodes to be walked
        self._new_item_queue = []
        self._state = None

    def _read_nodes_from_store(self, keys):
        # We chose not to use _get_cache(), because we think in
        # terms of records to be yielded. Also, we expect to touch each page
        # only 1 time during this code. (We may want to evaluate saving the
        # raw bytes into the page cache, which would allow a working tree
        # update after the fetch to not have to read the bytes again.)
        as_st = StaticTuple.from_sequence
        stream = self._store.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if self._pb is not None:
                self._pb.tick()
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self._store, record.key)
            bytes = record.get_bytes_as('fulltext')
            node = _deserialise(bytes, record.key,
                                search_key_func=self._search_key_func)
            if type(node) is InternalNode:
                # Note we don't have to do node.refs() because we know that
                # there are no children that have been pushed into this node
                # Note: Using as_st() here seemed to save 1.2MB, which would
                #       indicate that we keep 100k prefix_refs around while
                #       processing. They *should* be shorter lived than that...
                #       It does cost us ~10s of processing time
                #prefix_refs = [as_st(item) for item in node._items.iteritems()]
                prefix_refs = node._items.items()
                items = []
            else:
                prefix_refs = []
                # Note: We don't use a StaticTuple here. Profiling showed a
                #       minor memory improvement (0.8MB out of 335MB peak 0.2%)
                #       But a significant slowdown (15s / 145s, or 10%)
                items = node._items.items()
            yield record, node, prefix_refs, items

    def _read_old_roots(self):
        old_chks_to_enqueue = []
        all_old_chks = self._all_old_chks
        for record, node, prefix_refs, items in \
                self._read_nodes_from_store(self._old_root_keys):
            # Uninteresting node
            prefix_refs = [p_r for p_r in prefix_refs
                                if p_r[1] not in all_old_chks]
            new_refs = [p_r[1] for p_r in prefix_refs]
            all_old_chks.update(new_refs)
            # TODO: This might be a good time to turn items into StaticTuple
            #       instances and possibly intern them. However, this does not
            #       impact 'initial branch' performance, so I'm not worrying
            #       about this yet
            self._all_old_items.update(items)
            # Queue up the uninteresting references
            # Don't actually put them in the 'to-read' queue until we have
            # finished checking the interesting references
            old_chks_to_enqueue.extend(prefix_refs)
        return old_chks_to_enqueue

    def _enqueue_old(self, new_prefixes, old_chks_to_enqueue):
        # At this point, we have read all the uninteresting and interesting
        # items, so we can queue up the uninteresting stuff, knowing that we've
        # handled the interesting ones
        for prefix, ref in old_chks_to_enqueue:
            not_interesting = True
            for i in xrange(len(prefix), 0, -1):
                if prefix[:i] in new_prefixes:
                    not_interesting = False
                    break
            if not_interesting:
                # This prefix is not part of the remaining 'interesting set'
                continue
            self._old_queue.append(ref)

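    # Illustrative sketch (hypothetical prefixes): an old ref queued under
    # prefix 'abc' is kept only if some new page might still overlap it,
    # i.e. if any of 'abc', 'ab' or 'a' appears in new_prefixes; otherwise
    # nothing on the new side can match beneath it and the ref is dropped.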
    def _read_all_roots(self):
        """Read the root pages.

        This is structured as a generator, so that the root records can be
        yielded up to whoever needs them without any buffering.
        """
        # This is the bootstrap phase
        if not self._old_root_keys:
            # With no old_root_keys we can just shortcut and be ready
            # for _flush_new_queue
            self._new_queue = list(self._new_root_keys)
            return
        old_chks_to_enqueue = self._read_old_roots()
        # filter out any root keys that are already known to be uninteresting
        new_keys = set(self._new_root_keys).difference(self._all_old_chks)
        # These are the prefixes present in new_keys that we are considering
        # yielding
        new_prefixes = set()
        # We are about to yield all of these, so we don't want them getting
        # added a second time
        processed_new_refs = self._processed_new_refs
        processed_new_refs.update(new_keys)
        for record, node, prefix_refs, items in \
                self._read_nodes_from_store(new_keys):
            # At this level, we now know all the uninteresting references
            # So we filter and queue up whatever is remaining
            prefix_refs = [p_r for p_r in prefix_refs
                           if p_r[1] not in self._all_old_chks
                              and p_r[1] not in processed_new_refs]
            refs = [p_r[1] for p_r in prefix_refs]
            new_prefixes.update([p_r[0] for p_r in prefix_refs])
            self._new_queue.extend(refs)
            # TODO: We can potentially get multiple items here, however the
            #       current design allows for this, as callers will do the work
            #       to make the results unique. We might profile whether we
            #       gain anything by ensuring unique return values for items
            # TODO: This might be a good time to cast to StaticTuple, as
            #       self._new_item_queue will hold the contents of multiple
            #       records for an extended lifetime
            new_items = [item for item in items
                               if item not in self._all_old_items]
            self._new_item_queue.extend(new_items)
            new_prefixes.update([self._search_key_func(item[0])
                                 for item in new_items])
            processed_new_refs.update(refs)
            yield record
        # For new_prefixes we have the full length prefixes queued up.
        # However, we also need possible prefixes. (If we have a known ref to
        # 'ab', then we also need to include 'a'.) So expand the
        # new_prefixes to include all shorter prefixes
        for prefix in list(new_prefixes):
            new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
        self._enqueue_old(new_prefixes, old_chks_to_enqueue)

    def _flush_new_queue(self):
        # No need to maintain the heap invariant anymore, just pull things out
        # and process them
        refs = set(self._new_queue)
        self._new_queue = []
        # First pass, flush all interesting items and convert to using direct refs
        all_old_chks = self._all_old_chks
        processed_new_refs = self._processed_new_refs
        all_old_items = self._all_old_items
        new_items = [item for item in self._new_item_queue
                           if item not in all_old_items]
        self._new_item_queue = []
        if new_items:
            yield None, new_items
        refs = refs.difference(all_old_chks)
        processed_new_refs.update(refs)
        while refs:
            # TODO: Using a SimpleSet for self._processed_new_refs saved as
            #       much as 10MB of peak memory. However, it requires
            #       implementing a non-pyrex version.
            next_refs = set()
            next_refs_update = next_refs.update
            # Inlining _read_nodes_from_store improves 'bzr branch bzr.dev'
            # from 1m54s to 1m51s. Consider it.
            for record, _, p_refs, items in self._read_nodes_from_store(refs):
                if all_old_items:
                    # using the 'if' check saves about 145s => 141s, when
                    # streaming initial branch of Launchpad data.
                    items = [item for item in items
                             if item not in all_old_items]
                yield record, items
                next_refs_update([p_r[1] for p_r in p_refs])
                del p_refs
            # set1.difference(set/dict) walks all of set1, and checks if it
            # exists in 'other'.
            # set1.difference(iterable) walks all of iterable, and does a
            # 'difference_update' on a clone of set1. Pick wisely based on the
            # expected sizes of objects.
            # in our case it is expected that 'new_refs' will always be quite
            # small.
            next_refs = next_refs.difference(all_old_chks)
            next_refs = next_refs.difference(processed_new_refs)
            processed_new_refs.update(next_refs)
            refs = next_refs

    def _process_next_old(self):
        # Since we don't filter uninteresting any further than during
        # _read_all_roots, process the whole queue in a single pass.
        refs = self._old_queue
        self._old_queue = []
        all_old_chks = self._all_old_chks
        for record, _, prefix_refs, items in self._read_nodes_from_store(refs):
            # TODO: Use StaticTuple here?
            self._all_old_items.update(items)
            refs = [r for _, r in prefix_refs if r not in all_old_chks]
            self._old_queue.extend(refs)
            all_old_chks.update(refs)

    def _process_queues(self):
        while self._old_queue:
            self._process_next_old()
        return self._flush_new_queue()

    def process(self):
        for record in self._read_all_roots():
            yield record, []
        for record, items in self._process_queues():
            yield record, items


def iter_interesting_nodes(store, interesting_root_keys,
                           uninteresting_root_keys, pb=None):
    """Given root keys, find interesting nodes.

    Evaluate nodes referenced by interesting_root_keys. Ones that are also
    referenced from uninteresting_root_keys are not considered interesting.

    :param interesting_root_keys: keys which should be part of the
        "interesting" nodes (which will be yielded)
    :param uninteresting_root_keys: keys which should be filtered out of the
        result set.
    :return: Yields (interesting record, {interesting key: values}) pairs.
    """
    iterator = CHKMapDifference(store, interesting_root_keys,
                                uninteresting_root_keys,
                                search_key_func=store._search_key_func,
                                pb=pb)
    return iterator.process()

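# Minimal usage sketch for iter_interesting_nodes (hypothetical keys; store
# is assumed to be a CHK VersionedFiles with a _search_key_func attribute,
# as the call above requires):
#
#   for record, items in iter_interesting_nodes(
#           chk_bytes, [('sha1:new-root...',)], [('sha1:old-root...',)]):
#       if record is not None:
#           # a page that exists on the new side (roots may be common)
#           stream_out(record)
#       for key, value in items:
#           handle_new_item(key, value)
#
# stream_out() and handle_new_item() are placeholders for whatever the
# caller does with the data.
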
try:
    from bzrlib._chk_map_pyx import (
        _bytes_to_text_key,
        _search_key_16,
        _search_key_255,
        _deserialise_leaf_node,
        _deserialise_internal_node,
        )
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._chk_map_py import (
        _bytes_to_text_key,
        _search_key_16,
        _search_key_255,
        _deserialise_leaf_node,
        _deserialise_internal_node,
        )
search_key_registry.register('hash-16-way', _search_key_16)
search_key_registry.register('hash-255-way', _search_key_255)


def _check_key(key):
    """Helper function to assert that a key is properly formatted.

    This generally shouldn't be used in production code, but it can be helpful
    to debug problems.
    """
    if type(key) is not StaticTuple:
        raise TypeError('key %r is not StaticTuple but %s' % (key, type(key)))
    if len(key) != 1:
        raise ValueError('key %r should have length 1, not %d' % (key, len(key),))
    if type(key[0]) is not str:
        raise TypeError('key %r should hold a str, not %r'
                        % (key, type(key[0])))
    if not key[0].startswith('sha1:'):
        raise ValueError('key %r should point to a sha1:' % (key,))
