# Copyright (C) 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# ...

import threading

from bzrlib import (
    lru_cache,
    static_tuple,
    trace,
    )
from bzrlib.static_tuple import StaticTuple

# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
# out, it takes 3.1MB to cache the layer.
_PAGE_CACHE_SIZE = 4*1024*1024
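# (Arithmetic behind the 3.1MB figure above: 50 bytes * 255 entries * 255-way
# fan out = 3,251,250 bytes, i.e. roughly 3.1MB, so the 4MB cap holds a full
# layer with some headroom to spare.)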
# Per thread caches for 2 reasons:
# - in the server we may be serving very different content, so we get less
#   pollution.
# - we avoid locking on every cache lookup.
_thread_caches = threading.local()
_thread_caches.page_cache = None

def _get_cache():
    """Get the per-thread page cache.

    We need a function to do this because in a new thread the _thread_caches
    threading.local object does not have the cache initialized yet.
    """
    page_cache = getattr(_thread_caches, 'page_cache', None)
    if page_cache is None:
        # We are caching bytes so len(value) is perfectly accurate
        page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)
        _thread_caches.page_cache = page_cache
    return page_cache

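# Illustrative usage sketch (the helper and its arguments are hypothetical;
# only _get_cache() itself is real): callers look the cache up on every
# access rather than holding a reference, so each thread transparently gets
# its own LRUSizeCache:
#
#     def _example_cached_read(key, fetch_bytes):
#         cache = _get_cache()
#         try:
#             return cache[key]
#         except KeyError:
#             value = fetch_bytes(key)
#             cache[key] = value
#             return value
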
# If a ChildNode falls below this many bytes, we check for a remap
_INTERESTING_NEW_SIZE = 50
# If a ChildNode shrinks by more than this amount, we check for a remap
_INTERESTING_SHRINKAGE_LIMIT = 20
# If we delete more than this many nodes applying a delta, we check for a remap
_INTERESTING_DELETES_LIMIT = 5
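# Rough sketch of how the three thresholds above are consulted; the real
# checks are spread across the map/unmap/apply_delta paths, and the helper
# name below is hypothetical:
#
#     def _example_should_check_remap(old_size, new_size, delete_count):
#         return (new_size < _INTERESTING_NEW_SIZE
#                 or old_size - new_size > _INTERESTING_SHRINKAGE_LIMIT
#                 or delete_count > _INTERESTING_DELETES_LIMIT)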

def _search_key_plain(key):
    """Map the key tuple into a search string that just uses the key bytes."""
    return '\x00'.join(key)
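# For example, _search_key_plain(('file-id', 'rev-id')) gives
# 'file-id\x00rev-id' -- the key elements joined by a NUL byte (the key
# values here are made up).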

# ...

    def apply_delta(self, delta):
        """Apply a delta to the map.

        :param delta: An iterable of old_key, new_key, new_value tuples.
            If new_key is not None, then new_key->new_value is inserted
            into the map; if old_key is not None, then the old mapping
            of old_key is removed.
        """
        delete_count = 0
        # Check preconditions first.
        as_st = StaticTuple.from_sequence
        new_items = set([as_st(key) for (old, key, value) in delta
                         if key is not None and old is None])
        # ...
        for old, new, value in delta:
            if old is not None and old != new:
                self.unmap(old, check_remap=False)
                delete_count += 1
        for old, new, value in delta:
            if new is not None:
                self.map(new, value)
        if delete_count > _INTERESTING_DELETES_LIMIT:
            trace.mutter("checking remap as %d deletions", delete_count)
            self._check_remap()
        return self._save()
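    # An example delta in the (old_key, new_key, new_value) form described by
    # the docstring above; the keys and the map instance name are made up:
    #
    #     delta = [
    #         (None, ('new-id',), 'value for new-id'),   # plain insertion
    #         (('gone-id',), None, None),                # plain removal
    #         (('kept-id',), ('kept-id',), 'new value'), # value replacement
    #     ]
    #     new_root_key = chkmap.apply_delta(delta)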

    def _read_bytes(self, key):
        try:
            return _get_cache()[key]
        except KeyError:
            stream = self._store.get_record_stream([key], 'unordered', True)
            bytes = stream.next().get_bytes_as('fulltext')
            _get_cache()[key] = bytes
            return bytes

    def _dump_tree(self, include_keys=False):
        # ...

    def _check_remap(self):
        """Check if nodes can be collapsed."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            self._root_node = self._root_node._check_remap(self._store)

    def _save(self):
        """Save the map completely.

        :return: The key of the root node.
        """
        # ...

# ...

class LeafNode(Node):
    """A node containing actual key:value pairs.

    :ivar _items: A dict of key->value items. The key is in tuple form.
    :ivar _size: The number of bytes that would be used by serializing all of
        the key/value pairs.
    """

    __slots__ = ('_common_serialised_prefix',)

    def __init__(self, search_key_func=None):
        Node.__init__(self)
        # All of the keys in this leaf node share this common prefix
        self._common_serialised_prefix = None
        if search_key_func is None:
            self._search_key_func = _search_key_plain
        else:
            self._search_key_func = search_key_func

    # ...

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes, with key key, into a LeafNode.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        """
        key = static_tuple.expect_static_tuple(key)
        return _deserialise_leaf_node(bytes, key,
            search_key_func=search_key_func)

    # ...

            raise AssertionError('%r must be known' % self._search_prefix)
        return self._search_prefix, [("", self)]

    _serialise_key = '\x00'.join

    def serialise(self, store):
        """Serialise the LeafNode to store.

        :param store: A VersionedFiles honouring the CHK extensions.
        :return: An iterable of the keys inserted by this operation.
        """
        # ...
        bytes = ''.join(lines)
        if len(bytes) != self._current_size():
            raise AssertionError('Invalid _current_size')
        _get_cache().add(self._key, bytes)
        return [self._key]
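
    # Sketch of the round trip implied here (store handling is illustrative):
    # serialise() returns the list of keys it inserted, and the cached bytes
    # can be rebuilt into a node with deserialise():
    #
    #     key = leaf.serialise(store)[0]
    #     bytes = _get_cache()[key]
    #     rebuilt = LeafNode.deserialise(bytes, key,
    #                                    search_key_func=_search_key_plain)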

# ...

class InternalNode(Node):
    # ...

    @classmethod
    def deserialise(klass, bytes, key, search_key_func=None):
        """Deserialise bytes to an InternalNode, with key key.

        :param bytes: The bytes of the node.
        :param key: The key that the serialised node has.
        :return: An InternalNode instance.
        """
        key = static_tuple.expect_static_tuple(key)
        return _deserialise_internal_node(bytes, key,
            search_key_func=search_key_func)

        # ...
            for record in stream:
                bytes = record.get_bytes_as('fulltext')
                node = _deserialise(bytes, record.key,
                    search_key_func=self._search_key_func)
                prefix, node_key_filter = keys[record.key]
                node_and_filters.append((node, node_key_filter))
                self._items[prefix] = node
                _get_cache().add(record.key, bytes)
            for info in node_and_filters:
                yield info

    # ...
            lines.append(serialised[prefix_len:])
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = StaticTuple("sha1:" + sha1,).intern()
        _get_cache().add(self._key, ''.join(lines))
        yield self._key

    def _search_key(self, key):
        # ...

    # ...
        return self._search_prefix

    def unmap(self, store, key, check_remap=True):
        """Remove key from this node and its children."""
        if not len(self._items):
            raise AssertionError("can't unmap in an empty InternalNode.")
        children = [node for node, _

# ...

        self._state = None

    def _read_nodes_from_store(self, keys):
        # We chose not to use _get_cache(), because we think in
        # terms of records to be yielded. Also, we expect to touch each page
        # only 1 time during this code. (We may want to evaluate saving the
        # raw bytes into the page cache, which would allow a working tree
        # update after the fetch to not have to read the bytes again.)
        as_st = StaticTuple.from_sequence
        stream = self._store.get_record_stream(keys, 'unordered', True)
        for record in stream:
            # ...