# Copyright (C) 2008, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

# If each line is 50 bytes, and you have 255 internal pages, with 255-way fan
# out, it takes 3.1MB to cache the layer.
_PAGE_CACHE_SIZE = 4*1024*1024
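# (Worked figure: 255 pages * 255 lines per page * 50 bytes per line is
# 3,251,250 bytes, about 3.1MB, so the 4MB cap above holds a full layer with
# room to spare.)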
# Per thread caches for 2 reasons:
# - in the server we may be serving very different content, so we get less
#   cache thrashing.
# - we avoid locking on every cache lookup.
_thread_caches = threading.local()
_thread_caches.page_cache = None


def _get_cache():
"""Get the per-thread page cache.
74
We need a function to do this because in a new thread the _thread_caches
75
threading.local object does not have the cache initialized yet.
77
page_cache = getattr(_thread_caches, 'page_cache', None)
78
if page_cache is None:
79
# We are caching bytes so len(value) is perfectly accurate
80
page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)
81
_thread_caches.page_cache = page_cache
62
# We are caching bytes so len(value) is perfectly accurate
63
_page_cache = lru_cache.LRUSizeCache(_PAGE_CACHE_SIZE)
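
# Sketch of the per-thread behaviour (illustrative only, not part of the
# module): each thread that calls _get_cache() lazily builds its own
# LRUSizeCache, so lookups and inserts never need a lock.
#
#   def _touch_cache():
#       cache = _get_cache()  # a brand new thread gets a fresh, empty cache
#       cache.add(('sha1:xyz',), 'page bytes')
#
#   threading.Thread(target=_touch_cache).start()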

# If a ChildNode falls below this many bytes, we check for a remap
_INTERESTING_NEW_SIZE = 50
# If a ChildNode shrinks by more than this amount, we check for a remap
_INTERESTING_SHRINKAGE_LIMIT = 20
# If we delete more than this many nodes applying a delta, we check for a remap
_INTERESTING_DELETES_LIMIT = 5
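# (Illustration: a child that shrinks from 60 bytes to 35 bytes has shrunk by
# 25 > _INTERESTING_SHRINKAGE_LIMIT and has also fallen below
# _INTERESTING_NEW_SIZE, so either threshold alone would trigger a remap
# check.)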


def _search_key_plain(key):
    """Map the key tuple into a search string that just uses the key bytes."""
    return '\x00'.join(key)


            into the map; if old_key is not None, then the old mapping
            of old_key is removed.
        """
        delete_count = 0
        # Check preconditions first.
        as_st = StaticTuple.from_sequence
        new_items = set([as_st(key) for (old, key, value) in delta
                         if key is not None and old is None])
        for old, new, value in delta:
            if old is not None and old != new:
                self.unmap(old, check_remap=False)
                delete_count += 1
        for old, new, value in delta:
            if new is not None:
                self.map(new, value)
        if delete_count > _INTERESTING_DELETES_LIMIT:
            trace.mutter("checking remap as %d deletions", delete_count)
            self._check_remap()
        return self._save()
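
    # Usage sketch (hypothetical keys and values): one delta can insert,
    # replace and delete in a single pass, returning the new root key.
    #
    #   new_root_key = chkmap.apply_delta([
    #       (None, ('new-key',), 'added value'),           # insert
    #       (('kept-key',), ('kept-key',), 'new value'),   # replace in place
    #       (('stale-key',), None, None),                  # delete
    #   ])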

    def _read_bytes(self, key):
        try:
            return _get_cache()[key]
        except KeyError:
            stream = self._store.get_record_stream([key], 'unordered', True)
            bytes = stream.next().get_bytes_as('fulltext')
            _get_cache()[key] = bytes
            return bytes
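
    # (Read-through behaviour: the first _read_bytes(key) for a page misses
    # the cache and fetches the fulltext from the store; later calls for the
    # same key in the same thread are served straight from _get_cache().)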

    def _dump_tree(self, include_keys=False):

    def _check_remap(self):
        """Check if nodes can be collapsed."""
        self._ensure_root()
        if type(self._root_node) is InternalNode:
            # The remap may collapse the children into a single LeafNode;
            # rebinding the root lets the tree shrink again after deletions.
            self._root_node = self._root_node._check_remap(self._store)

    def _save(self):
        """Save the map completely.

        the key/value pairs.
    """

    __slots__ = ('_common_serialised_prefix',)

    def __init__(self, search_key_func=None):
        Node.__init__(self)
        # All of the keys in this leaf node share this common prefix
        self._common_serialised_prefix = None
        if search_key_func is None:
            self._search_key_func = _search_key_plain

            raise AssertionError('%r must be known' % self._search_prefix)
        return self._search_prefix, [("", self)]

    _serialise_key = '\x00'.join
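    # (Illustration: _serialise_key(('foo', 'bar')) == 'foo\x00bar'; key
    # tuples are flattened with a NUL separator, which the serialised form
    # relies on never appearing inside a key element.)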

    def serialise(self, store):
        """Serialise the LeafNode to store.

        bytes = ''.join(lines)
        if len(bytes) != self._current_size():
            raise AssertionError('Invalid _current_size')
        _get_cache().add(self._key, bytes)
        return [self._key]

                prefix, node_key_filter = keys[record.key]
                node_and_filters.append((node, node_key_filter))
                self._items[prefix] = node
                _get_cache().add(record.key, bytes)
            for info in node_and_filters:

            lines.append(serialised[prefix_len:])
        sha1, _, _ = store.add_lines((None,), (), lines)
        self._key = StaticTuple("sha1:" + sha1,).intern()
        _get_cache().add(self._key, ''.join(lines))
        yield self._key
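
    # (Contract sketch: serialise() yields every key it writes, child pages
    # first and this node's own key last, so callers can collect all new
    # pages with e.g. new_keys = list(node.serialise(store)).)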

    def _search_key(self, key):

        self._state = None

    def _read_nodes_from_store(self, keys):
        # We chose not to use _get_cache(), because we think in
        # terms of records to be yielded. Also, we expect to touch each page
        # only 1 time during this code. (We may want to evaluate saving the
        # raw bytes into the page cache, which would allow a working tree
        # update after the fetch to not have to read the bytes again.)
        as_st = StaticTuple.from_sequence
        stream = self._store.get_record_stream(keys, 'unordered', True)
        for record in stream: