# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""A simple least-recently-used (LRU) cache."""

from bzrlib import (
    symbol_versioning,
    trace,
    )

_null_key = object()


class _LRUNode(object):
    """This maintains the linked-list which is the lru internals."""

    __slots__ = ('prev', 'next_key', 'key', 'value', 'cleanup', 'size')

    def __init__(self, key, value, cleanup=None):
        self.prev = None
        self.next_key = _null_key
        self.key = key
        self.value = value
        self.cleanup = cleanup
        # TODO: We could compute this 'on-the-fly' like we used to, and remove
        #       one pointer from this object, we just need to decide if it
        #       actually costs us much of anything in normal usage
        self.size = None

    def __repr__(self):
        if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
        return '%s(%r n:%r p:%r)' % (self.__class__.__name__, self.key,
                                     self.next_key, prev_key)

    def run_cleanup(self):
        try:
            if self.cleanup is not None:
                self.cleanup(self.key, self.value)
        finally:
            # cleanup might raise an exception, but we want to make sure
            # to break refcycles, etc
            self.cleanup = None
            self.value = None


class LRUCache(object):
    """A class which manages a cache of entries, removing unused ones."""

    def __init__(self, max_cache=100, after_cleanup_count=None,
                 after_cleanup_size=symbol_versioning.DEPRECATED_PARAMETER):
        if symbol_versioning.deprecated_passed(after_cleanup_size):
            symbol_versioning.warn('LRUCache.__init__(after_cleanup_size) was'
                                   ' deprecated in 1.11. Use'
                                   ' after_cleanup_count instead.',
                                   DeprecationWarning)
            after_cleanup_count = after_cleanup_size
        self._cache = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
        self._update_max_cache(max_cache, after_cleanup_count)

    def __contains__(self, key):
        return key in self._cache

    def __getitem__(self, key):
        cache = self._cache
        node = cache[key]
        # Inlined from _record_access to decrease the overhead of __getitem__
        # We also have more knowledge about structure if __getitem__ is
        # succeeding, then we know that self._most_recently_used must not be
        # None, etc.
        mru = self._most_recently_used
        if node is mru:
            # Nothing to do, this node is already at the head of the queue
            return node.value
        # Remove this node from the old location
        node_prev = node.prev
        next_key = node.next_key
        # benchmarking shows that the lookup of _null_key in globals is faster
        # than the attribute lookup for (node is self._least_recently_used)
        if next_key is _null_key:
            # 'node' is the _least_recently_used, because it doesn't have a
            # 'next' item. So move the current lru to the previous node.
            self._least_recently_used = node_prev
        else:
            node_next = cache[next_key]
            node_next.prev = node_prev
        node_prev.next_key = next_key
        # Insert this node at the front of the list
        node.next_key = mru.key
        mru.prev = node
        self._most_recently_used = node
        node.prev = None
        return node.value

    def __len__(self):
        return len(self._cache)
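
    # Illustrative usage sketch (not from the original source; it relies only
    # on the dict-style methods defined above):
    #
    #   cache = LRUCache(max_cache=10)
    #   cache['key'] = 'value'   # __setitem__ -> add() with no cleanup
    #   cache['key']             # __getitem__ returns 'value' and promotes
    #                            # the node to the head of the LRU list
    #   len(cache)               # -> 1
    #   'key' in cache           # -> True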

    def _walk_lru(self):
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError('the _most_recently_used entry is not'
                                     ' supposed to have a previous entry'
                                     ' %s' % (node,))
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError('only the last node should have'
                                         ' no next value: %s' % (node,))
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError('inconsistency found, node.next.prev'
                                         ' != node: %s' % (node,))
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError('only the _most_recently_used should'
                                         ' not have a previous node: %s'
                                         % (node,))
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError('inconsistency found, node.prev.next'
                                         ' != node: %s' % (node,))
            yield node
            node = node_next
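
    # Test-only sketch (assumed usage, not from the original source):
    # _walk_lru yields nodes from most- to least-recently used, so a test can
    # assert ordering directly:
    #
    #   cache = LRUCache(max_cache=10)
    #   cache[1] = 10
    #   cache[2] = 20
    #   [n.key for n in cache._walk_lru()]   # -> [2, 1]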

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        :param key: The key to store it under
        :param value: The object to store
        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        if key in self._cache:
            node = self._cache[key]
            try:
                node.run_cleanup()
            finally:
                # Maintain the LRU properties, even if cleanup raises an
                # exception
                node.value = value
                node.cleanup = cleanup
                self._record_access(node)
        else:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
            self._record_access(node)

        if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()
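
    # Hedged eviction example (assumes _update_max_cache keeps the usual 80%
    # default for after_cleanup_count, so max_cache=3 cleans down to 2):
    #
    #   cache = LRUCache(max_cache=3)
    #   cache['a'] = 1; cache['b'] = 2; cache['c'] = 3
    #   cache['a']              # promote 'a' ahead of 'b' and 'c'
    #   cache['d'] = 4          # len > max_cache -> cleanup() evicts 'b', 'c'
    #   sorted(cache.keys())    # -> ['a', 'd']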

    def cache_size(self):
        """Get the number of entries we will cache."""
        return self._max_cache

    def get(self, key, default=None):
        node = self._cache.get(key, None)
        if node is None:
            return default
        self._record_access(node)
        return node.value
80
"""Get the list of keys currently cached.
211
96
# Make sure the cache is shrunk to the correct size
212
97
while len(self._cache) > self._after_cleanup_count:
213
98
self._remove_lru()
99
# No need to compact the queue at this point, because the code that
100
# calls this would have already triggered it based on queue length

    def __setitem__(self, key, value):
        """Add a value to the cache, there will be no cleanup function."""
        self.add(key, value, cleanup=None)

    def _record_access(self, node):
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            self._most_recently_used = node
            self._least_recently_used = node
            return
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            return
        # We've taken care of the tail pointer, remove the node, and insert it
        # at the front
        # REMOVE
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # INSERT
        node.next_key = self._most_recently_used.key
        self._most_recently_used.prev = node
        self._most_recently_used = node
        node.prev = None
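
    # Sketch of the rewiring above (illustrative): starting from the list
    # A(mru) <-> B <-> C(lru), _record_access(C) first unlinks C, making B
    # the new _least_recently_used, then pushes C onto the head, giving
    # C(mru) <-> A <-> B(lru).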

    def _remove_node(self, node):
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        self._cache.pop(node.key)
        # If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
            self._most_recently_used = None
        try:
            node.run_cleanup()
        finally:
            # cleanup might raise an exception, but we want to make sure to
            # maintain the linked list
            if node.prev is not None:
                node.prev.next_key = node.next_key
            if node.next_key is not _null_key:
                node_next = self._cache[node.next_key]
                node_next.prev = node.prev
            # And remove this node's pointers
            node.prev = None
            node.next_key = _null_key

    def _remove_lru(self):
        """Remove one entry from the lru, and handle consequences.

        If there are no more references to the lru, then this entry should be
        removed from the cache.
        """
        self._remove_node(self._least_recently_used)

    def _update_max_cache(self, max_cache, after_cleanup_count=None):
        self._max_cache = max_cache
        if after_cleanup_count is None:
            self._after_cleanup_count = self._max_cache * 8 / 10
        else:
            self._after_cleanup_count = min(after_cleanup_count,
                                            self._max_cache)
        self.cleanup()


class LRUSizeCache(LRUCache):
    """An LRUCache that removes things based on the size of the values.

    This differs in that it doesn't care how many actual items there are,
    it just restricts the cache to be cleaned up after so much data is
    stored.

    The size of items added will be computed using compute_size(value),
    which defaults to len() if not supplied.
    """

    def __init__(self, max_size=1024*1024, after_cleanup_size=None,
                 compute_size=None):
        self._value_size = 0
        self._compute_size = compute_size
        if compute_size is None:
            self._compute_size = len
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        # This approximates that texts are > 0.5k in size. It only really
        # affects when we clean up the queue, so we don't want it to be too
        # large.
        LRUCache.__init__(self, max_cache=max(int(max_size/512), 1))
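        # Hedged arithmetic note: with the default max_size of 1024*1024 the
        # call above caps the entry count at max(int(1048576/512), 1) == 2048,
        # so the count limit only matters when values average under ~512
        # bytes.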

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        :param key: The key to store it under
        :param value: The object to store
        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size:
            # The new value is 'too big to fit', as it would fill up/overflow
            # the cache all by itself
            trace.mutter('Adding the key %r to an LRUSizeCache failed.'
                         ' value %d is too big to fit in the cache'
                         ' with size %d %d', key, value_len,
                         self._after_cleanup_size, self._max_size)
            if node is not None:
                # We won't be replacing the old node, so just remove it
                self._remove_node(node)
            if cleanup is not None:
                cleanup(key, value)
            return
        if node is None:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        else:
            self._value_size -= node.size
        node.size = value_len
        self._value_size += value_len
        self._record_access(node)

        if self._value_size > self._max_size:
            # Time to cleanup
            self.cleanup()
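
        # Hedged walk-through of the 'too big to fit' branch above: assuming
        # the usual 80% default, LRUSizeCache(max_size=10) gets
        # _after_cleanup_size == 8, so an 8-byte string is rejected (and
        # logged via trace.mutter) instead of being cached.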