"""A simple least-recently-used (LRU) cache."""

from bzrlib import (
    symbol_versioning,
    trace,
    )

_null_key = object()


class _LRUNode(object):
"""This maintains the linked-list which is the lru internals."""

    __slots__ = ('prev', 'next_key', 'key', 'value', 'cleanup', 'size')

def __init__(self, key, value, cleanup=None):
        self.prev = None
        self.next_key = _null_key
        self.key = key
        self.value = value
        self.cleanup = cleanup
        self.size = None

def __repr__(self):
if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
return '%s(%r n:%r p:%r)' % (self.__class__.__name__, self.key,
                                     self.next_key, prev_key)

def run_cleanup(self):
        try:
            if self.cleanup is not None:
                self.cleanup(self.key, self.value)
        finally:
            # cleanup might raise an exception, but we want to make sure
            # to break refcycles, etc
            self.cleanup = None
            self.value = None
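

# A sketch of the structure maintained here (the keys 'a', 'b', 'c' are
# hypothetical): LRUCache keeps a dict mapping key -> _LRUNode, and the nodes
# form a chain from the most-recently-used entry to the least-recently-used
# one.  'prev' points one step toward the head (the head's prev is None),
# while 'next_key' holds the key of the neighbour one step toward the tail;
# the tail's next_key is the _null_key sentinel.
#
#   head (MRU)          middle              tail (LRU)
#   node 'a'    <-prev- node 'b'    <-prev- node 'c'
#   next_key='b'        next_key='c'        next_key=_null_key
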
class LRUCache(object):
"""A class which manages a cache of entries, removing unused ones."""

    def __init__(self, max_cache=100, after_cleanup_count=None):
        self._cache = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
self._update_max_cache(max_cache, after_cleanup_count)
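        # For example (an illustrative reading of the two limits above):
        # LRUCache(max_cache=100, after_cleanup_count=80) starts evicting once
        # the cache grows past 100 entries, and each cleanup pass drops
        # least-recently-used entries until 80 remain.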
def __contains__(self, key):
return key in self._cache
def __getitem__(self, key):
        cache = self._cache
        node = cache[key]
        # Inlined from _record_access to decrease the overhead of __getitem__
        # We also have more knowledge about structure if __getitem__ is
        # succeeding, then we know that self._most_recently_used must not be
        # None, etc.
        mru = self._most_recently_used
        if node is mru:
            # Nothing to do, this node is already at the head of the queue
            return node.value
        # Remove this node from the old location
        node_prev = node.prev
        next_key = node.next_key
        # benchmarking shows that the lookup of _null_key in globals is faster
        # than the attribute lookup for (node is self._least_recently_used)
        if next_key is _null_key:
            # 'node' is the _least_recently_used, because it doesn't have a
            # 'next' item. So move the current lru to the previous node.
            self._least_recently_used = node_prev
        else:
            node_next = cache[next_key]
            node_next.prev = node_prev
        node_prev.next_key = next_key
        # Insert this node at the front of the list
        node.next_key = mru.key
        mru.prev = node
        self._most_recently_used = node
        node.prev = None
        return node.value
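
    # For illustration of the relinking in __getitem__ above (the keys are
    # hypothetical): with the chain  A (MRU) -> B -> C (LRU),  a lookup of
    # 'B' unlinks it (A.next_key becomes 'C', C.prev becomes A) and reinserts
    # it at the head, giving  B (MRU) -> A -> C (LRU).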

    def _walk_lru(self):
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError('the _most_recently_used entry is not'
                                     ' supposed to have a previous entry'
                                     ' %s' % (node,))
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError('only the last node should have'
                                         ' no next value: %s' % (node,))
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError('inconsistency found, node.next.prev'
                                         ' != node: %s' % (node,))
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError('only the _most_recently_used should'
                                         ' not have a previous node: %s'
                                         % (node,))
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError('inconsistency found, node.prev.next'
                                         ' != node: %s' % (node,))
            yield node
            node = node_next

def add(self, key, value, cleanup=None):
"""Add a new value to the cache.

        :param key: The key to store it under
        :param value: The object to cache
:param cleanup: None or a function taking (key, value) to indicate
'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
if key in self._cache:
node = self._cache[key]
            try:
                node.run_cleanup()
            finally:
                # Maintain the LRU properties, even if cleanup raises an
                # exception
                node.value = value
                node.cleanup = cleanup
                self._record_access(node)
        else:
node = _LRUNode(key, value, cleanup=cleanup)
self._cache[key] = node
            self._record_access(node)

if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()
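
    # An example of the cleanup hook accepted by add() above (the callback
    # name and the cached object are hypothetical; any callable taking
    # (key, value) works):
    #
    #   def close_file(key, file_obj):
    #       file_obj.close()
    #
    #   cache.add('readme', open('README.txt'), cleanup=close_file)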

    def _record_access(self, node):
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            self._most_recently_used = node
            self._least_recently_used = node
            return
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            return
        # We've taken care of the tail pointer, remove the node, and insert it
        # at the front
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        node.next_key = self._most_recently_used.key
        self._most_recently_used.prev = node
        self._most_recently_used = node
        node.prev = None

def _remove_node(self, node):
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
self._cache.pop(node.key)
# If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
self._most_recently_used = None
        try:
            node.run_cleanup()
        finally:
            # cleanup might raise an exception, but we want to make sure to
            # maintain the linked list
            if node.prev is not None:
                node.prev.next_key = node.next_key
            if node.next_key is not _null_key:
                node_next = self._cache[node.next_key]
                node_next.prev = node.prev
            # And remove this node's pointers
            node.prev = None
            node.next_key = _null_key
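
    # Note that, because of the try/finally above, a cleanup callback that
    # raises still leaves the cache consistent: the entry has already been
    # popped from the dict, and the linked list is repaired before the
    # exception propagates.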
def _remove_lru(self):
"""Remove one entry from the lru, and handle consequences.
        """
        self._remove_node(self._least_recently_used)


class LRUSizeCache(LRUCache):
    """An LRUCache that removes things based on the size of the values."""

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        :param key: The key to store it under
        :param value: The object to cache
        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size: