# The tail of the LRU linked list is marked with a sentinel key rather than
# None, so nodes can refer to their successor by key (see _LRUNode.next_key).
_null_key = object()


class _LRUNode(object):
    """This maintains the linked-list which is the lru internals."""

    __slots__ = ('prev', 'next_key', 'key', 'value', 'cleanup', 'size')

    def __init__(self, key, value, cleanup=None):
        self.prev = None
        self.next_key = _null_key
        self.key = key
        self.value = value
        self.cleanup = cleanup
        self.size = None

    def __repr__(self):
        if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
        return '%s(%r n:%r p:%r)' % (self.__class__.__name__, self.key,
                                     self.next_key, prev_key)

    def run_cleanup(self):
        try:
            if self.cleanup is not None:
                self.cleanup(self.key, self.value)
        finally:
            # cleanup might raise an exception, but we want to make sure
            # to break refcycles, etc
            self.cleanup = None
            self.value = None
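
# Design note (inferred from the class above, not original text): only 'prev'
# holds a direct node reference; the forward link is stored as a key
# (next_key) with _null_key as the tail sentinel, e.g.:
#
#   _most_recently_used -> Node('a', next_key='b'), Node('b', next_key=_null_key)
#
# Together with run_cleanup() dropping 'cleanup' and 'value', this keeps the
# internal list from forming long-lived reference cycles.
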
class LRUCache(object):
    """A class which manages a cache of entries, removing unused ones."""

    def __init__(self, max_cache=100, after_cleanup_count=None):
        self._cache = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
        self._update_max_cache(max_cache, after_cleanup_count)

    def __contains__(self, key):
        return key in self._cache

    def __getitem__(self, key):
        cache = self._cache
        node = cache[key]
        # Inlined from _record_access to decrease the overhead of __getitem__
        # We also have more knowledge about structure if __getitem__ is
        # succeeding, then we know that self._most_recently_used must not be
        # None, etc.
        mru = self._most_recently_used
        if node is mru:
            # Nothing to do, this node is already at the head of the queue
            return node.value
        # Remove this node from the old location
        node_prev = node.prev
        next_key = node.next_key
        # benchmarking shows that the lookup of _null_key in globals is faster
        # than the attribute lookup for (node is self._least_recently_used)
        if next_key is _null_key:
            # 'node' is the _least_recently_used, because it doesn't have a
            # 'next' item. So move the current lru to the previous node.
            self._least_recently_used = node_prev
        else:
            node_next = cache[next_key]
            node_next.prev = node_prev
        node_prev.next_key = next_key
        # Insert this node at the front of the list
        node.next_key = mru.key
        mru.prev = node
        self._most_recently_used = node
        node.prev = None
        return node.value
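
    # Hypothetical usage sketch (values invented for illustration):
    #
    #   cache = LRUCache(max_cache=10)
    #   cache.add('key', 'value')
    #   cache['key']   # returns 'value' and moves its node to the list head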

    def __len__(self):
        return len(self._cache)

    def _walk_lru(self):
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError('the _most_recently_used entry is not'
                                     ' supposed to have a previous entry'
                                     ' %s' % (node,))
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError('only the last node should have'
                                         ' no next value: %s' % (node,))
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError('inconsistency found, node.next.prev'
                                         ' != node: %s' % (node,))
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError('only the _most_recently_used should'
                                         ' not have a previous node: %s'
                                         % (node,))
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError('inconsistency found, node.prev.next'
                                         ' != node: %s' % (node,))
            yield node
            node = node_next
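
    # Hypothetical test-side sketch: _walk_lru yields nodes from most to
    # least recently used, so a test might assert on ordering with:
    #
    #   [n.key for n in cache._walk_lru()]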

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        :param key: The key to store it under
        :param value: The object to store
        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        if key in self._cache:
            node = self._cache[key]
            try:
                node.run_cleanup()
            finally:
                # Maintain the LRU properties, even if cleanup raises an
                # exception
                node.value = value
                node.cleanup = cleanup
                self._record_access(node)
        else:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
            self._record_access(node)

        if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()
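
    # Hypothetical usage sketch (names invented for illustration):
    #
    #   def on_evict(key, value):
    #       print('dropping %s' % (key,))
    #   cache = LRUCache(max_cache=100)
    #   cache.add('k', 'v', cleanup=on_evict)
    #   # on_evict('k', 'v') runs when 'k' is replaced or evicted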

    def _record_access(self, node):
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            self._most_recently_used = node
            self._least_recently_used = node
            return
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            return
        # We've taken care of the tail pointer, remove the node, and insert it
        # at the front
        # REMOVE
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # INSERT
        node.next_key = self._most_recently_used.key
        self._most_recently_used.prev = node
        self._most_recently_used = node
        node.prev = None
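
    # Sketch of the splice above (illustrative, not original text):
    #
    #   before REMOVE:  ... A <-> node <-> B ...    head = mru
    #   after  REMOVE:  ... A <-> B ...             (node unlinked)
    #   after  INSERT:  node <-> mru <-> ...        head = node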

    def _remove_node(self, node):
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        self._cache.pop(node.key)
        # If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
            self._most_recently_used = None
        try:
            node.run_cleanup()
        finally:
            # cleanup might raise an exception, but we want to make sure to
            # maintain the linked list
            if node.prev is not None:
                node.prev.next_key = node.next_key
            if node.next_key is not _null_key:
                node_next = self._cache[node.next_key]
                node_next.prev = node.prev
            # And remove this node's pointers
            node.prev = None
            node.next_key = _null_key

    def _remove_lru(self):
        """Remove one entry from the lru, and handle consequences.

        If there are no more references to the lru, then this entry should be
        removed from the cache.
        """
        self._remove_node(self._least_recently_used)


class LRUSizeCache(LRUCache):
    """An LRUCache that removes things based on the size of the values.

    This differs in that it doesn't care how many actual items there are,
    it just restricts the cache to be cleaned up after so much data is
    stored.
    """

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        :param key: The key to store it under
        :param value: The object to store
        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size:
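
# Minimal end-to-end sketch, assuming the cleanup()/_update_max_cache
# behaviour of the full module (not shown in this excerpt):
#
#   cache = LRUCache(max_cache=2, after_cleanup_count=2)
#   cache.add('a', 1)
#   cache.add('b', 2)
#   _ = cache['a']     # touch 'a'; 'b' is now least recently used
#   cache.add('c', 3)  # len > max_cache triggers cleanup(), evicting 'b'
#   assert 'b' not in cache and 'a' in cache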