"""A simple least-recently-used (LRU) cache."""

from bzrlib import (
    symbol_versioning,
    )

_null_key = object()


class _LRUNode(object):
    """This maintains the linked-list which is the lru internals."""

    __slots__ = ('prev', 'next_key', 'key', 'value', 'cleanup', 'size')
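
    # Layout notes (illustrative comments, not in the original source): 'prev'
    # is a direct reference to the previous node, while the forward link is
    # stored by key in 'next_key'; the module-level sentinel _null_key marks
    # the end of the list. For two chained nodes a and b (a more recent):
    #
    #   a.next_key == b.key        b.next_key is _null_key
    #   b.prev is a                a.prev is None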

    def __init__(self, key, value, cleanup=None):
        self.prev = None
        self.next_key = _null_key
        self.key = key
        self.value = value
        self.cleanup = cleanup
        self.size = None

    def __repr__(self):
        if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
        return '%s(%r n:%r p:%r)' % (self.__class__.__name__, self.key,
                                     self.next_key, prev_key)

    def run_cleanup(self):
        try:
            if self.cleanup is not None:
                self.cleanup(self.key, self.value)
        finally:
            # cleanup might raise an exception, but we want to make sure
            # to break refcycles, etc
            self.cleanup = None
            self.value = None


class LRUCache(object):
    """A class which manages a cache of entries, removing unused ones."""

    def __init__(self, max_cache=100, after_cleanup_count=None,
                 after_cleanup_size=symbol_versioning.DEPRECATED_PARAMETER):
        if symbol_versioning.deprecated_passed(after_cleanup_size):
            symbol_versioning.warn('LRUCache.__init__(after_cleanup_size) was'
                                   ' deprecated in 1.11. Use'
                                   ' after_cleanup_count instead.',
                                   DeprecationWarning)
            after_cleanup_count = after_cleanup_size
        self._cache = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
        self._update_max_cache(max_cache, after_cleanup_count)

    def __contains__(self, key):
        return key in self._cache

    def __getitem__(self, key):
        cache = self._cache
        node = cache[key]
        # Inlined from _record_access to decrease the overhead of __getitem__
        # We also have more knowledge about structure if __getitem__ is
        # succeeding, then we know that self._most_recently_used must not be
        # None, etc.
        mru = self._most_recently_used
        if node is mru:
            # Nothing to do, this node is already at the head of the queue
            return node.value
        # Remove this node from the old location
        node_prev = node.prev
        next_key = node.next_key
        # benchmarking shows that the lookup of _null_key in globals is faster
        # than the attribute lookup for (node is self._least_recently_used)
        if next_key is _null_key:
            # 'node' is the _least_recently_used, because it doesn't have a
            # 'next' item. So move the current lru to the previous node.
            self._least_recently_used = node_prev
        else:
            node_next = cache[next_key]
            node_next.prev = node_prev
        node_prev.next_key = next_key
        # Insert this node at the front of the list
        node.next_key = mru.key
        mru.prev = node
        self._most_recently_used = node
        node.prev = None
        return node.value
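
    # The benchmarking comment above (global lookup vs. attribute lookup) can
    # be sanity-checked with a sketch like this; illustrative only, not part
    # of the original module, and timings vary by interpreter:
    #
    #   import timeit
    #   timeit.timeit("x is _null_key",
    #                 setup="_null_key = object(); x = object()")
    #   timeit.timeit("x is holder.attr",
    #                 setup="class H(object): pass\n"
    #                       "holder = H(); holder.attr = object(); x = object()")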

    def _walk_lru(self):
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError('the _most_recently_used entry is not'
                                     ' supposed to have a previous entry'
                                     ' %s' % (node,))
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError('only the last node should have'
                                         ' no next value: %s' % (node,))
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError('inconsistency found, node.next.prev'
                                         ' != node: %s' % (node,))
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError('only the _most_recently_used should'
                                         ' not have a previous node: %s'
                                         % (node,))
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError('inconsistency found, node.prev.next'
                                         ' != node: %s' % (node,))
            yield node
            node = node_next
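
    # _walk_lru is a consistency checker meant for tests; a hedged example of
    # what a caller might see (illustrative, not from the original source):
    #
    #   cache = LRUCache(max_cache=4)
    #   for i in range(4):
    #       cache.add(i, str(i))
    #   [node.key for node in cache._walk_lru()]
    #   # -> [3, 2, 1, 0], most recently used first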

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        if key in self._cache:
            node = self._cache[key]
            try:
                node.run_cleanup()
            finally:
                # Maintain the LRU properties, even if cleanup raises an
                # exception
                node.value = value
                node.cleanup = cleanup
                self._record_access(node)
        else:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
            self._record_access(node)

        if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()
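
    # Example of the cleanup hook (illustrative comments, not from the
    # original source; 'open_files' is a hypothetical dict kept by the caller):
    #
    #   def close_file(key, value):
    #       value.close()
    #       open_files.pop(key, None)
    #
    #   cache.add('readme', open('README.txt'), cleanup=close_file)
    #   # Whenever 'readme' is replaced or evicted, close_file(key, value) runs.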

    def _record_access(self, node):
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            self._most_recently_used = node
            self._least_recently_used = node
            return
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            return
        # We've taken care of the tail pointer, remove the node, and insert it
        # at the front
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        node.next_key = self._most_recently_used.key
        self._most_recently_used.prev = node
        self._most_recently_used = node
        node.prev = None
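
    # Orientation note (illustrative comment, not in the original): if the
    # order is A <-> B <-> C with A most recently used and B is passed in,
    # the code above unlinks B (A.next_key becomes C.key, C.prev becomes A)
    # and re-links it at the head, giving B <-> A <-> C.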

    def _remove_node(self, node):
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        self._cache.pop(node.key)
        # If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
            self._most_recently_used = None
        try:
            node.run_cleanup()
        finally:
            # cleanup might raise an exception, but we want to make sure to
            # maintain the linked list
            if node.prev is not None:
                node.prev.next_key = node.next_key
            if node.next_key is not _null_key:
                node_next = self._cache[node.next_key]
                node_next.prev = node.prev
            # And remove this node's pointers
            node.prev = None
            node.next_key = _null_key
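
    # The try/finally above keeps the list consistent even when a cleanup
    # callback fails; a hedged illustration (not from the original source):
    #
    #   cache.add('bad', object(), cleanup=lambda key, value: 1 / 0)
    #   # When 'bad' is later evicted, the ZeroDivisionError propagates, but
    #   # the node has already been popped and unlinked, so the cache's
    #   # internal linked list stays consistent.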

    def _remove_lru(self):
        """Remove one entry from the lru, and handle consequences.