# (C) 2005 Canonical Ltd

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

# TODO: Up-front, stat all files in order and remove those which are deleted or
# out-of-date.  Don't actually re-read them until they're needed.  That ought
# to bring all the inodes into core so that future stats to them are fast, and
# it preserves the nice property that any caller will always get up-to-date
# data except in unavoidable cases.

# TODO: Perhaps return more details on the file to avoid statting it
# again: nonexistent, file type, size, etc

# TODO: Perhaps use a Python pickle instead of a text file; might be faster.


CACHE_HEADER = "### bzr hashcache v5\n"

import os, stat, time
import sha

from bzrlib.osutils import sha_file, pathjoin
from bzrlib.trace import mutter, warning
from bzrlib.atomicfile import AtomicFile
from bzrlib.errors import BzrError


FP_MTIME_COLUMN = 1
FP_CTIME_COLUMN = 2
FP_MODE_COLUMN = 5

def _fingerprint(abspath):
    try:
        fs = os.lstat(abspath)
    except OSError:
        # might be missing, etc
        return None

    if stat.S_ISDIR(fs.st_mode):
        return None

    # we discard any high precision because it's not reliable; perhaps we
    # could do better on some systems?
    return (fs.st_size, long(fs.st_mtime),
            long(fs.st_ctime), fs.st_ino, fs.st_dev, fs.st_mode)
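
# For reference: the fingerprint returned above is a 6-tuple of
# (size, mtime, ctime, inode, device, mode); FP_MTIME_COLUMN, FP_CTIME_COLUMN
# and FP_MODE_COLUMN are indices into it.  A plain file might fingerprint as,
# say, (1024, 1132000000L, 1132000000L, 411543L, 2049L, 33188) -- the size,
# inode and device values here are made up purely for illustration
# (33188 == 0100644, a regular file's mode).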


class HashCache(object):
    """Cache for looking up file SHA-1.

    Files are considered to match the cached value if the fingerprint
    of the file has not changed.  This includes its mtime, ctime,
    device number, inode number, and size.  This should catch
    modifications or replacement of the file by a new one.

    This may not catch modifications that do not change the file's
    size and that occur within the resolution window of the
    timestamps.  To handle this we specifically do not cache files
    which have changed since the start of the present second, since
    they could undetectably change again.

    This scheme may fail if the machine's clock steps backwards.
    Don't do that.

    This does not canonicalize the paths passed in; that should be
    done by the caller.

    _cache
        Indexed by path, points to a two-tuple of the SHA-1 of the file
        and its fingerprint.

    stat_count
        number of times files have been statted

    hit_count
        number of times files have been retrieved from the cache, avoiding a
        re-read

    miss_count
        number of misses (times files have been completely re-read)
    """
    needs_write = False

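    # An entry in the _cache dictionary described above therefore looks
    # roughly like
    #   u'foo/bar.txt': ('da39a3ee5e6b4b0d3255bfef95601890afd80709',
    #                    (size, mtime, ctime, ino, dev, mode))
    # (the path and sha1 shown are only illustrative).
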
    def __init__(self, basedir):
        self.basedir = basedir
        self.hit_count = 0
        self.miss_count = 0
        self.stat_count = 0
        self.danger_count = 0
        self.removed_count = 0
        self.update_count = 0
        self._cache = {}

    def cache_file_name(self):
        # FIXME: duplicate path logic here, this should be
        # something like 'branch.controlfile'.
        return pathjoin(self.basedir, '.bzr', 'stat-cache')

    def clear(self):
        """Discard all cached information.

        This does not reset the counters."""
        if self._cache:
            self.needs_write = True
            self._cache = {}

    def scan(self):
        """Scan all files and remove entries where the cache entry is obsolete.

        Obsolete entries are those where the file has been modified or deleted
        since the entry was inserted.
        """
        # Each cache entry is (sha1, fingerprint); fingerprint[3] is the inode
        # number, so sorting on it visits files in inode order.
        prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()]
        prep.sort()

        for inum, path, cache_entry in prep:
            abspath = pathjoin(self.basedir, path)
            fp = _fingerprint(abspath)
            self.stat_count += 1

            cache_fp = cache_entry[1]

            if (not fp) or (cache_fp != fp):
                # not here or not a regular file anymore
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]

    def get_sha1(self, path):
        """Return the sha1 of a file."""
        abspath = pathjoin(self.basedir, path)
        self.stat_count += 1
        file_fp = _fingerprint(abspath)

        if not file_fp:
            # not a regular file or not existing
            if path in self._cache:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
            return None

        if path in self._cache:
            cache_sha1, cache_fp = self._cache[path]
        else:
            cache_sha1, cache_fp = None, None

        if cache_fp == file_fp:
            self.hit_count += 1
            return cache_sha1

        self.miss_count += 1

        mode = file_fp[FP_MODE_COLUMN]
        if stat.S_ISREG(mode):
            digest = sha_file(file(abspath, 'rb', buffering=65000))
        elif stat.S_ISLNK(mode):
            digest = sha.new(os.readlink(abspath)).hexdigest()
        else:
            raise BzrError("file %r: unknown file stat mode: %o" % (abspath, mode))

        now = int(time.time())
        if file_fp[FP_MTIME_COLUMN] >= now or file_fp[FP_CTIME_COLUMN] >= now:
            # changed too recently; can't be cached.  we can
            # return the result and it could possibly be cached
            # next time.
            #
            # the point is that we only want to cache when we are sure that any
            # subsequent modifications of the file can be detected.  If a
            # modification neither changes the inode, the device, the size, nor
            # the mode, then we can only distinguish it by time; therefore we
            # need to let sufficient time elapse before we may cache this entry
            # again.  If we didn't do this, then, for example, a very quick 1
            # byte replacement in the file might go undetected.
            self.danger_count += 1
            if cache_fp:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
        else:
            self.update_count += 1
            self.needs_write = True
            self._cache[path] = (digest, file_fp)
        return digest

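    # The stat-cache file written by write() below and re-read by read()
    # starts with CACHE_HEADER and then holds one line per cached file,
    # roughly of the form (layout inferred from the code, shown here only
    # as an illustration):
    #
    #   <utf-8 path>// <40-hex sha1> <size> <mtime> <ctime> <ino> <dev> <mode>
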
    def write(self):
        """Write contents of cache to file."""
        outf = AtomicFile(self.cache_file_name(), 'wb')
        try:
            print >>outf, CACHE_HEADER,

            for path, c in self._cache.iteritems():
                assert '//' not in path, path
                outf.write(path.encode('utf-8'))
                outf.write('// ')
                print >>outf, c[0],     # hex sha1
                for fld in c[1]:
                    print >>outf, "%d" % fld,
                print >>outf

            outf.commit()
            self.needs_write = False
        finally:
            if not outf.closed:
                outf.abort()

    def read(self):
        """Reinstate cache from file.

        Overwrites existing cache.

        If the cache file has the wrong version marker, this just clears
        the cache."""
        self._cache = {}

        fn = self.cache_file_name()
        try:
            inf = file(fn, 'rb', buffering=65000)
        except IOError, e:
            mutter("failed to open %s: %s", fn, e)
            # better write it now so it is valid
            self.needs_write = True
            return

        hdr = inf.readline()
        if hdr != CACHE_HEADER:
            mutter('cache header marker not found at top of %s;'
                   ' discarding cache', fn)
            self.needs_write = True
            return

        for l in inf:
            pos = l.index('// ')
            path = l[:pos].decode('utf-8')
            if path in self._cache:
                warning('duplicated path %r in cache' % path)
                continue

            pos += 3
            fields = l[pos:].split(' ')
            if len(fields) != 7:
                warning("bad line in hashcache: %r" % l)
                continue

            sha1 = fields[0]
            if len(sha1) != 40:
                warning("bad sha1 in hashcache: %r" % sha1)
                continue

            fp = tuple(map(long, fields[1:]))

            self._cache[path] = (sha1, fp)

        self.needs_write = False
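

# A minimal usage sketch, kept behind a __main__ guard so importing this
# module is unaffected.  It is illustrative only: it assumes the directory
# given on the command line is a working tree containing a '.bzr' control
# directory (cache_file_name() writes beneath it), and that the remaining
# arguments are paths relative to that tree.
if __name__ == '__main__':
    import sys
    hc = HashCache(sys.argv[1])
    hc.read()                        # load any existing .bzr/stat-cache
    for p in sys.argv[2:]:
        print p, hc.get_sha1(p)      # cached unless the file changed recently
    if hc.needs_write:
        hc.write()                   # atomically rewrite the cache file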