# Copyright (C) 2005, 2006 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

# TODO: Up-front, stat all files in order and remove those which are deleted or
# out-of-date.  Don't actually re-read them until they're needed.  That ought
# to bring all the inodes into core so that future stats to them are fast, and
# it preserves the nice property that any caller will always get up-to-date
# data except in unavoidable cases.

# TODO: Perhaps return more details on the file to avoid statting it
# again: nonexistent, file type, size, etc

# TODO: Perhaps use a Python pickle instead of a text file; might be faster.


CACHE_HEADER = "### bzr hashcache v5\n"

import os, stat, time
import sha

from bzrlib.osutils import sha_file, pathjoin, safe_unicode
from bzrlib.trace import mutter, warning
from bzrlib.atomicfile import AtomicFile
from bzrlib.errors import BzrError


FP_MTIME_COLUMN = 1
FP_CTIME_COLUMN = 2
FP_MODE_COLUMN = 5
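# The fingerprint produced by HashCache._fingerprint below is the 6-tuple
# (size, mtime, ctime, inode, device, mode); the FP_*_COLUMN constants above
# index into that tuple.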


class HashCache(object):
    """Cache for looking up file SHA-1.

    Files are considered to match the cached value if the fingerprint
    of the file has not changed.  This includes its mtime, ctime,
    device number, inode number, and size.  This should catch
    modifications or replacement of the file by a new one.

    This may not catch modifications that do not change the file's
    size and that occur within the resolution window of the
    timestamps.  To handle this we specifically do not cache files
    which have changed since the start of the present second, since
    they could undetectably change again.

    This scheme may fail if the machine's clock steps backwards.
    Don't do that.

    This does not canonicalize the paths passed in; that should be
    done by the caller.

    _cache
        Indexed by path, points to a two-tuple of the SHA-1 of the file
        and its fingerprint.

    stat_count
        number of times files have been statted

    hit_count
        number of times files have been retrieved from the cache, avoiding a
        re-read

    miss_count
        number of misses (times files have been completely re-read)
    """
    needs_write = False

    def __init__(self, root, cache_file_name, mode=None):
        """Create a hash cache in base dir, and set the file mode to mode."""
        self.root = safe_unicode(root)
        self.root_utf8 = self.root.encode('utf8') # where is the filesystem encoding ?
        self.hit_count = 0
        self.miss_count = 0
        self.stat_count = 0
        self.danger_count = 0
        self.removed_count = 0
        self.update_count = 0
        self._cache = {}
        self._mode = mode
        self._cache_file_name = safe_unicode(cache_file_name)

    def cache_file_name(self):
        return self._cache_file_name

    def clear(self):
        """Discard all cached information.

        This does not reset the counters."""
        if self._cache:
            self.needs_write = True
            self._cache = {}

    def scan(self):
        """Scan all files and remove entries where the cache entry is obsolete.

        Obsolete entries are those where the file has been modified or deleted
        since the entry was inserted.
        """
        # FIXME optimisation opportunity, on linux [and check other oses]:
        # rather than iteritems order, stat in inode order.
        prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()]
        prep.sort()

        for inum, path, cache_entry in prep:
            abspath = pathjoin(self.root, path)
            fp = self._fingerprint(abspath)
            self.stat_count += 1

            cache_fp = cache_entry[1]

            if (not fp) or (cache_fp != fp):
                # not here or not a regular file anymore
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]

    def get_sha1(self, path, stat_value=None):
        """Return the sha1 of a file."""
        if path.__class__ is str:
            abspath = pathjoin(self.root_utf8, path)
        else:
            abspath = pathjoin(self.root, path)
        self.stat_count += 1
        file_fp = self._fingerprint(abspath, stat_value)

        if not file_fp:
            # not a regular file or not existing
            if path in self._cache:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
            return None

        if path in self._cache:
            cache_sha1, cache_fp = self._cache[path]
        else:
            cache_sha1, cache_fp = None, None

        if cache_fp == file_fp:
            ## mutter("hashcache hit for %s %r -> %s", path, file_fp, cache_sha1)
            ## mutter("now = %s", time.time())
            self.hit_count += 1
            return cache_sha1

        self.miss_count += 1

        mode = file_fp[FP_MODE_COLUMN]
        if stat.S_ISREG(mode):
            digest = self._really_sha1_file(abspath)
        elif stat.S_ISLNK(mode):
            digest = sha.new(os.readlink(abspath)).hexdigest()
        else:
            raise BzrError("file %r: unknown file stat mode: %o" % (abspath, mode))

        # window of 3 seconds to allow for 2s resolution on windows,
        # unsynchronized file servers, etc.
        cutoff = self._cutoff_time()
        if file_fp[FP_MTIME_COLUMN] >= cutoff \
                or file_fp[FP_CTIME_COLUMN] >= cutoff:
            # changed too recently; can't be cached.  we can
            # return the result and it could possibly be cached
            # next time.
            #
            # the point is that we only want to cache when we are sure that any
            # subsequent modifications of the file can be detected.  If a
            # modification neither changes the inode, the device, the size, nor
            # the mode, then we can only distinguish it by time; therefore we
            # need to let sufficient time elapse before we may cache this entry
            # again.  If we didn't do this, then, for example, a very quick 1
            # byte replacement in the file might go undetected.
            ## mutter('%r modified too recently; not caching', path)
            self.danger_count += 1
            if cache_fp:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
        else:
            ## mutter('%r added to cache: now=%f, mtime=%d, ctime=%d',
            ##        path, time.time(), file_fp[FP_MTIME_COLUMN],
            ##        file_fp[FP_CTIME_COLUMN])
            self.update_count += 1
            self.needs_write = True
            self._cache[path] = (digest, file_fp)
        return digest

    def _really_sha1_file(self, abspath):
        """Calculate the SHA1 of a file by reading the full text"""
        return sha_file(file(abspath, 'rb', buffering=65000))
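
    # The cache file written by write() below (and parsed by read()) contains
    # the CACHE_HEADER line followed by one line per entry of the form:
    #   <path, utf-8>// <sha1> <size> <mtime> <ctime> <inode> <device> <mode>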
    def write(self):
        """Write contents of cache to file."""
        outf = AtomicFile(self.cache_file_name(), 'wb', new_mode=self._mode)
        try:
            outf.write(CACHE_HEADER)

            for path, c in self._cache.iteritems():
                line_info = [path.encode('utf-8'), '// ', c[0], ' ']
                line_info.append(' '.join([str(fld) for fld in c[1]]))
                line_info.append('\n')
                outf.write(''.join(line_info))
            outf.commit()
            self.needs_write = False
            ## mutter("write hash cache: %s hits=%d misses=%d stat=%d recent=%d updates=%d",
            ##        self.cache_file_name(), self.hit_count, self.miss_count,
            ##        self.stat_count,
            ##        self.danger_count, self.update_count)
        finally:
            outf.close()

    def read(self):
        """Reinstate cache from file.

        Overwrites existing cache.

        If the cache file has the wrong version marker, this just clears
        the cache."""
        self._cache = {}

        fn = self.cache_file_name()
        try:
            inf = file(fn, 'rb', buffering=65000)
        except IOError, e:
            mutter("failed to open %s: %s", fn, e)
            # better write it now so it is valid
            self.needs_write = True
            return

        hdr = inf.readline()
        if hdr != CACHE_HEADER:
            mutter('cache header marker not found at top of %s;'
                   ' discarding cache', fn)
            self.needs_write = True
            return

        for l in inf:
            pos = l.index('// ')
            path = l[:pos].decode('utf-8')
            if path in self._cache:
                warning('duplicated path %r in cache' % path)
                continue

            pos += 3
            fields = l[pos:].split(' ')
            if len(fields) != 7:
                warning("bad line in hashcache: %r" % l)
                continue

            sha1 = fields[0]
            if len(sha1) != 40:
                warning("bad sha1 in hashcache: %r" % sha1)
                continue

            fp = tuple(map(long, fields[1:]))

            self._cache[path] = (sha1, fp)

        self.needs_write = False

    def _cutoff_time(self):
        """Return cutoff time.

        Files modified more recently than this time are at risk of being
        undetectably modified and so can't be cached.
        """
        return int(time.time()) - 3

    def _fingerprint(self, abspath, stat_value=None):
        if stat_value is None:
            try:
                stat_value = os.lstat(abspath)
            except OSError:
                # might be missing, etc
                return None
        if stat.S_ISDIR(stat_value.st_mode):
            return None
        # we discard any high precision because it's not reliable; perhaps we
        # could do better on some systems?
        return (stat_value.st_size, long(stat_value.st_mtime),
                long(stat_value.st_ctime), stat_value.st_ino,
                stat_value.st_dev, stat_value.st_mode)
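

# Illustrative usage sketch (not part of the original module); the working
# tree root and cache file path below are made-up examples:
#
#   hc = HashCache(u'/path/to/worktree', u'/path/to/worktree/.bzr/stat-cache')
#   hc.read()                       # load any previously written cache
#   s = hc.get_sha1('hello.txt')    # sha1, served from cache if the
#                                   # fingerprint still matches
#   if hc.needs_write:
#       hc.write()                  # persist updated entries atomically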