5247.1.4
by Vincent Ladeuil
Merge cleanup into first-try |
1 |
# Copyright (C) 2005-2010 Canonical Ltd
|
1887.1.1
by Adeodato Simó
Do not separate paragraphs in the copyright statement with blank lines, |
2 |
#
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
3 |
# This program is free software; you can redistribute it and/or modify
|
4 |
# it under the terms of the GNU General Public License as published by
|
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
1887.1.1
by Adeodato Simó
Do not separate paragraphs in the copyright statement with blank lines, |
7 |
#
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
8 |
# This program is distributed in the hope that it will be useful,
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
1887.1.1
by Adeodato Simó
Do not separate paragraphs in the copyright statement with blank lines, |
12 |
#
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
13 |
# You should have received a copy of the GNU General Public License
|
14 |
# along with this program; if not, write to the Free Software
|
|
4183.7.1
by Sabin Iacob
update FSF mailing address |
15 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
16 |
|
6379.6.3
by Jelmer Vernooij
Use absolute_import. |
17 |
from __future__ import absolute_import |
18 |
||
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
19 |
# TODO: Up-front, stat all files in order and remove those which are deleted or
|
20 |
# out-of-date. Don't actually re-read them until they're needed. That ought
|
|
21 |
# to bring all the inodes into core so that future stats to them are fast, and
|
|
953
by Martin Pool
- refactor imports and stats for hashcache |
22 |
# it preserves the nice property that any caller will always get up-to-date
|
23 |
# data except in unavoidable cases.
|
|
864
by Martin Pool
doc |
24 |
|
25 |
# TODO: Perhaps return more details on the file to avoid statting it
|
|
26 |
# again: nonexistent, file type, size, etc
|
|
27 |
||
1213
by Martin Pool
- move import in hashcache |
28 |
# TODO: Perhaps use a Python pickle instead of a text file; might be faster.
|
29 |
||
864
by Martin Pool
doc |
30 |
|
31 |
||
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
32 |
# Version marker written as the first line of the on-disk cache file;
# read() discards the whole cache when the first line does not match this,
# so bumping the version invalidates caches written by older code.
CACHE_HEADER = "### bzr hashcache v5\n" |
859
by Martin Pool
- add HashCache.write and a simple test for it |
33 |
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
34 |
import os |
35 |
import stat |
|
36 |
import time |
|
953
by Martin Pool
- refactor imports and stats for hashcache |
37 |
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
38 |
from bzrlib import ( |
39 |
atomicfile, |
|
40 |
errors, |
|
41 |
filters as _mod_filters, |
|
42 |
osutils, |
|
43 |
trace, |
|
44 |
)
|
|
1540.1.1
by Martin Pool
[patch] stat-cache fixes from Denys |
45 |
|
46 |
||
1185.59.10
by Denys Duchier
hashcache: new constants and improved comment |
47 |
# Indexes into the fingerprint tuple built by HashCache._fingerprint():
# (st_size, st_mtime, st_ctime, st_ino, st_dev, st_mode).
FP_MTIME_COLUMN = 1 |
48 |
FP_CTIME_COLUMN = 2 |
|
1092.2.6
by Robert Collins
symlink support updated to work |
49 |
# st_mode lets get_sha1() distinguish regular files from symlinks.
FP_MODE_COLUMN = 5 |
859
by Martin Pool
- add HashCache.write and a simple test for it |
50 |
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
51 |
|
52 |
||
53 |
class HashCache(object): |
|
54 |
"""Cache for looking up file SHA-1.
|
|
55 |
||
56 |
Files are considered to match the cached value if the fingerprint
|
|
57 |
of the file has not changed. This includes its mtime, ctime,
|
|
58 |
device number, inode number, and size. This should catch
|
|
59 |
modifications or replacement of the file by a new one.
|
|
60 |
||
61 |
This may not catch modifications that do not change the file's
|
|
62 |
size and that occur within the resolution window of the
|
|
63 |
timestamps. To handle this we specifically do not cache files
|
|
64 |
which have changed since the start of the present second, since
|
|
65 |
they could undetectably change again.
|
|
66 |
||
67 |
This scheme may fail if the machine's clock steps backwards.
|
|
68 |
Don't do that.
|
|
69 |
||
70 |
This does not canonicalize the paths passed in; that should be
|
|
71 |
done by the caller.
|
|
72 |
||
860
by Martin Pool
- refactor hashcache to use just one dictionary |
73 |
_cache
|
74 |
Indexed by path, points to a two-tuple of the SHA-1 of the file.
|
|
75 |
and its fingerprint.
|
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
76 |
|
77 |
stat_count
|
|
78 |
number of times files have been statted
|
|
79 |
||
80 |
hit_count
|
|
81 |
number of times files have been retrieved from the cache, avoiding a
|
|
82 |
re-read
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
83 |
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
84 |
miss_count
|
85 |
number of misses (times files have been completely re-read)
|
|
86 |
"""
|
|
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
87 |
needs_write = False |
88 |
||
3368.2.4
by Ian Clatworthy
make content filter lookup a tree responsibility |
89 |
def __init__(self, root, cache_file_name, mode=None, |
90 |
content_filter_stack_provider=None): |
|
91 |
"""Create a hash cache in base dir, and set the file mode to mode.
|
|
92 |
||
3368.2.5
by Ian Clatworthy
incorporate jameinel's review feedback |
93 |
:param content_filter_stack_provider: a function that takes a
|
94 |
path (relative to the top of the tree) and a file-id as
|
|
3368.2.19
by Ian Clatworthy
first round of changes from abentley's review |
95 |
parameters and returns a stack of ContentFilters.
|
3368.2.5
by Ian Clatworthy
incorporate jameinel's review feedback |
96 |
If None, no content filtering is performed.
|
3368.2.4
by Ian Clatworthy
make content filter lookup a tree responsibility |
97 |
"""
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
98 |
self.root = osutils.safe_unicode(root) |
2255.2.149
by Robert Collins
Crufty but existing _iter_changes implementation for WorkingTreeFormat4. |
99 |
self.root_utf8 = self.root.encode('utf8') # where is the filesystem encoding ? |
846
by Martin Pool
- start adding refactored/simplified hash cache |
100 |
self.hit_count = 0 |
101 |
self.miss_count = 0 |
|
102 |
self.stat_count = 0 |
|
103 |
self.danger_count = 0 |
|
953
by Martin Pool
- refactor imports and stats for hashcache |
104 |
self.removed_count = 0 |
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
105 |
self.update_count = 0 |
860
by Martin Pool
- refactor hashcache to use just one dictionary |
106 |
self._cache = {} |
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
107 |
self._mode = mode |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
108 |
self._cache_file_name = osutils.safe_unicode(cache_file_name) |
3368.2.23
by Ian Clatworthy
cleanup some method names |
109 |
self._filter_provider = content_filter_stack_provider |
846
by Martin Pool
- start adding refactored/simplified hash cache |
110 |
|
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
111 |
def cache_file_name(self): |
1534.4.51
by Robert Collins
Test the disk layout of format3 working trees. |
112 |
return self._cache_file_name |
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
113 |
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
114 |
def clear(self): |
860
by Martin Pool
- refactor hashcache to use just one dictionary |
115 |
"""Discard all cached information.
|
116 |
||
117 |
This does not reset the counters."""
|
|
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
118 |
if self._cache: |
119 |
self.needs_write = True |
|
120 |
self._cache = {} |
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
121 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
122 |
def scan(self): |
123 |
"""Scan all files and remove entries where the cache entry is obsolete.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
124 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
125 |
Obsolete entries are those where the file has been modified or deleted
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
126 |
since the entry was inserted.
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
127 |
"""
|
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
128 |
# FIXME optimisation opportunity, on linux [and check other oses]:
|
129 |
# rather than iteritems order, stat in inode order.
|
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
130 |
prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()] |
953
by Martin Pool
- refactor imports and stats for hashcache |
131 |
prep.sort() |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
132 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
133 |
for inum, path, cache_entry in prep: |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
134 |
abspath = osutils.pathjoin(self.root, path) |
1845.1.3
by Martin Pool
Improvements to hashcache testing: |
135 |
fp = self._fingerprint(abspath) |
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
136 |
self.stat_count += 1 |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
137 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
138 |
cache_fp = cache_entry[1] |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
139 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
140 |
if (not fp) or (cache_fp != fp): |
141 |
# not here or not a regular file anymore
|
|
142 |
self.removed_count += 1 |
|
143 |
self.needs_write = True |
|
144 |
del self._cache[path] |
|
145 |
||
2012.1.7
by Aaron Bentley
Get tree._iter_changed down to ~ 1 stat per file |
146 |
def get_sha1(self, path, stat_value=None): |
953
by Martin Pool
- refactor imports and stats for hashcache |
147 |
"""Return the sha1 of a file.
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
148 |
"""
|
2255.2.149
by Robert Collins
Crufty but existing _iter_changes implementation for WorkingTreeFormat4. |
149 |
if path.__class__ is str: |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
150 |
abspath = osutils.pathjoin(self.root_utf8, path) |
2255.2.149
by Robert Collins
Crufty but existing _iter_changes implementation for WorkingTreeFormat4. |
151 |
else: |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
152 |
abspath = osutils.pathjoin(self.root, path) |
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
153 |
self.stat_count += 1 |
2012.1.7
by Aaron Bentley
Get tree._iter_changed down to ~ 1 stat per file |
154 |
file_fp = self._fingerprint(abspath, stat_value) |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
155 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
156 |
if not file_fp: |
157 |
# not a regular file or not existing
|
|
158 |
if path in self._cache: |
|
159 |
self.removed_count += 1 |
|
160 |
self.needs_write = True |
|
161 |
del self._cache[path] |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
162 |
return None |
953
by Martin Pool
- refactor imports and stats for hashcache |
163 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
164 |
if path in self._cache: |
165 |
cache_sha1, cache_fp = self._cache[path] |
|
860
by Martin Pool
- refactor hashcache to use just one dictionary |
166 |
else: |
167 |
cache_sha1, cache_fp = None, None |
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
168 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
169 |
if cache_fp == file_fp: |
1845.1.2
by mbp at sourcefrog
Use larger time window on hashcache to be safe with fractional times |
170 |
## mutter("hashcache hit for %s %r -> %s", path, file_fp, cache_sha1)
|
171 |
## mutter("now = %s", time.time())
|
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
172 |
self.hit_count += 1 |
860
by Martin Pool
- refactor hashcache to use just one dictionary |
173 |
return cache_sha1 |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
174 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
175 |
self.miss_count += 1 |
1092.2.6
by Robert Collins
symlink support updated to work |
176 |
|
177 |
mode = file_fp[FP_MODE_COLUMN] |
|
178 |
if stat.S_ISREG(mode): |
|
3368.2.23
by Ian Clatworthy
cleanup some method names |
179 |
if self._filter_provider is None: |
3368.2.5
by Ian Clatworthy
incorporate jameinel's review feedback |
180 |
filters = [] |
3368.2.4
by Ian Clatworthy
make content filter lookup a tree responsibility |
181 |
else: |
3368.2.23
by Ian Clatworthy
cleanup some method names |
182 |
filters = self._filter_provider(path=path, file_id=None) |
3368.2.4
by Ian Clatworthy
make content filter lookup a tree responsibility |
183 |
digest = self._really_sha1_file(abspath, filters) |
1092.2.6
by Robert Collins
symlink support updated to work |
184 |
elif stat.S_ISLNK(mode): |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
185 |
target = osutils.readlink(osutils.safe_unicode(abspath)) |
186 |
digest = osutils.sha_string(target.encode('UTF-8')) |
|
1092.2.6
by Robert Collins
symlink support updated to work |
187 |
else: |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
188 |
raise errors.BzrError("file %r: unknown file stat mode: %o" |
189 |
% (abspath, mode)) |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
190 |
|
1845.1.2
by mbp at sourcefrog
Use larger time window on hashcache to be safe with fractional times |
191 |
# window of 3 seconds to allow for 2s resolution on windows,
|
192 |
# unsynchronized file servers, etc.
|
|
1845.1.3
by Martin Pool
Improvements to hashcache testing: |
193 |
cutoff = self._cutoff_time() |
1845.1.2
by mbp at sourcefrog
Use larger time window on hashcache to be safe with fractional times |
194 |
if file_fp[FP_MTIME_COLUMN] >= cutoff \ |
195 |
or file_fp[FP_CTIME_COLUMN] >= cutoff: |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
196 |
# changed too recently; can't be cached. we can
|
197 |
# return the result and it could possibly be cached
|
|
198 |
# next time.
|
|
1185.59.10
by Denys Duchier
hashcache: new constants and improved comment |
199 |
#
|
200 |
# the point is that we only want to cache when we are sure that any
|
|
201 |
# subsequent modifications of the file can be detected. If a
|
|
202 |
# modification neither changes the inode, the device, the size, nor
|
|
203 |
# the mode, then we can only distinguish it by time; therefore we
|
|
204 |
# need to let sufficient time elapse before we may cache this entry
|
|
205 |
# again. If we didn't do this, then, for example, a very quick 1
|
|
206 |
# byte replacement in the file might go undetected.
|
|
1845.1.2
by mbp at sourcefrog
Use larger time window on hashcache to be safe with fractional times |
207 |
## mutter('%r modified too recently; not caching', path)
|
208 |
self.danger_count += 1 |
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
209 |
if cache_fp: |
210 |
self.removed_count += 1 |
|
211 |
self.needs_write = True |
|
212 |
del self._cache[path] |
|
846
by Martin Pool
- start adding refactored/simplified hash cache |
213 |
else: |
1845.1.2
by mbp at sourcefrog
Use larger time window on hashcache to be safe with fractional times |
214 |
## mutter('%r added to cache: now=%f, mtime=%d, ctime=%d',
|
215 |
## path, time.time(), file_fp[FP_MTIME_COLUMN],
|
|
216 |
## file_fp[FP_CTIME_COLUMN])
|
|
954
by Martin Pool
- separate out code that just scans the hash cache to find files that are possibly |
217 |
self.update_count += 1 |
218 |
self.needs_write = True |
|
219 |
self._cache[path] = (digest, file_fp) |
|
220 |
return digest |
|
1845.1.3
by Martin Pool
Improvements to hashcache testing: |
221 |
|
3368.2.4
by Ian Clatworthy
make content filter lookup a tree responsibility |
222 |
def _really_sha1_file(self, abspath, filters): |
1845.1.3
by Martin Pool
Improvements to hashcache testing: |
223 |
"""Calculate the SHA1 of a file by reading the full text"""
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
224 |
return _mod_filters.internal_size_sha_file_byname(abspath, filters)[1] |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
225 |
|
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
226 |
def write(self): |
859
by Martin Pool
- add HashCache.write and a simple test for it |
227 |
"""Write contents of cache to file."""
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
228 |
outf = atomicfile.AtomicFile(self.cache_file_name(), 'wb', |
229 |
new_mode=self._mode) |
|
859
by Martin Pool
- add HashCache.write and a simple test for it |
230 |
try: |
1908.4.8
by John Arbash Meinel
Small tweak to hashcache to make it write out faster |
231 |
outf.write(CACHE_HEADER) |
859
by Martin Pool
- add HashCache.write and a simple test for it |
232 |
|
860
by Martin Pool
- refactor hashcache to use just one dictionary |
233 |
for path, c in self._cache.iteritems(): |
1908.4.8
by John Arbash Meinel
Small tweak to hashcache to make it write out faster |
234 |
line_info = [path.encode('utf-8'), '// ', c[0], ' '] |
235 |
line_info.append(' '.join([str(fld) for fld in c[1]])) |
|
236 |
line_info.append('\n') |
|
237 |
outf.write(''.join(line_info)) |
|
859
by Martin Pool
- add HashCache.write and a simple test for it |
238 |
outf.commit() |
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
239 |
self.needs_write = False |
1845.1.1
by mbp at sourcefrog
Refactor and improve hashcache tests |
240 |
## mutter("write hash cache: %s hits=%d misses=%d stat=%d recent=%d updates=%d",
|
241 |
## self.cache_file_name(), self.hit_count, self.miss_count,
|
|
242 |
## self.stat_count,
|
|
243 |
## self.danger_count, self.update_count)
|
|
859
by Martin Pool
- add HashCache.write and a simple test for it |
244 |
finally: |
1755.3.1
by Robert Collins
Tune the time to build our kernel_like tree : make LocalTransport.put faster, AtomicFile faster, LocalTransport.append faster. |
245 |
outf.close() |
862
by Martin Pool
- code to re-read hashcache from file |
246 |
|
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
247 |
def read(self): |
862
by Martin Pool
- code to re-read hashcache from file |
248 |
"""Reinstate cache from file.
|
249 |
||
250 |
Overwrites existing cache.
|
|
251 |
||
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
252 |
If the cache file has the wrong version marker, this just clears
|
862
by Martin Pool
- code to re-read hashcache from file |
253 |
the cache."""
|
254 |
self._cache = {} |
|
255 |
||
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
256 |
fn = self.cache_file_name() |
257 |
try: |
|
948
by Martin Pool
- more buffering when reading/writing hashcache |
258 |
inf = file(fn, 'rb', buffering=65000) |
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
259 |
except IOError, e: |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
260 |
trace.mutter("failed to open %s: %s", fn, e) |
1214
by Martin Pool
- hashcache should be written out if it can't be read |
261 |
# better write it now so it is valid
|
262 |
self.needs_write = True |
|
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
263 |
return
|
264 |
||
862
by Martin Pool
- code to re-read hashcache from file |
265 |
hdr = inf.readline() |
266 |
if hdr != CACHE_HEADER: |
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
267 |
trace.mutter('cache header marker not found at top of %s;' |
268 |
' discarding cache', fn) |
|
1214
by Martin Pool
- hashcache should be written out if it can't be read |
269 |
self.needs_write = True |
862
by Martin Pool
- code to re-read hashcache from file |
270 |
return
|
271 |
||
272 |
for l in inf: |
|
273 |
pos = l.index('// ') |
|
274 |
path = l[:pos].decode('utf-8') |
|
275 |
if path in self._cache: |
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
276 |
trace.warning('duplicated path %r in cache' % path) |
862
by Martin Pool
- code to re-read hashcache from file |
277 |
continue
|
278 |
||
279 |
pos += 3 |
|
280 |
fields = l[pos:].split(' ') |
|
1092.2.6
by Robert Collins
symlink support updated to work |
281 |
if len(fields) != 7: |
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
282 |
trace.warning("bad line in hashcache: %r" % l) |
862
by Martin Pool
- code to re-read hashcache from file |
283 |
continue
|
284 |
||
285 |
sha1 = fields[0] |
|
286 |
if len(sha1) != 40: |
|
4241.14.15
by Vincent Ladeuil
Fix one unicode readlink related test failure. |
287 |
trace.warning("bad sha1 in hashcache: %r" % sha1) |
862
by Martin Pool
- code to re-read hashcache from file |
288 |
continue
|
289 |
||
290 |
fp = tuple(map(long, fields[1:])) |
|
291 |
||
292 |
self._cache[path] = (sha1, fp) |
|
293 |
||
4708.2.1
by Martin
Ensure all files opened by bazaar proper are explicitly closed |
294 |
# GZ 2009-09-20: Should really use a try/finally block to ensure close
|
295 |
inf.close() |
|
296 |
||
866
by Martin Pool
- use new path-based hashcache for WorkingTree- squash mtime/ctime to whole seconds- update and if necessary write out hashcache when WorkingTree object is created. |
297 |
self.needs_write = False |
1845.1.3
by Martin Pool
Improvements to hashcache testing: |
298 |
|
299 |
def _cutoff_time(self): |
|
300 |
"""Return cutoff time.
|
|
301 |
||
302 |
Files modified more recently than this time are at risk of being
|
|
303 |
undetectably modified and so can't be cached.
|
|
304 |
"""
|
|
305 |
return int(time.time()) - 3 |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
306 |
|
2012.1.18
by Aaron Bentley
rename fs param to stat_value |
307 |
def _fingerprint(self, abspath, stat_value=None): |
308 |
if stat_value is None: |
|
2012.1.7
by Aaron Bentley
Get tree._iter_changed down to ~ 1 stat per file |
309 |
try: |
2012.1.18
by Aaron Bentley
rename fs param to stat_value |
310 |
stat_value = os.lstat(abspath) |
2012.1.7
by Aaron Bentley
Get tree._iter_changed down to ~ 1 stat per file |
311 |
except OSError: |
312 |
# might be missing, etc
|
|
313 |
return None |
|
2012.1.18
by Aaron Bentley
rename fs param to stat_value |
314 |
if stat.S_ISDIR(stat_value.st_mode): |
1845.1.3
by Martin Pool
Improvements to hashcache testing: |
315 |
return None |
316 |
# we discard any high precision because it's not reliable; perhaps we
|
|
317 |
# could do better on some systems?
|
|
2012.1.18
by Aaron Bentley
rename fs param to stat_value |
318 |
return (stat_value.st_size, long(stat_value.st_mtime), |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
319 |
long(stat_value.st_ctime), stat_value.st_ino, |
2012.1.18
by Aaron Bentley
rename fs param to stat_value |
320 |
stat_value.st_dev, stat_value.st_mode) |