# Copyright (C) 2005, 2006 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# TODO: Up-front, stat all files in order and remove those which are deleted or
# out-of-date. Don't actually re-read them until they're needed. That ought
# to bring all the inodes into core so that future stats to them are fast, and
# it preserves the nice property that any caller will always get up-to-date
# data except in unavoidable cases.

CACHE_HEADER = "### bzr hashcache v5\n"
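# Each line of the cache file after the header records one path: the path,
# then '// ', then the hex sha1, then the six fingerprint fields separated by
# spaces.  An illustrative line (all values made up), matching what write()
# emits and read() parses:
#
#   hello.txt// 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed 12 1130000000 1130000001 1234567 2049 33188
#
# i.e. size, mtime, ctime, inode, device and mode follow the sha1.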

import os
import stat
import time

from bzrlib import (
    atomicfile,
    errors,
    filters as _mod_filters,
    osutils,
    trace,
    )

# Columns of the fingerprint tuple returned by HashCache._fingerprint():
# (size, mtime, ctime, inode, device, mode)
FP_MTIME_COLUMN = 1
FP_CTIME_COLUMN = 2
FP_MODE_COLUMN = 5


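# A minimal usage sketch (the tree root and cache file name below are made-up
# examples, not values this module defines):
#
#   hc = HashCache(u'/path/to/tree', u'/path/to/tree/.bzr/stat-cache')
#   hc.read()                        # load any previously written cache
#   sha1 = hc.get_sha1('hello.txt')  # path relative to the tree root
#   if hc.needs_write:
#       hc.write()                   # persist new or updated entries
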
class HashCache(object):
    """Cache for looking up file SHA-1.

    hit_count
        number of times files have been retrieved from the cache, avoiding a
        re-read

    miss_count
        number of misses (times files have been completely re-read)
    """
    needs_write = False

    def __init__(self, root, cache_file_name, mode=None,
                 content_filter_stack_provider=None):
        """Create a hash cache in base dir, and set the file mode to mode.

        :param content_filter_stack_provider: a function that takes a
            path (relative to the top of the tree) and a file-id as
            parameters and returns a stack of ContentFilters.
            If None, no content filtering is performed.
        """
        self.root = osutils.safe_unicode(root)
        self.root_utf8 = self.root.encode('utf8') # where is the filesystem encoding ?
        self.hit_count = 0
        self.miss_count = 0
        self.stat_count = 0
        self.danger_count = 0
        self.removed_count = 0
        self.update_count = 0
        self._cache = {}
        self._mode = mode
        self._cache_file_name = osutils.safe_unicode(cache_file_name)
        self._filter_provider = content_filter_stack_provider

    def cache_file_name(self):
        return self._cache_file_name

    def clear(self):
        """Discard all cached information.

        This does not reset the counters."""
        if self._cache:
            self.needs_write = True
            self._cache = {}
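
    # An illustrative content_filter_stack_provider (the names here are
    # hypothetical, not part of this module); a provider maps a tree-relative
    # path and file-id to a stack of ContentFilters, and returning an empty
    # stack is equivalent to no filtering:
    #
    #   def plain_provider(path, file_id):
    #       return []
    #
    #   hc = HashCache(root, cache_file_name,
    #                  content_filter_stack_provider=plain_provider)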
"""Scan all files and remove entries where the cache entry is obsolete.
122
123
Obsolete entries are those where the file has been modified or deleted
123
since the entry was inserted.
124
since the entry was inserted.
126
# FIXME optimisation opportunity, on linux [and check other oses]:
127
# rather than iteritems order, stat in inode order.
125
128
prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()]
128
131
for inum, path, cache_entry in prep:
129
abspath = os.sep.join([self.basedir, path])
130
fp = _fingerprint(abspath)
132
abspath = osutils.pathjoin(self.root, path)
133
fp = self._fingerprint(abspath)
131
134
self.stat_count += 1
133
136
cache_fp = cache_entry[1]
135
138
if (not fp) or (cache_fp != fp):
136
139
# not here or not a regular file anymore
137
140
self.removed_count += 1
138
141
self.needs_write = True
139
142
del self._cache[path]
143

    def get_sha1(self, path, stat_value=None):
        """Return the sha1 of a file.
        """
        if path.__class__ is str:
            abspath = osutils.pathjoin(self.root_utf8, path)
        else:
            abspath = osutils.pathjoin(self.root, path)
        self.stat_count += 1
        file_fp = self._fingerprint(abspath, stat_value)

        if not file_fp:
            # not a regular file or not existing
            if path in self._cache:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
            return None

        if path in self._cache:
            cache_sha1, cache_fp = self._cache[path]
        else:
            cache_sha1, cache_fp = None, None

        if cache_fp == file_fp:
            ## mutter("hashcache hit for %s %r -> %s", path, file_fp, cache_sha1)
            ## mutter("now = %s", time.time())
            self.hit_count += 1
            return cache_sha1
        else:
            self.miss_count += 1

            mode = file_fp[FP_MODE_COLUMN]
            if stat.S_ISREG(mode):
                if self._filter_provider is None:
                    filters = []
                else:
                    filters = self._filter_provider(path=path, file_id=None)
                digest = self._really_sha1_file(abspath, filters)
            elif stat.S_ISLNK(mode):
                target = osutils.readlink(osutils.safe_unicode(abspath))
                digest = osutils.sha_string(target.encode('UTF-8'))
            else:
                raise errors.BzrError("file %r: unknown file stat mode: %o"
                                      % (abspath, mode))

            # window of 3 seconds to allow for 2s resolution on windows,
            # unsynchronized file servers, etc.
            cutoff = self._cutoff_time()
            if file_fp[FP_MTIME_COLUMN] >= cutoff \
                    or file_fp[FP_CTIME_COLUMN] >= cutoff:
                # changed too recently; can't be cached. we can
                # return the result and it could possibly be cached
                # next time.
                #
                # the point is that we only want to cache when we are sure that any
                # subsequent modifications of the file can be detected. If a
                # modification neither changes the inode, the device, the size, nor
                # the mode, then we can only distinguish it by time; therefore we
                # need to let sufficient time elapse before we may cache this entry
                # again. If we didn't do this, then, for example, a very quick 1
                # byte replacement in the file might go undetected.
                ## mutter('%r modified too recently; not caching', path)
                self.danger_count += 1
                if cache_fp:
                    self.removed_count += 1
                    self.needs_write = True
                    del self._cache[path]
            else:
                ## mutter('%r added to cache: now=%f, mtime=%d, ctime=%d',
                ##        path, time.time(), file_fp[FP_MTIME_COLUMN],
                ##        file_fp[FP_CTIME_COLUMN])
                self.update_count += 1
                self.needs_write = True
                self._cache[path] = (digest, file_fp)

            return digest

    def _really_sha1_file(self, abspath, filters):
        """Calculate the SHA1 of a file by reading the full text"""
        return _mod_filters.internal_size_sha_file_byname(abspath, filters)[1]

"""Write contents of cache to file."""
192
outf = AtomicFile(self.cache_file_name(), 'wb')
226
outf = atomicfile.AtomicFile(self.cache_file_name(), 'wb',
194
print >>outf, CACHE_HEADER,
229
outf.write(CACHE_HEADER)
196
231
for path, c in self._cache.iteritems():
197
assert '//' not in path, path
198
outf.write(path.encode('utf-8'))
200
print >>outf, c[0], # hex sha1
202
print >>outf, "%d" % fld,
232
line_info = [path.encode('utf-8'), '// ', c[0], ' ']
233
line_info.append(' '.join([str(fld) for fld in c[1]]))
234
line_info.append('\n')
235
outf.write(''.join(line_info))
206
237
self.needs_write = False
238
## mutter("write hash cache: %s hits=%d misses=%d stat=%d recent=%d updates=%d",
239
## self.cache_file_name(), self.hit_count, self.miss_count,
241
## self.danger_count, self.update_count)
    def read(self):
        """Reinstate cache from file.

        Overwrites existing cache.

        If the cache file has the wrong version marker, this just clears
        the cache."""
        self._cache = {}
        fn = self.cache_file_name()
        try:
            inf = file(fn, 'rb', buffering=65000)
        except IOError, e:
            trace.mutter("failed to open %s: %s", fn, e)
            # better write it now so it is valid
            self.needs_write = True
            return

        hdr = inf.readline()
        if hdr != CACHE_HEADER:
            trace.mutter('cache header marker not found at top of %s;'
                         ' discarding cache', fn)
            self.needs_write = True
            return

        for l in inf:
            pos = l.index('// ')
            path = l[:pos].decode('utf-8')
            fields = l[pos + 3:].split(' ')
            sha1 = fields[0]
            fp = tuple(map(long, fields[1:]))
            self._cache[path] = (sha1, fp)

        inf.close()
        self.needs_write = False

    def _cutoff_time(self):
        """Return cutoff time.

        Files modified more recently than this time are at risk of being
        undetectably modified and so can't be cached.
        """
        return int(time.time()) - 3

    def _fingerprint(self, abspath, stat_value=None):
        if stat_value is None:
            try:
                stat_value = os.lstat(abspath)
            except OSError:
                # might be missing, etc
                return None

        if stat.S_ISDIR(stat_value.st_mode):
            return None

        # we discard any high precision because it's not reliable; perhaps we
        # could do better on some systems?
        return (stat_value.st_size, long(stat_value.st_mtime),
                long(stat_value.st_ctime), stat_value.st_ino,
                stat_value.st_dev, stat_value.st_mode)
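

# A sketch of how the FP_*_COLUMN constants defined near the top of the module
# index into the tuple returned by _fingerprint() above:
#
#   fp = (st_size, st_mtime, st_ctime, st_ino, st_dev, st_mode)
#   fp[FP_MTIME_COLUMN]   # st_mtime, index 1
#   fp[FP_CTIME_COLUMN]   # st_ctime, index 2
#   fp[FP_MODE_COLUMN]    # st_mode, index 5, used to tell files from symlinks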