@@ -51 +51 @@
 from time import time
-from bzrlib import bzrdir, errors, osutils, urlutils
+from bzrlib import bzrdir, errors, ignores, osutils, urlutils
 from bzrlib.atomicfile import AtomicFile
 import bzrlib.branch
 from bzrlib.conflicts import Conflict, ConflictList, CONFLICT_SUFFIXES

@@ -106 +107 @@
 import bzrlib.xml5

-# the regex here does the following:
-# 1) remove any weird characters; we don't escape them but rather
-# 2) match leading '.'s to make it not hidden
-_gen_file_id_re = re.compile(r'[^\w.]|(^\.*)')
+# the regex removes any weird characters; we don't escape them
+# but rather just pull them out
+_gen_file_id_re = re.compile(r'[^\w.]')

 _gen_id_suffix = None
 _gen_id_serial = 0

@@ -139 +138 @@
     The uniqueness is supplied from _next_id_suffix.
-    # XXX TODO: squash the filename to lowercase.
-    # XXX TODO: truncate the filename to something like 20 or 30 chars.
-    # XXX TODO: consider what to do with ids that look like illegal filepaths
-    #    on platforms we support.
-    return _gen_file_id_re.sub('', name) + _next_id_suffix()
+    # The real randomness is in the _next_id_suffix, the
+    # rest of the identifier is just to be nice.
+    # 1) Remove non-ascii word characters to keep the ids portable
+    # 2) squash to lowercase, so the file id doesn't have to
+    #    be escaped (case insensitive filesystems would bork for ids
+    #    that only differed in case without escaping).
+    # 3) truncate the filename to 20 chars. Long filenames also bork on some
+    # 4) Remove leading '.' characters to prevent the file ids from
+    #    being considered hidden.
+    ascii_word_only = _gen_file_id_re.sub('', name.lower())
+    short_no_dots = ascii_word_only.lstrip('.')[:20]
+    return short_no_dots + _next_id_suffix()

 def gen_root_id():
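
For readers following the new gen_file_id pipeline, here is a minimal standalone sketch of the same transformation. It is not the bzrlib function itself: demo_gen_file_id and its fixed suffix argument are invented for illustration, with the suffix standing in for _next_id_suffix().

import re

_gen_file_id_re = re.compile(r'[^\w.]')   # same pattern as the new code above

def demo_gen_file_id(name, suffix='-20060712012345-abcd'):
    # suffix is a hypothetical stand-in for bzrlib's _next_id_suffix()
    # 1) lowercase, then drop any non-word characters instead of escaping them
    ascii_word_only = _gen_file_id_re.sub('', name.lower())
    # 2) strip leading '.' so the id is not "hidden", and truncate to 20 chars
    short_no_dots = ascii_word_only.lstrip('.')[:20]
    return short_no_dots + suffix

print(demo_gen_file_id('My File (copy).TXT'))   # myfilecopy.txt-20060712012345-abcd

As the new comments note, the important property is that uniqueness comes entirely from the suffix; the readable prefix is only there to be nice.
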
@@ -446 +454 @@
         return relpath(self.basedir, path)

     def has_filename(self, filename):
-        return bzrlib.osutils.lexists(self.abspath(filename))
+        return osutils.lexists(self.abspath(filename))

     def get_file(self, file_id):
         return self.get_file_byname(self.id2path(file_id))

+    def get_file_text(self, file_id):
+        return self.get_file(file_id).read()

     def get_file_byname(self, filename):
         return file(self.abspath(filename), 'rb')

@@ -536 +547 @@
         if not inv.has_id(file_id):
         path = inv.id2path(file_id)
-        return bzrlib.osutils.lexists(self.abspath(path))
+        return osutils.lexists(self.abspath(path))

     def has_or_had_id(self, file_id):
         if file_id == self.inventory.root.file_id:

@@ -721 +732 @@
         inv = self._inventory
         # Convert these into local objects to save lookup times
-        pathjoin = bzrlib.osutils.pathjoin
-        file_kind = bzrlib.osutils.file_kind
+        pathjoin = osutils.pathjoin
+        file_kind = osutils.file_kind

         # transport.base ends in a slash, we want the piece
         # between the last two slashes

@@ -1098 +1109 @@
         Cached in the Tree object after the first call.
-        if hasattr(self, '_ignorelist'):
-            return self._ignorelist
+        ignoreset = getattr(self, '_ignoreset', None)
+        if ignoreset is not None:
+        ignore_globs = set(bzrlib.DEFAULT_IGNORE)
+        ignore_globs.update(ignores.get_runtime_ignores())
+        ignore_globs.update(ignores.get_user_ignores())

         if self.has_filename(bzrlib.IGNORE_FILENAME):
             f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
-            l.extend([line.rstrip("\n\r").decode('utf-8')
-                      for line in f.readlines()])
-        self._ignorelist = l
-        self._ignore_regex = self._combine_ignore_rules(l)
+            ignore_globs.update(ignores.parse_ignore_file(f))
+        self._ignoreset = ignore_globs
+        self._ignore_regex = self._combine_ignore_rules(ignore_globs)
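
The reworked get_ignore_list reduces to one idea: union glob patterns from several sources into a set and cache that set on the tree. A minimal sketch of the pattern follows; it is not the bzrlib method, and the *_globs parameters merely stand in for bzrlib.DEFAULT_IGNORE, ignores.get_runtime_ignores() and ignores.get_user_ignores().

def get_ignore_globs(tree, default_globs, runtime_globs, user_globs):
    # Return the cached set if a previous call already built it.
    cached = getattr(tree, '_ignoreset', None)
    if cached is not None:
        return cached

    # Union every source; a set collapses duplicates across sources.
    ignore_globs = set(default_globs)
    ignore_globs.update(runtime_globs)
    ignore_globs.update(user_globs)
    # The real method also merges the tree's own ignore file here,
    # via ignores.parse_ignore_file(f), before caching.

    tree._ignoreset = ignore_globs
    return ignore_globs

class DemoTree(object):
    pass

tree = DemoTree()
globs = get_ignore_globs(tree, ['*.pyc'], ['.bzr-shelf*'], ['*~'])

Switching from the old _ignorelist list to a set also means a pattern contributed by more than one source appears only once before being handed to _combine_ignore_rules().
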
@@ -1112 +1131 @@
     def _get_ignore_rules_as_regex(self):
         """Return a regex of the ignore rules and a mapping dict.

         :return: (ignore rules compiled regex, dictionary mapping rule group
             indices to original rule.)
-        if getattr(self, '_ignorelist', None) is None:
+        if getattr(self, '_ignoreset', None) is None:
             self.get_ignore_list()
         return self._ignore_regex
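
The docstring above describes the cached value as a single compiled regex plus a dictionary from group indices back to the original rules. A rough sketch of that shape, using fnmatch.translate as a stand-in for bzrlib's own glob-to-regex translation (combine_rules here is illustrative, not the real _combine_ignore_rules):

import fnmatch
import re

def combine_rules(rules):
    groups = []
    mapping = {}
    for index, rule in enumerate(rules):
        mapping[index + 1] = rule               # regex group numbers start at 1
        groups.append('(%s)' % fnmatch.translate(rule))
    # One alternation: whichever group matched tells us which rule fired.
    return re.compile('|'.join(groups)), mapping

regex, mapping = combine_rules(['*.pyc', '*~'])
m = regex.match('module.pyc')
if m:
    print(mapping[m.lastindex])                 # -> '*.pyc'

The point of the mapping is that a single match can be traced straight back to the pattern that caused it.
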
@@ -1842 +1861 @@
         self._transport_readonly_server = transport_readonly_server
         self._formats = formats

+    def _clone_test(self, test, bzrdir_format, workingtree_format, variation):
+        """Clone test for adaption."""
+        new_test = deepcopy(test)
+        new_test.transport_server = self._transport_server
+        new_test.transport_readonly_server = self._transport_readonly_server
+        new_test.bzrdir_format = bzrdir_format
+        new_test.workingtree_format = workingtree_format
+        def make_new_test_id():
+            new_id = "%s(%s)" % (test.id(), variation)
+            return lambda: new_id
+        new_test.id = make_new_test_id()

     def adapt(self, test):
         from bzrlib.tests import TestSuite
         result = TestSuite()
         for workingtree_format, bzrdir_format in self._formats:
-            new_test = deepcopy(test)
-            new_test.transport_server = self._transport_server
-            new_test.transport_readonly_server = self._transport_readonly_server
-            new_test.bzrdir_format = bzrdir_format
-            new_test.workingtree_format = workingtree_format
-            def make_new_test_id():
-                new_id = "%s(%s)" % (new_test.id(), workingtree_format.__class__.__name__)
-                return lambda: new_id
-            new_test.id = make_new_test_id()
+            new_test = self._clone_test(
+                workingtree_format, workingtree_format.__class__.__name__)
             result.addTest(new_test)
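
The refactor above pulls the per-format cloning out of adapt() into _clone_test(). The sketch below shows the underlying pattern with plain unittest; clone_test and DemoTest are invented for illustration and are not the bzrlib adapter classes.

import unittest
from copy import deepcopy

def clone_test(test, variation, **attrs):
    # Copy the test, attach per-variation attributes, and give the copy a
    # distinct id, e.g. "__main__.DemoTest.test_something(WorkingTreeFormat2)".
    new_test = deepcopy(test)
    for name, value in attrs.items():
        setattr(new_test, name, value)
    new_id = "%s(%s)" % (test.id(), variation)   # computed once, then frozen
    new_test.id = lambda: new_id
    return new_test

class DemoTest(unittest.TestCase):
    def test_something(self):
        self.assertTrue(True)

suite = unittest.TestSuite()
base = DemoTest('test_something')
for fmt_name in ('WorkingTreeFormat2', 'WorkingTreeFormat3'):
    suite.addTest(clone_test(base, fmt_name, workingtree_format=fmt_name))
unittest.TextTestRunner().run(suite)

Factoring the cloning into its own helper, as the diff does, keeps adapt() down to iterating self._formats and naming each variation.
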