31
32
escape_invalid_chars,
34
serialize_inventory_flat,
35
unpack_inventory_entry,
36
unpack_inventory_flat,
33
from bzrlib.inventory import InventoryEntry
34
38
from bzrlib.revision import Revision
35
39
from bzrlib.errors import BzrError
42
"'":"'", # FIXME: overkill
49
def _ensure_utf8_re():
    """Make sure the _utf8_re and _unicode_re regexes have been compiled.

    Both regexes match the characters that must be entity-escaped when
    serializing to XML: the XML meta characters &<>'" plus any high-bit
    bytes (for utf-8 strings) or non-ascii code points (for unicode).
    """
    global _utf8_re, _unicode_re
    # Compile lazily, and guard each one so repeated calls are idempotent.
    if _utf8_re is None:
        _utf8_re = re.compile('[&<>\'\"]|[\x80-\xff]+')
    if _unicode_re is None:
        _unicode_re = re.compile(u'[&<>\'\"\u0080-\uffff]')
58
def _unicode_escape_replace(match, _map=_xml_escape_map):
    """Replace a string of non-ascii, non XML safe characters with their escape

    This will escape both Standard XML escapes, like <>"', etc.
    As well as escaping non ascii characters, because ElementTree did.
    This helps us remain compatible to older versions of bzr. We may change
    our policy in the future, though.
    """
    # jam 20060816 Benchmarks show that try/KeyError is faster if you
    # expect the entity to rarely miss. There is about a 10% difference
    # in overall time. But if you miss frequently, then if None is much
    # faster. For our use case, we *rarely* have a revision id, file id
    # or path name that is unicode. So use try/KeyError.
    try:
        return _map[match.group()]
    except KeyError:
        # Not one of the standard XML escapes: emit a numeric character
        # reference for the (single) non-ascii code point instead.
        return "&#%d;" % ord(match.group())
77
def _utf8_escape_replace(match, _map=_xml_escape_map):
    """Escape utf8 characters into XML safe ones.

    This uses 2 tricks. It is either escaping "standard" characters, like "&<>,
    or it is handling characters with the high-bit set. For ascii characters,
    we just lookup the replacement in the dictionary. For everything else, we
    decode back into Unicode, and then use the XML escape code.
    """
    try:
        return _map[match.group()]
    except KeyError:
        # High-bit bytes: the match may span several utf-8-encoded code
        # points, so decode first and escape each one numerically.
        return ''.join('&#%d;' % ord(uni_chr)
                       for uni_chr in match.group().decode('utf8'))
94
def _encode_and_escape(unicode_or_utf8_str, _map=_to_escaped_map):
    """Encode the string into utf8, and escape invalid XML characters.

    :param unicode_or_utf8_str: A unicode object, or an 8-bit string assumed
        to already be utf-8 encoded.
    :return: The escaped text with a trailing '"' appended (callers emit the
        value straight into an attribute="..." position).
    """
    # We frequently get entities we have not seen before, so it is better
    # to check if None, rather than try/KeyError
    text = _map.get(unicode_or_utf8_str)
    if text is None:
        if unicode_or_utf8_str.__class__ is unicode:
            # The alternative policy is to do a regular UTF8 encoding
            # and then escape only XML meta characters.
            # Performance is equivalent once you use cache_utf8. *However*
            # this makes the serialized texts incompatible with old versions
            # of bzr. So no net gain. (Perhaps the read code would handle utf8
            # better than entity escapes, but cElementTree seems to do just
            # fine either way)
            text = str(_unicode_re.sub(_unicode_escape_replace,
                                       unicode_or_utf8_str)) + '"'
        else:
            # Plain strings are considered to already be in utf-8 so we do a
            # slightly different method for escaping.
            text = _utf8_re.sub(_utf8_escape_replace,
                                unicode_or_utf8_str) + '"'
        _map[unicode_or_utf8_str] = text
    return text
119
def _get_utf8_or_ascii(a_str,
                       _encode_utf8=cache_utf8.encode,
                       _get_cached_ascii=cache_utf8.get_cached_ascii):
    """Return a cached version of the string.

    cElementTree will return a plain string if the XML is plain ascii. It only
    returns Unicode when it needs to. We want to work in utf-8 strings. So if
    cElementTree returns a plain string, we can just return the cached version.
    If it is Unicode, then we need to encode it.

    :param a_str: An 8-bit string or Unicode as returned by
                  cElementTree.Element.get()
    :return: A utf-8 encoded 8-bit string.
    """
    # This is fairly optimized because we know what cElementTree does, this is
    # not meant as a generic function for all cases. Because it is possible for
    # an 8-bit string to not be ascii or valid utf8.
    if a_str.__class__ is unicode:
        return _encode_utf8(a_str)
    else:
        # Plain (ascii) string: return the shared cached copy so repeated
        # ids compare cheaply and don't duplicate memory.
        return _get_cached_ascii(a_str)
143
"""Clean out the unicode => escaped map"""
144
_to_escaped_map.clear()
51
def _unescaper(match, _map=_xml_unescape_map):
    """re.sub callback: replace one '&name;'/'&#NNN;' entity with its text.

    :param match: A match whose group(1) is the entity body between '&' and ';'.
    :raises AssertionError: If the entity is neither a predefined name in
        _map nor a numeric character reference.
    """
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise AssertionError('unsupported entity: %s' % code)
        # Numeric character reference: decode the code point to utf-8 bytes.
        return unichr(int(code[1:])).encode('utf8')
61
# Matches one XML entity reference, capturing the body between '&' and ';';
# compiled lazily so importing the module stays cheap.
_unescape_re = lazy_regex.lazy_compile('\&([^;]*);')
63
def _unescape_xml(data):
    """Return *data* with XML entity references replaced by their text."""
    substitute_entities = _unescape_re.sub
    return substitute_entities(_unescaper, data)
147
68
class Serializer_v8(XMLSerializer):
224
152
reference_revision, symlink_target.
225
153
:return: The inventory as a list of lines.
228
self._check_revisions(inv)
230
156
append = output.append
231
157
self._append_inventory_root(append, inv)
232
entries = inv.iter_entries()
234
root_path, root_ie = entries.next()
235
for path, ie in entries:
236
if ie.parent_id != self.root_id:
237
parent_str = ' parent_id="'
238
parent_id = _encode_and_escape(ie.parent_id)
242
if ie.kind == 'file':
244
executable = ' executable="yes"'
248
append('<file%s file_id="%s name="%s%s%s revision="%s '
249
'text_sha1="%s" text_size="%d" />\n' % (
250
executable, _encode_and_escape(ie.file_id),
251
_encode_and_escape(ie.name), parent_str, parent_id,
252
_encode_and_escape(ie.revision), ie.text_sha1,
255
append('<file%s file_id="%s name="%s%s%s />\n' % (
256
executable, _encode_and_escape(ie.file_id),
257
_encode_and_escape(ie.name), parent_str, parent_id))
258
elif ie.kind == 'directory':
260
append('<directory file_id="%s name="%s%s%s revision="%s '
262
_encode_and_escape(ie.file_id),
263
_encode_and_escape(ie.name),
264
parent_str, parent_id,
265
_encode_and_escape(ie.revision)))
267
append('<directory file_id="%s name="%s%s%s />\n' % (
268
_encode_and_escape(ie.file_id),
269
_encode_and_escape(ie.name),
270
parent_str, parent_id))
271
elif ie.kind == 'symlink':
273
append('<symlink file_id="%s name="%s%s%s revision="%s '
274
'symlink_target="%s />\n' % (
275
_encode_and_escape(ie.file_id),
276
_encode_and_escape(ie.name),
277
parent_str, parent_id,
278
_encode_and_escape(ie.revision),
279
_encode_and_escape(ie.symlink_target)))
281
append('<symlink file_id="%s name="%s%s%s />\n' % (
282
_encode_and_escape(ie.file_id),
283
_encode_and_escape(ie.name),
284
parent_str, parent_id))
285
elif ie.kind == 'tree-reference':
286
if ie.kind not in self.supported_kinds:
287
raise errors.UnsupportedInventoryKind(ie.kind)
289
append('<tree-reference file_id="%s name="%s%s%s '
290
'revision="%s reference_revision="%s />\n' % (
291
_encode_and_escape(ie.file_id),
292
_encode_and_escape(ie.name),
293
parent_str, parent_id,
294
_encode_and_escape(ie.revision),
295
_encode_and_escape(ie.reference_revision)))
297
append('<tree-reference file_id="%s name="%s%s%s />\n' % (
298
_encode_and_escape(ie.file_id),
299
_encode_and_escape(ie.name),
300
parent_str, parent_id))
302
raise errors.UnsupportedInventoryKind(ie.kind)
303
append('</inventory>\n')
158
serialize_inventory_flat(inv, append,
159
self.root_id, self.supported_kinds, working)
304
160
if f is not None:
305
161
f.writelines(output)
306
162
# Just to keep the cache from growing without bounds
312
168
"""Append the inventory root to output."""
313
169
if inv.revision_id is not None:
314
170
revid1 = ' revision_id="'
315
revid2 = _encode_and_escape(inv.revision_id)
171
revid2 = encode_and_escape(inv.revision_id)
319
175
append('<inventory format="%s"%s%s>\n' % (
320
176
self.format_num, revid1, revid2))
321
177
append('<directory file_id="%s name="%s revision="%s />\n' % (
322
_encode_and_escape(inv.root.file_id),
323
_encode_and_escape(inv.root.name),
324
_encode_and_escape(inv.root.revision)))
178
encode_and_escape(inv.root.file_id),
179
encode_and_escape(inv.root.name),
180
encode_and_escape(inv.root.revision)))
326
182
def _pack_revision(self, rev):
327
183
"""Revision object -> xml tree"""
371
227
prop_elt.tail = '\n'
372
228
top_elt.tail = '\n'
230
def _unpack_entry(self, elt, entry_cache=None, return_from_cache=False):
231
# This is here because it's overridden by xml7
232
return unpack_inventory_entry(elt, entry_cache,
374
235
def _unpack_inventory(self, elt, revision_id=None, entry_cache=None,
375
236
return_from_cache=False):
376
237
"""Construct from XML Element"""
377
if elt.tag != 'inventory':
378
raise errors.UnexpectedInventoryFormat('Root tag is %r' % elt.tag)
379
format = elt.get('format')
380
if format != self.format_num:
381
raise errors.UnexpectedInventoryFormat('Invalid format version %r'
383
revision_id = elt.get('revision_id')
384
if revision_id is not None:
385
revision_id = cache_utf8.encode(revision_id)
386
inv = inventory.Inventory(root_id=None, revision_id=revision_id)
388
ie = self._unpack_entry(e, entry_cache=entry_cache,
389
return_from_cache=return_from_cache)
238
inv = unpack_inventory_flat(elt, self.format_num, self._unpack_entry,
239
entry_cache, return_from_cache)
391
240
self._check_cache_size(len(inv), entry_cache)
394
def _unpack_entry(self, elt, entry_cache=None, return_from_cache=False):
396
file_id = elt_get('file_id')
397
revision = elt_get('revision')
398
# Check and see if we have already unpacked this exact entry
399
# Some timings for "repo.revision_trees(last_100_revs)"
401
# unmodified 4.1s 40.8s
403
# using fifo 2.83s 29.1s
407
# no_copy 2.00s 20.5s
408
# no_c,dict 1.95s 18.0s
409
# Note that a cache of 10k nodes is more than sufficient to hold all of
410
# the inventory for the last 100 revs for bzr, but not for mysql (20k
411
# is enough for mysql, which saves the same 2s as using a dict)
413
# Breakdown of mysql using time.clock()
414
# 4.1s 2 calls to element.get for file_id, revision_id
415
# 4.5s cache_hit lookup
416
# 7.1s InventoryFile.copy()
417
# 2.4s InventoryDirectory.copy()
418
# 0.4s decoding unique entries
419
# 1.6s decoding entries after FIFO fills up
420
# 0.8s Adding nodes to FIFO (including flushes)
421
# 0.1s cache miss lookups
423
# 4.1s 2 calls to element.get for file_id, revision_id
424
# 9.9s cache_hit lookup
425
# 10.8s InventoryEntry.copy()
426
# 0.3s cache miss lookus
427
# 1.2s decoding entries
428
# 1.0s adding nodes to LRU
429
if entry_cache is not None and revision is not None:
430
key = (file_id, revision)
432
# We copy it, because some operations may mutate it
433
cached_ie = entry_cache[key]
437
# Only copying directory entries drops us 2.85s => 2.35s
438
if return_from_cache:
439
if cached_ie.kind == 'directory':
440
return cached_ie.copy()
442
return cached_ie.copy()
445
if not InventoryEntry.versionable_kind(kind):
446
raise AssertionError('unsupported entry kind %s' % kind)
448
get_cached = _get_utf8_or_ascii
450
file_id = get_cached(file_id)
451
if revision is not None:
452
revision = get_cached(revision)
453
parent_id = elt_get('parent_id')
454
if parent_id is not None:
455
parent_id = get_cached(parent_id)
457
if kind == 'directory':
458
ie = inventory.InventoryDirectory(file_id,
462
ie = inventory.InventoryFile(file_id,
465
ie.text_sha1 = elt_get('text_sha1')
466
if elt_get('executable') == 'yes':
468
v = elt_get('text_size')
469
ie.text_size = v and int(v)
470
elif kind == 'symlink':
471
ie = inventory.InventoryLink(file_id,
474
ie.symlink_target = elt_get('symlink_target')
476
raise errors.UnsupportedInventoryKind(kind)
477
ie.revision = revision
478
if revision is not None and entry_cache is not None:
479
# We cache a copy() because callers like to mutate objects, and
480
# that would cause the item in cache to mutate as well.
481
# This has a small effect on many-inventory performance, because
482
# the majority fraction is spent in cache hits, not misses.
483
entry_cache[key] = ie.copy()
487
243
def _unpack_revision(self, elt):
488
244
"""XML Element -> Revision object"""
489
245
format = elt.get('format')
532
288
raise AssertionError("repeated property %r" % name)
533
289
rev.properties[name] = value
291
def _find_text_key_references(self, line_iterator):
292
"""Core routine for extracting references to texts from inventories.
294
This performs the translation of xml lines to revision ids.
296
:param line_iterator: An iterator of lines, origin_version_id
297
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
298
to whether they were referred to by the inventory of the
299
revision_id that they contain. Note that if that revision_id was
300
not part of the line_iterator's output then False will be given -
301
even though it may actually refer to that key.
303
if not self.support_altered_by_hack:
304
raise AssertionError(
305
"_find_text_key_references only "
306
"supported for branches which store inventory as unnested xml"
307
", not on %r" % self)
310
# this code needs to read every new line in every inventory for the
311
# inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
312
# not present in one of those inventories is unnecessary but not
313
# harmful because we are filtering by the revision id marker in the
314
# inventory lines : we only select file ids altered in one of those
315
# revisions. We don't need to see all lines in the inventory because
316
# only those added in an inventory in rev X can contain a revision=X
318
unescape_revid_cache = {}
319
unescape_fileid_cache = {}
321
# jam 20061218 In a big fetch, this handles hundreds of thousands
322
# of lines, so it has had a lot of inlining and optimizing done.
323
# Sorry that it is a little bit messy.
324
# Move several functions to be local variables, since this is a long
326
search = self._file_ids_altered_regex.search
327
unescape = _unescape_xml
328
setdefault = result.setdefault
329
for line, line_key in line_iterator:
333
# One call to match.group() returning multiple items is quite a
334
# bit faster than 2 calls to match.group() each returning 1
335
file_id, revision_id = match.group('file_id', 'revision_id')
337
# Inlining the cache lookups helps a lot when you make 170,000
338
# lines and 350k ids, versus 8.4 unique ids.
339
# Using a cache helps in 2 ways:
340
# 1) Avoids unnecessary decoding calls
341
# 2) Re-uses cached strings, which helps in future set and
343
# (2) is enough that removing encoding entirely along with
344
# the cache (so we are using plain strings) results in no
345
# performance improvement.
347
revision_id = unescape_revid_cache[revision_id]
349
unescaped = unescape(revision_id)
350
unescape_revid_cache[revision_id] = unescaped
351
revision_id = unescaped
353
# Note that unconditionally unescaping means that we deserialise
354
# every fileid, which for general 'pull' is not great, but we don't
355
# really want to have some many fulltexts that this matters anyway.
358
file_id = unescape_fileid_cache[file_id]
360
unescaped = unescape(file_id)
361
unescape_fileid_cache[file_id] = unescaped
364
key = (file_id, revision_id)
365
setdefault(key, False)
366
if revision_id == line_key[-1]:
536
371
# Module-level shared instance of the format-8 serializer.
serializer_v8 = Serializer_v8()