    serialize_inventory_flat,
    unpack_inventory_entry,
    unpack_inventory_flat,
from bzrlib.inventory import ROOT_ID, Inventory, InventoryEntry
from bzrlib.revision import Revision
from bzrlib.errors import BzrError

    "'":"&apos;", # FIXME: overkill
def _ensure_utf8_re():
    """Make sure the _utf8_re and _unicode_re regexes have been compiled."""
    global _utf8_re, _unicode_re
    if _utf8_re is None:
        _utf8_re = re.compile('[&<>\'\"]|[\x80-\xff]+')
    if _unicode_re is None:
        _unicode_re = re.compile(u'[&<>\'\"\u0080-\uffff]')
def _unicode_escape_replace(match, _map=_xml_escape_map):
    """Replace a string of non-ascii, non XML safe characters with their escape

    This will escape both Standard XML escapes, like <>"', etc.
    As well as escaping non ascii characters, because ElementTree did.
    This helps us remain compatible to older versions of bzr. We may change
    our policy in the future, though.
    """
    # jam 20060816 Benchmarks show that try/KeyError is faster if you
    # expect the entity to rarely miss. There is about a 10% difference
    # in overall time. But if you miss frequently, then 'if None' is much
    # faster. For our use case, we *rarely* have a revision id, file id
    # or path name that is unicode. So use try/KeyError.
    try:
        return _map[match.group()]
    except KeyError:
        return "&#%d;" % ord(match.group())
def _utf8_escape_replace(match, _map=_xml_escape_map):
    """Escape utf8 characters into XML safe ones.

    This uses 2 tricks. It is either escaping "standard" characters, like "&<>,
    or it is handling characters with the high-bit set. For ascii characters,
    we just lookup the replacement in the dictionary. For everything else, we
    decode back into Unicode, and then use the XML escape code.
    """
    try:
        return _map[match.group()]
    except KeyError:
        return ''.join('&#%d;' % ord(uni_chr)
                       for uni_chr in match.group().decode('utf8'))
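# Illustrative sketch (not part of the original file): the plain-str path;
# high-bit byte runs are decoded from utf-8 before being turned into numeric
# character references.
#
#   _ensure_utf8_re()
#   _utf8_re.sub(_utf8_escape_replace, 'caf\xc3\xa9 & co')
#   # -> 'caf&#233; &amp; co'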
def _encode_and_escape(unicode_or_utf8_str, _map=_to_escaped_map):
    """Encode the string into utf8, and escape invalid XML characters"""
    # We frequently get entities we have not seen before, so it is better
    # to check if None, rather than try/KeyError
    text = _map.get(unicode_or_utf8_str)
    if text is None:
        if unicode_or_utf8_str.__class__ is unicode:
            # The alternative policy is to do a regular UTF8 encoding
            # and then escape only XML meta characters.
            # Performance is equivalent once you use cache_utf8. *However*
            # this makes the serialized texts incompatible with old versions
            # of bzr. So no net gain. (Perhaps the read code would handle utf8
            # better than entity escapes, but cElementTree seems to do just fine
            # either way.)
            text = str(_unicode_re.sub(_unicode_escape_replace,
                                       unicode_or_utf8_str)) + '"'
        else:
            # Plain strings are considered to already be in utf-8 so we do a
            # slightly different method for escaping.
            text = _utf8_re.sub(_utf8_escape_replace,
                                unicode_or_utf8_str) + '"'
        _map[unicode_or_utf8_str] = text
    return text
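# A minimal usage sketch (this helper is not in the original file): it shows
# why the attribute templates below omit their own closing quote - the escaped
# value already carries a trailing '"'.
def _example_encode_and_escape():
    _ensure_utf8_re()
    escaped = _encode_and_escape('file<1>')       # -> 'file&lt;1&gt;"'
    line = '<directory file_id="%s name="%s />\n' % (
        _encode_and_escape('TREE_ROOT'), _encode_and_escape('dir name'))
    # line == '<directory file_id="TREE_ROOT" name="dir name" />\n'
    return escaped, line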
def _get_utf8_or_ascii(a_str,
                       _encode_utf8=cache_utf8.encode,
                       _get_cached_ascii=cache_utf8.get_cached_ascii):
    """Return a cached version of the string.

    cElementTree will return a plain string if the XML is plain ascii. It only
    returns Unicode when it needs to. We want to work in utf-8 strings. So if
    cElementTree returns a plain string, we can just return the cached version.
    If it is Unicode, then we need to encode it.

    :param a_str: An 8-bit string or Unicode as returned by
        cElementTree.Element.get()
    :return: A utf-8 encoded 8-bit string.
    """
    # This is fairly optimized because we know what cElementTree does, this is
    # not meant as a generic function for all cases. Because it is possible for
    # an 8-bit string to not be ascii or valid utf8.
    if a_str.__class__ is unicode:
        return _encode_utf8(a_str)
    else:
        return _get_cached_ascii(a_str)
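# Illustrative sketch (not in the original): what the two branches return for
# the kinds of values cElementTree.Element.get() hands back.
#
#   _get_utf8_or_ascii('TREE_ROOT')      # plain ascii str -> the cached str
#   _get_utf8_or_ascii(u'r\u00e9v-1')    # unicode -> 'r\xc3\xa9v-1' (utf-8 str)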
def _clear_cache():
    """Clean out the unicode => escaped map"""
    _to_escaped_map.clear()
def _unescaper(match, _map=_xml_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = lazy_regex.lazy_compile('\&([^;]*);')


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    return _unescape_re.sub(_unescaper, data)
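# A small usage sketch (this helper is not in the original module): predefined
# entities come from _xml_unescape_map; numeric references are decoded back to
# utf-8 bytes.  Expected result: 'caf\xc3\xa9 & "co"'.
def _example_unescape_xml():
    return _unescape_xml('caf&#233; &amp; &quot;co&quot;')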
class Serializer_v8(XMLSerializer):

    def write_inventory(self, inv, f, working=False):
        """Write inventory to a file.

        :param working: If True skip history data - text_sha1, text_size,
            reference_revision, symlink_target.
        :return: The inventory as a list of lines.
        """
        self._check_revisions(inv)
        output = []
        append = output.append
        self._append_inventory_root(append, inv)
        entries = inv.iter_entries()
        # Skip the root
        root_path, root_ie = entries.next()
        for path, ie in entries:
            if ie.parent_id != self.root_id:
                parent_str = ' parent_id="'
                parent_id = _encode_and_escape(ie.parent_id)
            else:
                parent_str = ''
                parent_id = ''
            if ie.kind == 'file':
                if ie.executable:
                    executable = ' executable="yes"'
                else:
                    executable = ''
                if not working:
                    append('<file%s file_id="%s name="%s%s%s revision="%s '
                        'text_sha1="%s" text_size="%d" />\n' % (
                        executable, _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name), parent_str, parent_id,
                        _encode_and_escape(ie.revision), ie.text_sha1,
                        ie.text_size))
                else:
                    append('<file%s file_id="%s name="%s%s%s />\n' % (
                        executable, _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name), parent_str, parent_id))
            elif ie.kind == 'directory':
                if not working:
                    append('<directory file_id="%s name="%s%s%s revision="%s '
                        '/>\n' % (
                        _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name),
                        parent_str, parent_id,
                        _encode_and_escape(ie.revision)))
                else:
                    append('<directory file_id="%s name="%s%s%s />\n' % (
                        _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name),
                        parent_str, parent_id))
            elif ie.kind == 'symlink':
                if not working:
                    append('<symlink file_id="%s name="%s%s%s revision="%s '
                        'symlink_target="%s />\n' % (
                        _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name),
                        parent_str, parent_id,
                        _encode_and_escape(ie.revision),
                        _encode_and_escape(ie.symlink_target)))
                else:
                    append('<symlink file_id="%s name="%s%s%s />\n' % (
                        _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name),
                        parent_str, parent_id))
            elif ie.kind == 'tree-reference':
                if ie.kind not in self.supported_kinds:
                    raise errors.UnsupportedInventoryKind(ie.kind)
                if not working:
                    append('<tree-reference file_id="%s name="%s%s%s '
                        'revision="%s reference_revision="%s />\n' % (
                        _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name),
                        parent_str, parent_id,
                        _encode_and_escape(ie.revision),
                        _encode_and_escape(ie.reference_revision)))
                else:
                    append('<tree-reference file_id="%s name="%s%s%s />\n' % (
                        _encode_and_escape(ie.file_id),
                        _encode_and_escape(ie.name),
                        parent_str, parent_id))
            else:
                raise errors.UnsupportedInventoryKind(ie.kind)
        append('</inventory>\n')
        serialize_inventory_flat(inv, append,
            self.root_id, self.supported_kinds, working)
        if f is not None:
            f.writelines(output)
        # Just to keep the cache from growing without bounds
        return output
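    # Usage sketch (illustrative, not from the original file): pass f=None to
    # just get the serialized lines back.
    #
    #   from bzrlib.inventory import Inventory
    #   inv = Inventory(root_id='TREE_ROOT', revision_id='rev-1')
    #   inv.root.revision = 'rev-1'
    #   lines = serializer_v8.write_inventory(inv, None)
    #   # lines[0] is roughly '<inventory format="8" revision_id="rev-1">\n'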
    def _append_inventory_root(self, append, inv):
        """Append the inventory root to output."""
        if inv.revision_id is not None:
            revid1 = ' revision_id="'
            revid2 = _encode_and_escape(inv.revision_id)
        else:
            revid1 = ""
            revid2 = ""
        append('<inventory format="%s"%s%s>\n' % (
            self.format_num, revid1, revid2))
        append('<directory file_id="%s name="%s revision="%s />\n' % (
            _encode_and_escape(inv.root.file_id),
            _encode_and_escape(inv.root.name),
            _encode_and_escape(inv.root.revision)))
    def _pack_revision(self, rev):
        """Revision object -> xml tree"""
                prop_elt.tail = '\n'
            top_elt.tail = '\n'
    def _unpack_entry(self, elt, entry_cache=None, return_from_cache=False):
        # This is here because it's overridden by xml7
        return unpack_inventory_entry(elt, entry_cache,
                return_from_cache)

    def _unpack_inventory(self, elt, revision_id=None, entry_cache=None,
                          return_from_cache=False):
        """Construct from XML Element"""
        if elt.tag != 'inventory':
            raise errors.UnexpectedInventoryFormat('Root tag is %r' % elt.tag)
        format = elt.get('format')
        if format != self.format_num:
            raise errors.UnexpectedInventoryFormat('Invalid format version %r'
                                                   % format)
        revision_id = elt.get('revision_id')
        if revision_id is not None:
            revision_id = cache_utf8.encode(revision_id)
        inv = inventory.Inventory(root_id=None, revision_id=revision_id)
        for e in elt:
            ie = self._unpack_entry(e, entry_cache=entry_cache)
            inv.add(ie)
        inv = unpack_inventory_flat(elt, self.format_num, self._unpack_entry,
            entry_cache, return_from_cache)
        self._check_cache_size(len(inv), entry_cache)
        return inv
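    # Round-trip sketch (illustrative, not from the original file): the public
    # entry points that reach _unpack_inventory are read_inventory_from_string
    # and friends on the serializer (names as I recall them from bzrlib).
    #
    #   xml = serializer_v8.write_inventory_to_string(inv)
    #   inv2 = serializer_v8.read_inventory_from_string(xml)
    #   # inv2.root.file_id == inv.root.file_id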
    def _unpack_entry(self, elt, entry_cache=None):
        elt_get = elt.get
        file_id = elt_get('file_id')
        revision = elt_get('revision')
        # Check and see if we have already unpacked this exact entry
        # Some timings for "repo.revision_trees(last_100_revs)"
        #                bzr     mysql
        #   unmodified   4.1s    40.8s
        #   using fifo   2.83s   29.1s
        #   no_copy      2.00s   20.5s
        #   no_c,dict    1.95s   18.0s
        # Note that a cache of 10k nodes is more than sufficient to hold all of
        # the inventory for the last 100 revs for bzr, but not for mysql (20k
        # is enough for mysql, which saves the same 2s as using a dict)

        # Breakdown of mysql using time.clock()
        #  4.1s    2 calls to element.get for file_id, revision_id
        #  4.5s    cache_hit lookup
        #  7.1s    InventoryFile.copy()
        #  2.4s    InventoryDirectory.copy()
        #  0.4s    decoding unique entries
        #  1.6s    decoding entries after FIFO fills up
        #  0.8s    Adding nodes to FIFO (including flushes)
        #  0.1s    cache miss lookups
        # Using an LRU cache:
        #  4.1s    2 calls to element.get for file_id, revision_id
        #  9.9s    cache_hit lookup
        #  10.8s   InventoryEntry.copy()
        #  0.3s    cache miss lookups
        #  1.2s    decoding entries
        #  1.0s    adding nodes to LRU
        if entry_cache is not None and revision is not None:
            key = (file_id, revision)
            try:
                # We copy it, because some operations may mutate it
                cached_ie = entry_cache[key]
            except KeyError:
                pass
            else:
                # Only copying directory entries drops us 2.85s => 2.35s
                # if cached_ie.kind == 'directory':
                #     return cached_ie.copy()
                return cached_ie.copy()
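        # Cache-shape sketch (illustrative, not from the original file): the
        # entry_cache passed in by repository code is typically a
        # bzrlib.fifo_cache.FIFOCache keyed by (file_id, revision), so entries
        # shared between successive inventories are served from the cache
        # instead of being re-decoded.
        #
        #   from bzrlib import fifo_cache
        #   entry_cache = fifo_cache.FIFOCache(10*1024)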
        kind = elt.tag
        if not InventoryEntry.versionable_kind(kind):
            raise AssertionError('unsupported entry kind %s' % kind)

        get_cached = _get_utf8_or_ascii

        file_id = get_cached(file_id)
        if revision is not None:
            revision = get_cached(revision)
        parent_id = elt_get('parent_id')
        if parent_id is not None:
            parent_id = get_cached(parent_id)

        if kind == 'directory':
            ie = inventory.InventoryDirectory(file_id,
                                              elt_get('name'),
                                              parent_id)
        elif kind == 'file':
            ie = inventory.InventoryFile(file_id,
                                         elt_get('name'),
                                         parent_id)
            ie.text_sha1 = elt_get('text_sha1')
            if elt_get('executable') == 'yes':
                ie.executable = True
            v = elt_get('text_size')
            ie.text_size = v and int(v)
        elif kind == 'symlink':
            ie = inventory.InventoryLink(file_id,
                                         elt_get('name'),
                                         parent_id)
            ie.symlink_target = elt_get('symlink_target')
        else:
            raise errors.UnsupportedInventoryKind(kind)
        ie.revision = revision
        if revision is not None and entry_cache is not None:
            # We cache a copy() because callers like to mutate objects, and
            # that would cause the item in cache to mutate as well.
            # This has a small effect on many-inventory performance, because
            # the majority fraction is spent in cache hits, not misses.
            entry_cache[key] = ie.copy()
        return ie
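    # Decoding sketch (illustrative, not from the original file; fromstring is
    # the ElementTree parser re-exported by bzrlib.xml_serializer, to the best
    # of my recollection):
    #
    #   from bzrlib.xml_serializer import fromstring
    #   elt = fromstring('<file file_id="f-1" name="a" parent_id="TREE_ROOT"'
    #                    ' revision="rev-1" text_sha1="%s" text_size="0" />'
    #                    % ('0' * 40))
    #   ie = serializer_v8._unpack_entry(elt)
    #   # ie.kind == 'file', ie.file_id == 'f-1', ie.text_size == 0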
    def _unpack_revision(self, elt):
        """XML Element -> Revision object"""
        format = elt.get('format')
                raise AssertionError("repeated property %r" % name)
            rev.properties[name] = value
    def _find_text_key_references(self, line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}
        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')
            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result
serializer_v8 = Serializer_v8()
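
# Behaviour sketch (illustrative helper, not part of the original module).
def _example_find_text_key_references():
    # Assuming _file_ids_altered_regex picks out the file_id/revision
    # attribute pair from serialized inventory lines, the result maps
    # (file_id, revision_id) keys to whether the entry's revision is the
    # inventory's own revision.
    lines = [
        ('<file file_id="f-1" name="a" revision="rev-1" />\n', ('rev-1',)),
        ('<file file_id="f-2" name="b" revision="rev-0" />\n', ('rev-1',)),
    ]
    # Expected: {('f-1', 'rev-1'): True, ('f-2', 'rev-0'): False}
    return serializer_v8._find_text_key_references(iter(lines))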