569
532
raise AssertionError("repeated property %r" % name)
570
533
rev.properties[name] = value
572
def _find_text_key_references(self, line_iterator):
    """Core routine for extracting references to texts from inventories.

    This performs the translation of xml lines to revision ids.

    :param line_iterator: An iterator of lines, origin_version_id
    :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
        to whether they were referred to by the inventory of the
        revision_id that they contain. Note that if that revision_id was
        not part of the line_iterator's output then False will be given -
        even though it may actually refer to that key.
    """
    if not self.support_altered_by_hack:
        raise AssertionError(
            "_find_text_key_references only "
            "supported for branches which store inventory as unnested xml"
            ", not on %r" % self)
    result = {}

    # this code needs to read every new line in every inventory for the
    # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
    # not present in one of those inventories is unnecessary but not
    # harmful because we are filtering by the revision id marker in the
    # inventory lines : we only select file ids altered in one of those
    # revisions. We don't need to see all lines in the inventory because
    # only those added in an inventory in rev X can contain a revision=X
    # line.
    unescape_revid_cache = {}
    unescape_fileid_cache = {}

    # jam 20061218 In a big fetch, this handles hundreds of thousands
    # of lines, so it has had a lot of inlining and optimizing done.
    # Sorry that it is a little bit messy.
    # Move several functions to be local variables, since this is a long
    # running loop.
    search = self._file_ids_altered_regex.search
    unescape = _unescape_xml
    setdefault = result.setdefault
    for line, line_key in line_iterator:
        # Skip inventory lines that do not carry a file_id/revision_id
        # marker at all.
        match = search(line)
        if match is None:
            continue
        # One call to match.group() returning multiple items is quite a
        # bit faster than 2 calls to match.group() each returning 1
        file_id, revision_id = match.group('file_id', 'revision_id')

        # Inlining the cache lookups helps a lot when you make 170,000
        # lines and 350k ids, versus 8.4 unique ids.
        # Using a cache helps in 2 ways:
        #   1) Avoids unnecessary decoding calls
        #   2) Re-uses cached strings, which helps in future set and
        #      equality checks.
        # (2) is enough that removing encoding entirely along with
        # the cache (so we are using plain strings) results in no
        # performance improvement.
        try:
            revision_id = unescape_revid_cache[revision_id]
        except KeyError:
            unescaped = unescape(revision_id)
            unescape_revid_cache[revision_id] = unescaped
            revision_id = unescaped

        # Note that unconditionally unescaping means that we deserialise
        # every fileid, which for general 'pull' is not great, but we don't
        # really want to have some many fulltexts that this matters anyway.
        try:
            file_id = unescape_fileid_cache[file_id]
        except KeyError:
            unescaped = unescape(file_id)
            unescape_fileid_cache[file_id] = unescaped
            file_id = unescaped

        key = (file_id, revision_id)
        setdefault(key, False)
        # A text key is marked True only when the line came from the
        # inventory of the very revision it names (line_key's last
        # element is the origin revision id).
        if revision_id == line_key[-1]:
            result[key] = True
    return result
652
536
# Module-level shared instance of the v8 serializer.
# NOTE(review): Serializer_v8 is defined elsewhere in this file/package
# (not visible in this chunk) — presumably the format-8 XML revision
# serializer; confirm against its class definition.
serializer_v8 = Serializer_v8()