                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs. _save_pack_names also moves the now-unreferenced old packs
        # out of the way.
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result
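        # Illustrative aside (not in the original source): pack_operations is
        # a list of (revision_count, packs_to_combine) pairs, e.g.
        #   [(2000, [pack_a, pack_b]), (500, [pack_c])]
        # so every pack that was combined away ends up in to_be_obsoleted.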


# XXX: This format is scheduled for termination

# class GCPackRepository(KnitPackRepository):
#     """GC customisation of KnitPackRepository."""
#
#     def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
#                  _serializer):
#         """Overridden to change pack collection class."""
#         KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
#             _commit_builder_class, _serializer)
#         # and now replace everything it did :)
#         index_transport = self._transport.clone('indices')
#         self._pack_collection = GCRepositoryPackCollection(self,
#             self._transport, index_transport,
#             self._transport.clone('upload'),
#             self._transport.clone('packs'),
#             _format.index_builder_class,
#             _format.index_class,
#             use_chk_index=self._format.supports_chks,
#             )
#         self.inventories = GroupCompressVersionedFiles(
#             _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
#                 add_callback=self._pack_collection.inventory_index.add_callback,
#                 parents=True, is_locked=self.is_locked),
#             access=self._pack_collection.inventory_index.data_access)
#         self.revisions = GroupCompressVersionedFiles(
#             _GCGraphIndex(self._pack_collection.revision_index.combined_index,
#                 add_callback=self._pack_collection.revision_index.add_callback,
#                 parents=True, is_locked=self.is_locked),
#             access=self._pack_collection.revision_index.data_access,
#             delta=False)
#         self.signatures = GroupCompressVersionedFiles(
#             _GCGraphIndex(self._pack_collection.signature_index.combined_index,
#                 add_callback=self._pack_collection.signature_index.add_callback,
#                 parents=False, is_locked=self.is_locked),
#             access=self._pack_collection.signature_index.data_access,
#             delta=False)
#         self.texts = GroupCompressVersionedFiles(
#             _GCGraphIndex(self._pack_collection.text_index.combined_index,
#                 add_callback=self._pack_collection.text_index.add_callback,
#                 parents=True, is_locked=self.is_locked),
#             access=self._pack_collection.text_index.data_access)
#         if _format.supports_chks:
#             # No graph, no compression:- references from chks are between
#             # different objects not temporal versions of the same; and without
#             # some sort of temporal structure knit compression will just fail.
#             self.chk_bytes = GroupCompressVersionedFiles(
#                 _GCGraphIndex(self._pack_collection.chk_index.combined_index,
#                     add_callback=self._pack_collection.chk_index.add_callback,
#                     parents=False, is_locked=self.is_locked),
#                 access=self._pack_collection.chk_index.data_access)
#         else:
#             self.chk_bytes = None
#         # True when the repository object is 'write locked' (as opposed to the
#         # physical lock only taken out around changes to the pack-names list.)
#         # Another way to represent this would be a decorator around the control
#         # files object that presents logical locks as physical ones - if this
#         # gets ugly consider that alternative design. RBC 20071011
#         self._write_lock_count = 0
#         self._transaction = None
#         self._reconcile_does_inventory_gc = True
#         self._reconcile_fixes_text_parents = True
#         self._reconcile_backsup_inventory = False
#
#     def suspend_write_group(self):
#         raise errors.UnsuspendableWriteGroup(self)
#
#     def _resume_write_group(self, tokens):
#         raise errors.UnsuspendableWriteGroup(self)
#
#     def _reconcile_pack(self, collection, packs, extension, revs, pb):
#         packer = GCCHKReconcilePacker(collection, packs, extension)
#         return packer.pack(pb)
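
# (Illustrative aside, not from the original source: chk pages are keyed by
# content hash, e.g. ('sha1:...',), so successive versions of an inventory
# page share no per-key ancestry for a knit delta chain to follow; group
# compress instead matches against whatever else is in the same group.)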


class CHKInventoryRepository(KnitPackRepository):
    """subclass of KnitPackRepository that uses CHK based inventories."""

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
                 _serializer):
        # (earlier __init__ body not shown in this excerpt)
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        serializer = self._format._serializer
        result = inventory.CHKInventory.from_inventory(self.chk_bytes, inv,
            maximum_size=serializer.maximum_size,
            search_key_name=serializer.search_key_name)
        inv_lines = result.to_lines()
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def _create_inv_from_null(self, delta, revision_id):
        """This will mutate new_inv directly.

        This is a simplified form of create_by_apply_delta which knows that all
        the old values must be None, so everything is a create.
        """
        serializer = self._format._serializer
        new_inv = inventory.CHKInventory(serializer.search_key_name)
        new_inv.revision_id = revision_id
        entry_to_bytes = new_inv._entry_to_bytes
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for old_path, new_path, file_id, entry in delta:
            if old_path is not None:
                raise ValueError('Invalid delta, somebody tried to delete %r'
                                 ' from the NULL_REVISION'
                                 % ((old_path, file_id),))
            if new_path is None:
                raise ValueError('Invalid delta, delta from NULL_REVISION has'
                                 ' no new_path %r' % (file_id,))
            if new_path == '':
                new_inv.root_id = file_id
                parent_id_basename_key = StaticTuple('', '').intern()
            else:
                utf8_entry_name = entry.name.encode('utf-8')
                parent_id_basename_key = StaticTuple(entry.parent_id,
                                                     utf8_entry_name).intern()
            new_value = entry_to_bytes(entry)
            # new_inv._path_to_fileid_cache[new_path] = file_id
            key = StaticTuple(file_id).intern()
            id_to_entry_dict[key] = new_value
            parent_id_basename_dict[parent_id_basename_key] = file_id
        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=serializer.maximum_size)
        return new_inv
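    # Illustrative sketch (not from the original source): a delta against
    # NULL_REVISION contains only creates, so each entry is
    # (old_path=None, new_path, file_id, entry), e.g. with hypothetical ids:
    #   delta = [(None, '', 'TREE_ROOT', root_dir_entry),
    #            (None, 'foo', 'foo-id', foo_file_entry)]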

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against.
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = None
        if basis_inv is None:
            if basis_revision_id == _mod_revision.NULL_REVISION:
                new_inv = self._create_inv_from_null(delta, new_revision_id)
                inv_lines = new_inv.to_lines()
                return self._inventory_add_lines(new_revision_id, parents,
                    inv_lines, check_content=False), new_inv
            basis_tree = self.revision_tree(basis_revision_id)
            basis_tree.lock_read()
            basis_inv = basis_tree.inventory
        try:
            result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                propagate_caches=propagate_caches)
            inv_lines = result.to_lines()
            return self._inventory_add_lines(new_revision_id, parents,
                inv_lines, check_content=False), result
        finally:
            if basis_tree is not None:
                basis_tree.unlock()
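    # Hypothetical usage sketch (names are illustrative, not from the original
    # source); requires an active write group:
    #   repo.lock_write()
    #   repo.start_write_group()
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       basis_revision_id, delta, new_revision_id,
    #       parents=[basis_revision_id])
    #   repo.commit_write_group()
    #   repo.unlock()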

    def _deserialise_inventory(self, revision_id, bytes):
        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
            (revision_id,))

    def _iter_inventories(self, revision_ids, ordering):
        """Iterate over many inventory objects."""
        if ordering is None:
            ordering = 'unordered'
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, ordering, True)
        texts = {}
        for record in stream:
            if record.storage_kind != 'absent':
                texts[record.key] = record.get_bytes_as('fulltext')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        # Without a native 'xml' inventory, this method doesn't make sense.
        # However older working trees, and older bundles want it - so we supply
        # it allowing _get_inventory_xml to work. Bundles currently use the
        # serializer directly; this also isn't ideal, but there isn't an xml
        # iteration interface offered at all for repositories. We could make
        # _iter_inventory_xmls be part of the contract, even if kept private.
        inv_to_str = self._serializer.write_inventory_to_string
        for inv in self.iter_inventories(revision_ids, ordering=ordering):
            yield inv_to_str(inv), inv.revision_id

    def _find_present_inventory_keys(self, revision_keys):
        parent_map = self.inventories.get_parent_map(revision_keys)
        present_inventory_keys = set(k for k in parent_map)
        return present_inventory_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids
            that altered it listed explicitly.
        """
        rich_root = self.supports_rich_root()
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        file_id_revisions = {}
        pb = ui.ui_factory.nested_progress_bar()
        try:
            revision_keys = [(r,) for r in revision_ids]
            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
            # TODO: instead of using _find_present_inventory_keys, change the
            #       code paths to allow missing inventories to be tolerated.
            #       However, we only want to tolerate missing parent
            #       inventories, not missing inventories for revision_ids
            present_parent_inv_keys = self._find_present_inventory_keys(
                                        parent_keys)
            present_parent_inv_ids = set(
                [k[-1] for k in present_parent_inv_keys])
            inventories_to_read = set(revision_ids)
            inventories_to_read.update(present_parent_inv_ids)
            root_key_info = _build_interesting_key_sets(
                self, inventories_to_read, present_parent_inv_ids)
            interesting_root_keys = root_key_info.interesting_root_keys
            uninteresting_root_keys = root_key_info.uninteresting_root_keys
            chk_bytes = self.chk_bytes
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        interesting_root_keys, uninteresting_root_keys,
                        pb=pb):
                for name, bytes in items:
                    (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
                    # TODO: consider interning file_id, revision_id here, or
                    #       pushing that intern() into bytes_to_info()
                    # TODO: rich_root should always be True here, for all
                    #       repositories that support chk_bytes
                    if not rich_root and name_utf8 == '':
                        continue
                    try:
                        file_id_revisions[file_id].add(revision_id)
                    except KeyError:
                        file_id_revisions[file_id] = set([revision_id])
        finally:
            pb.finished()
        return file_id_revisions
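    # Illustrative result shape (hypothetical ids, not from the original
    # source):
    #   {'file-id-1': set(['rev-a', 'rev-c']), 'file-id-2': set(['rev-b'])}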

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        # XXX: Slow version but correct: rewrite as a series of delta
        # examinations/direct tree traversal. Note that that will require care
        # as a common node is reachable both from the inventory that added it,
        # and others afterwards.
        revision_keys = self.revisions.keys()
        result = {}
        rich_roots = self.supports_rich_root()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            all_revs = self.all_revision_ids()
            total = len(all_revs)
            for pos, inv in enumerate(self.iter_inventories(all_revs)):
                pb.update("Finding text references", pos, total)
                for _, entry in inv.iter_entries():
                    if not rich_roots and entry.file_id == inv.root_id:
                        continue
                    key = (entry.file_id, entry.revision)
                    result.setdefault(key, False)
                    if entry.revision == inv.revision_id:
                        result[key] = True
            return result
        finally:
            pb.finished()
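    # Illustrative result (hypothetical keys, not from the original source):
    #   {('file-id', 'rev-a'): True, ('file-id', 'rev-b'): False}
    # True meaning rev-a's own inventory introduced that text version.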

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        # assert revs is None
        packer = GCCHKReconcilePacker(collection, packs, extension)
        return packer.pack(pb)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        if self._format._serializer == to_format._serializer:
            # We must be exactly the same format, otherwise stuff like the chk
            # page layout might be different.
            # Actually, this test is just slightly looser than exact so that
            # CHK2 <-> 2a transfers will work.
            return GroupCHKStreamSource(self, to_format)
        return super(CHKInventoryRepository, self)._get_source(to_format)


class GCCHKPackRepository(CHKInventoryRepository):
    """GC customisation of CHKInventoryRepository."""


class GroupCHKStreamSource(KnitPackStreamSource):
    """Used when both the source and target repo are GroupCHK repos."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
        self._revision_keys = None
        self._text_keys = None
        self._text_fetch_order = 'groupcompress'
        self._chk_id_roots = None
        self._chk_p_id_roots = None

    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
        """Get a stream of inventory texts.

        When this function returns, self._chk_id_roots and self._chk_p_id_roots
        should be populated.
        """
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        def _filtered_inv_stream():
            id_roots_set = set()
            p_id_roots_set = set()
            source_vf = self.from_repository.inventories
            stream = source_vf.get_record_stream(inventory_keys,
                                                 'groupcompress', True)
            for record in stream:
                if record.storage_kind == 'absent':
                    if allow_absent:
                        continue
                    raise errors.NoSuchRevision(self, record.key)
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                             record.key)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                if p_id_map is None:
                    raise AssertionError('Parent id -> file_id map not set')
                key = p_id_map.key()
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
                yield record
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            id_roots_set.clear()
            p_id_roots_set.clear()
        return ('inventories', _filtered_inv_stream())
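    # Note (illustrative, not from the original source): the returned value
    # follows the (substream_name, record_iterator) stream contract used by
    # get_stream() below; _chk_id_roots and _chk_p_id_roots are only fully
    # populated once the caller has consumed the iterator.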

    def _get_filtered_chk_streams(self, excluded_revision_keys):
        self._text_keys = set()
        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
        if not excluded_revision_keys:
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
        else:
            # filter out any excluded revisions whose inventories are not
            # actually present
            # TODO: Update Repository.iter_inventories() to add
            #       ignore_missing=True
            present_keys = self.from_repository._find_present_inventory_keys(
                            excluded_revision_keys)
            present_ids = [k[-1] for k in present_keys]
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
            for inv in self.from_repository.iter_inventories(present_ids):
                uninteresting_root_keys.add(inv.id_to_entry.key())
                uninteresting_pid_root_keys.add(
                    inv.parent_id_basename_to_file_id.key())
        chk_bytes = self.from_repository.chk_bytes
        def _filter_id_to_entry():
            interesting_nodes = chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_id_roots, uninteresting_root_keys)
            for record in _filter_text_keys(interesting_nodes, self._text_keys,
                                            chk_map._bytes_to_text_key):
                if record is not None:
                    yield record
            self._chk_id_roots = None
        yield 'chk_bytes', _filter_id_to_entry()
        def _get_parent_id_basename_to_file_id_pages():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_p_id_roots, uninteresting_pid_root_keys):
                if record is not None:
                    yield record
            self._chk_p_id_roots = None
        yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        self.from_repository.revisions.clear_cache()
        self.from_repository.signatures.clear_cache()
        yield self._get_inventory_stream(self._revision_keys)
        self.from_repository.inventories.clear_cache()
        # TODO: The keys to exclude might be part of the search recipe
        # For now, exclude all parents that are at the edge of ancestry, for
        # which we have inventories
        from_repo = self.from_repository
        parent_keys = from_repo._find_parent_keys_of_revisions(
                        self._revision_keys)
        for stream_info in self._get_filtered_chk_streams(parent_keys):
            yield stream_info
        self.from_repository.chk_bytes.clear_cache()
        yield self._get_text_stream()
        self.from_repository.texts.clear_cache()
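    # Summary aside (derived from get_stream above, not in the original
    # source): substreams are emitted in fetch order - revision texts (with
    # signatures), then one 'inventories' substream, then two 'chk_bytes'
    # substreams (id_to_entry pages, then parent_id_basename_to_file_id
    # pages) excluding pages reachable from inventories the target is assumed
    # to already have, and finally one 'texts' substream.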

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        missing_inventory_keys = set()
        for key in missing_keys:
            if key[0] != 'inventories':
                raise AssertionError('The only missing keys we should'
                    ' be filling in are inventory keys, not %s'
                    % (key[0],))
            missing_inventory_keys.add(key[1:])
        if self._chk_id_roots or self._chk_p_id_roots:
            raise AssertionError('Cannot call get_stream_for_missing_keys'
                ' until all of get_stream() has been consumed.')
        # Yield the inventory stream, so we can find the chk stream
        # Some of the missing_keys will be missing because they are ghosts.
        # As such, we can ignore them. The Sink is required to verify there are
        # no unavailable texts when the ghost inventories are not filled in.
        yield self._get_inventory_stream(missing_inventory_keys,
                                         allow_absent=True)
        # We use the empty set for excluded_revision_keys, to make it clear
        # that we want to transmit all referenced chk pages.
        for stream_info in self._get_filtered_chk_streams(set()):
            yield stream_info


class _InterestingKeyInfo(object):

    def __init__(self):
        self.interesting_root_keys = set()
        self.interesting_pid_root_keys = set()
        self.uninteresting_root_keys = set()
        self.uninteresting_pid_root_keys = set()

    def all_interesting(self):
        return self.interesting_root_keys.union(self.interesting_pid_root_keys)

    def all_uninteresting(self):
        return self.uninteresting_root_keys.union(
            self.uninteresting_pid_root_keys)

    def all_keys(self):
        return self.all_interesting().union(self.all_uninteresting())


def _build_interesting_key_sets(repo, inventory_ids, parent_only_inv_ids):
    result = _InterestingKeyInfo()
    for inv in repo.iter_inventories(inventory_ids, 'unordered'):
        root_key = inv.id_to_entry.key()
        pid_root_key = inv.parent_id_basename_to_file_id.key()
        if inv.revision_id in parent_only_inv_ids:
            result.uninteresting_root_keys.add(root_key)
            result.uninteresting_pid_root_keys.add(pid_root_key)
        else:
            result.interesting_root_keys.add(root_key)
            result.interesting_pid_root_keys.add(pid_root_key)
    return result
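
# Illustrative note (not from the original source): inventories named in
# parent_only_inv_ids land in the 'uninteresting' sets, everything else in
# the 'interesting' sets, so iter_interesting_nodes can skip chk pages the
# target is assumed to already have.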


def _filter_text_keys(interesting_nodes_iterable, text_keys, bytes_to_text_key):
    """Iterate the result of iter_interesting_nodes, yielding the records
    and adding to text_keys.
    """
    text_keys_update = text_keys.update
    for record, items in interesting_nodes_iterable:
        text_keys_update([bytes_to_text_key(b) for n, b in items])
        yield record


class RepositoryFormatCHK1(RepositoryFormatPack):
    """A hashed CHK+group compress pack repository."""

    repository_class = CHKInventoryRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_external_lookups = True
    supports_tree_reference = True

    # Note: We cannot unpack a delta that references a text we haven't
    # seen yet. There are 2 options, work in fulltexts, or require
    # topological sorting. Using fulltexts is more optimal for local
    # operations, because the source can be smart about extracting
    # multiple in-a-row (and sharing strings). Topological is better
    # for remote, because we access less data.
    _fetch_order = 'unordered'
    _fetch_uses_deltas = False

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('gc-chk16')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - hash16chk+gc rich-root'
                ' (needs bzr.dev from 1.14)\n')

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format - hash16chk+groupcompress")

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)
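    # Hypothetical usage note (illustrative only): check_conversion_target
    # refuses targets that would lose information - e.g. a non-rich-root
    # format such as RepositoryFormatKnit1 raises BadConversionTarget, while
    # a rich-root, tree-reference-capable target passes.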


class RepositoryFormatPackGCCHK255(RepositoryFormatPackDevelopment5Hash255):
    """A hashed CHK+group compress pack repository."""

    repository_class = GCCHKPackRepository
    # Setting this to True causes us to use InterModel1And2, so for now set
    # it to False which uses InterDifferingSerializer. When IM1&2 is
    # removed (as it is in bzr.dev) we can set this back to True.
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('gc-chk255')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - hash255chk+gc rich-root'
                ' (needs bzr.dev from 1.14)\n')

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format - hash255chk+groupcompress")

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)


class RepositoryFormatPackGCCHK255Big(RepositoryFormatPackGCCHK255):
    """A hashed CHK+group compress pack repository."""

    repository_class = GCCHKPackRepository
    supports_chks = True
    # For right now, setting this to True gives us InterModel1And2 rather
    # than InterDifferingSerializer
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    _serializer = chk_serializer.chk_serializer_255_bigpage
    _commit_inv_deltas = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    # Note: We cannot unpack a delta that references a text we haven't
    # seen yet. There are 2 options, work in fulltexts, or require
    # topological sorting. Using fulltexts is more optimal for local
    # operations, because the source can be smart about extracting
    # multiple in-a-row (and sharing strings). Topological is better
    # for remote, because we access less data.