745
745
def open_pack(self):
    """Open a pack for the pack we are creating.

    Delegates construction to the collection's ``pack_factory`` (rather
    than hard-coding ``NewPack``) so subclasses of the pack collection
    can substitute their own pack implementation.

    :return: A new, writable pack object produced by
        ``self._pack_collection.pack_factory``.
    """
    # NOTE(review): reconstructed from a mangled diff; this is the
    # post-refactor form (pack_factory attribute instead of a direct
    # NewPack call). Suffix and file mode come from the packer/collection.
    return self._pack_collection.pack_factory(
        self._pack_collection,
        upload_suffix=self.suffix,
        file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
750
751
def _update_pack_order(self, entries, index_to_pack_map):
1344
1345
:ivar _names: map of {pack_name: (index_size,)}
1348
pack_factory = NewPack
1347
1350
def __init__(self, repo, transport, index_transport, upload_transport,
1348
1351
pack_transport, index_builder_class, index_class,
1349
1352
use_chk_index):
1952
1955
# Do not permit preparation for writing if we're not in a 'write lock'.
1953
1956
if not self.repo.is_write_locked():
1954
1957
raise errors.NotWriteLocked(self)
1955
self._new_pack = NewPack(self, upload_suffix='.pack',
1958
self._new_pack = self.pack_factory(self, upload_suffix='.pack',
1956
1959
file_mode=self.repo.bzrdir._get_file_mode())
1957
1960
# allow writing: queue writes to a new index
1958
1961
self.revision_index.add_writable_index(self._new_pack.revision_index,
2178
2181
revision_nodes = self._pack_collection.revision_index \
2179
2182
.combined_index.iter_all_entries()
2180
2183
index_positions = []
2181
# Get the cached index values for all revisions, and also the location
2182
# in each index of the revision text so we can perform linear IO.
2184
# Get the cached index values for all revisions, and also the
2185
# location in each index of the revision text so we can perform
2183
2187
for index, key, value, refs in revision_nodes:
2184
pos, length = value[1:].split(' ')
2185
index_positions.append((index, int(pos), key[0],
2186
tuple(parent[0] for parent in refs[0])))
2188
node = (index, key, value, refs)
2189
index_memo = self.revisions._index._node_to_position(node)
2190
assert index_memo[0] == index
2191
index_positions.append((index_memo, key[0],
2192
tuple(parent[0] for parent in refs[0])))
2187
2193
pb.update("Reading revision index", 0, 0)
2188
2194
index_positions.sort()
2189
batch_count = len(index_positions) / 1000 + 1
2190
pb.update("Checking cached revision graph", 0, batch_count)
2191
for offset in xrange(batch_count):
2196
pb.update("Checking cached revision graph", 0,
2197
len(index_positions))
2198
for offset in xrange(0, len(index_positions), 1000):
2192
2199
pb.update("Checking cached revision graph", offset)
2193
to_query = index_positions[offset * 1000:(offset + 1) * 1000]
2200
to_query = index_positions[offset:offset + batch_size]
2194
2201
if not to_query:
2196
rev_ids = [item[2] for item in to_query]
2203
rev_ids = [item[1] for item in to_query]
2197
2204
revs = self.get_revisions(rev_ids)
2198
2205
for revision, item in zip(revs, to_query):
2199
index_parents = item[3]
2206
index_parents = item[2]
2200
2207
rev_parents = tuple(revision.parent_ids)
2201
2208
if index_parents != rev_parents:
2202
result.append((revision.revision_id, index_parents, rev_parents))
2209
result.append((revision.revision_id, index_parents,