@@ -84,7 +84,7 @@
         """Packs reuse deltas."""
         format = self.get_format()
         repo = self.make_repository('.', format=format)
-        if isinstance(format.repository_format, RepositoryFormatCHK1):
+        if isinstance(format.repository_format, RepositoryFormat2a):
             # TODO: This is currently a workaround. CHK format repositories
             # ignore the 'deltas' flag, but during conversions, we can't
             # do unordered delta fetches. Remove this clause once we
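The experimental CHK format class was stabilised as RepositoryFormat2a, so isinstance checks against RepositoryFormatCHK1 are updated mechanically throughout this diff. A minimal sketch of the gating idiom, assuming the class is importable from bzrlib.repofmt.groupcompress_repo:

    # Sketch only: 2a (CHK) repositories ignore the 'deltas' flag, so
    # delta-reuse assertions are skipped for them.
    from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a

    def wants_delta_assertions(format):
        return not isinstance(format.repository_format, RepositoryFormat2a)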
@@ -239,28 +239,35 @@
         self.assertTrue(large_pack_name in pack_names)

     def test_commit_write_group_returns_new_pack_names(self):
+        # This test doesn't need real disk.
+        self.vfs_transport_factory = tests.MemoryServer
         format = self.get_format()
-        tree = self.make_branch_and_tree('foo', format=format)
-        tree.commit('first post')
-        repo = tree.branch.repository
+        repo = self.make_repository('foo', format=format)
         repo.lock_write()
         try:
-            repo.start_write_group()
-            try:
-                inv = inventory.Inventory(revision_id="A")
-                inv.root.revision = "A"
-                repo.texts.add_lines((inv.root.file_id, "A"), [], [])
-                rev = _mod_revision.Revision(timestamp=0, timezone=None,
-                    committer="Foo Bar <foo@example.com>", message="Message",
-                    revision_id="A")
-                rev.parent_ids = ()
-                repo.add_revision("A", rev, inv=inv)
-            except:
-                repo.abort_write_group()
-                raise
-            else:
-                old_names = repo._pack_collection._names.keys()
-                result = repo.commit_write_group()
-                cur_names = repo._pack_collection._names.keys()
-                new_names = list(set(cur_names) - set(old_names))
-                self.assertEqual(new_names, result)
+            # All current pack repository styles autopack at 10 revisions; and
+            # autopack as well as regular commit write group needs to return
+            # the new pack name. Looping is a little ugly, but we don't have a
+            # clean way to test both the autopack logic and the normal code
+            # path without doing this loop.
+            for pos in range(10):
+                revid = str(pos)
+                repo.start_write_group()
+                try:
+                    inv = inventory.Inventory(revision_id=revid)
+                    inv.root.revision = revid
+                    repo.texts.add_lines((inv.root.file_id, revid), [], [])
+                    rev = _mod_revision.Revision(timestamp=0, timezone=None,
+                        committer="Foo Bar <foo@example.com>", message="Message",
+                        revision_id=revid)
+                    rev.parent_ids = ()
+                    repo.add_revision(revid, rev, inv=inv)
+                except:
+                    repo.abort_write_group()
+                    raise
+                else:
+                    old_names = repo._pack_collection._names.keys()
+                    result = repo.commit_write_group()
+                    cur_names = repo._pack_collection._names.keys()
+                    new_names = list(set(cur_names) - set(old_names))
+                    self.assertEqual(new_names, result)
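The loop exercises the contract that commit_write_group() returns the names of every pack it created, including the pack produced when the tenth commit triggers an autopack. Condensed, using the same private _pack_collection attribute the test itself uses:

    # Sketch of the asserted contract.
    before = set(repo._pack_collection._names.keys())
    result = repo.commit_write_group()
    after = set(repo._pack_collection._names.keys())
    assert sorted(result) == sorted(after - before)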
@@ -271,8 +278,7 @@
         # failing to delete obsolete packs is not fatal
         format = self.get_format()
         server = fakenfs.FakeNFSServer()
-        server.setUp()
-        self.addCleanup(server.tearDown)
+        self.start_server(server)
         transport = get_transport(server.get_url())
         bzrdir = self.get_format().initialize_on_transport(transport)
         repo = bzrdir.create_repository()
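This hunk is one instance of a suite-wide migration: TestCase.start_server() starts a test server and registers its teardown in a single call. Before and after:

    # Old idiom:
    server.setUp()
    self.addCleanup(server.tearDown)
    # New idiom (cleanup registered automatically):
    self.start_server(server)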
@@ -295,5 +301,40 @@
         self.assertEqual(1, len(list(index.iter_all_entries())))
         self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

+    def test_pack_preserves_all_inventories(self):
+        # This is related to bug:
+        # https://bugs.launchpad.net/bzr/+bug/412198
+        # Stacked repositories need to keep the inventory for parents, even
+        # after a pack operation. However, it is harder to test that than just
+        # testing that all inventory texts are preserved.
+        format = self.get_format()
+        builder = self.make_branch_builder('source', format=format)
+        builder.start_series()
+        builder.build_snapshot('A-id', None, [
+            ('add', ('', 'root-id', 'directory', None))])
+        builder.build_snapshot('B-id', None, [
+            ('add', ('file', 'file-id', 'file', 'B content\n'))])
+        builder.build_snapshot('C-id', None, [
+            ('modify', ('file-id', 'C content\n'))])
+        builder.finish_series()
+        b = builder.get_branch()
+        b.lock_read()
+        self.addCleanup(b.unlock)
+        repo = self.make_repository('repo', shared=True, format=format)
+        repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.fetch(b.repository, revision_id='B-id')
+        inv = b.repository.iter_inventories(['C-id']).next()
+        repo.start_write_group()
+        repo.add_inventory('C-id', inv, ['B-id'])
+        repo.commit_write_group()
+        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
+                         sorted(repo.inventories.keys()))
+        repo.pack()
+        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
+                         sorted(repo.inventories.keys()))
+        # Content should be preserved as well
+        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())
+
     def test_pack_layout(self):
         # Test that the ordering of revisions in pack repositories is
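The point of the new test: a stacked repository can hold an inventory ('C-id') whose revision it does not have, and repo.pack() must not discard it. The invariant, condensed:

    # Sketch: the set of inventory keys is unchanged by packing.
    before = sorted(repo.inventories.keys())
    repo.pack()
    self.assertEqual(before, sorted(repo.inventories.keys()))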
@@ -311,6 +352,6 @@
         # revision access tends to be tip->ancestor, so ordering that way on
         # disk is a good idea.
         for _1, key, val, refs in pack.revision_index.iter_all_entries():
-            if type(format.repository_format) is RepositoryFormatCHK1:
+            if type(format.repository_format) is RepositoryFormat2a:
                 # group_start, group_len, internal_start, internal_len
                 pos = map(int, val.split())
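As the in-line comment notes, a 2a revision index value is four space-separated integers locating the record inside its compression group:

    # Sketch: unpack one index value from the loop above.
    group_start, group_len, internal_start, internal_len = map(int, val.split())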
@@ -590,6 +631,6 @@
     def make_write_ready_repo(self):
         format = self.get_format()
-        if isinstance(format.repository_format, RepositoryFormatCHK1):
+        if isinstance(format.repository_format, RepositoryFormat2a):
             raise TestNotApplicable("No missing compression parents")
         repo = self.make_repository('.', format=format)
         repo.lock_write()
@@ -865,7 +906,8 @@
         base.commit('foo')
         referencing = self.make_branch_and_tree('repo', format=self.get_format())
         referencing.branch.repository.add_fallback_repository(base.branch.repository)
-        referencing.commit('bar')
+        local_tree = referencing.branch.create_checkout('local')
+        local_tree.commit('bar')
         new_instance = referencing.bzrdir.open_repository()
         new_instance.lock_read()
         self.addCleanup(new_instance.unlock)
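'referencing' is stacked on 'base' through add_fallback_repository(), and the diff stops committing to the stacked branch directly, routing the commit through a lightweight checkout instead (plausibly because direct commits to stacked branches were being restricted around this time):

    # New idiom: the commit reaches 'referencing' via the checkout.
    local_tree = referencing.branch.create_checkout('local')
    local_tree.commit('bar')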
@@ -884,13 +926,14 @@
         # and max packs policy - so we are checking the policy is honoured
         # in the test. But for now 11 commits is not a big deal in a single
         # test run.
+        local_tree = tree.branch.create_checkout('local')
         for x in range(9):
-            tree.commit('commit %s' % x)
+            local_tree.commit('commit %s' % x)
         # there should be 9 packs:
         index = self.index_class(trans, 'pack-names', None)
         self.assertEqual(9, len(list(index.iter_all_entries())))
         # committing one more should coalesce to 1 of 10.
-        tree.commit('commit triggering pack')
+        local_tree.commit('commit triggering pack')
         index = self.index_class(trans, 'pack-names', None)
         self.assertEqual(1, len(list(index.iter_all_entries())))
         # packing should not damage data
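The asserted counts follow bzr's autopack policy, under which the number of pack files stays at or below the sum of the decimal digits of the revision count: 9 revisions can mean 9 packs, the 10th coalesces them into 1, and an 11th adds a second pack (hence the later assertion of 2 entries). A throwaway illustration of the arithmetic:

    def max_packs(revision_count):
        # Sum of decimal digits: 9 -> 9, 10 -> 1, 11 -> 2, 100 -> 1.
        return sum(int(digit) for digit in str(revision_count))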
@@ -909,7 +952,7 @@
         # in the obsolete_packs directory.
         large_pack_name = list(index.iter_all_entries())[0][1][0]
         # finally, committing again should not touch the large pack.
-        tree.commit('commit not triggering pack')
+        local_tree.commit('commit not triggering pack')
         index = self.index_class(trans, 'pack-names', None)
         self.assertEqual(2, len(list(index.iter_all_entries())))
         pack_names = [node[1][0] for node in index.iter_all_entries()]
@@ -928,7 +971,7 @@
             ('add', ('', 'root-id', 'directory', None))])
         builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
         builder.finish_series()
-        repo = self.make_repository('target')
+        repo = self.make_repository('target', format=self.get_format())
         b = builder.get_branch()
         b.lock_read()
         self.addCleanup(b.unlock)
@@ -966,8 +1009,19 @@
         source_repo, target_repo = self.create_source_and_target()
         target_repo.start_write_group()
         try:
-            stream = source_repo.revisions.get_record_stream([('B-id',)],
-                'unordered', True)
+            # Copy all texts, inventories, and chks so that nothing is missing
+            # for revision B-id.
+            for vf_name in ['texts', 'chk_bytes', 'inventories']:
+                source_vf = getattr(source_repo, vf_name, None)
+                if source_vf is None:
+                    continue
+                target_vf = getattr(target_repo, vf_name)
+                stream = source_vf.get_record_stream(
+                    source_vf.keys(), 'unordered', True)
+                target_vf.insert_record_stream(stream)
+            # Copy just revision B-id
+            stream = source_repo.revisions.get_record_stream(
+                [('B-id',)], 'unordered', True)
             target_repo.revisions.insert_record_stream(stream)
             key_refs = target_repo.revisions._index._key_dependencies
             self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
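The added loop is the VersionedFiles bulk-copy idiom: get_record_stream(keys, ordering, include_delta_closure) feeding insert_record_stream(). Copying every text, chk page, and inventory first leaves the revision itself as the only missing record, so _key_dependencies afterwards reflects only revision-level references:

    # Bulk-copy idiom as used above; the final True (include_delta_closure)
    # asks for a stream reconstructable without delta bases absent from it.
    stream = source_vf.get_record_stream(source_vf.keys(), 'unordered', True)
    target_vf.insert_record_stream(stream)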
@@ -983,8 +1037,7 @@
         # Create a smart server that publishes whatever the backing VFS server
         # does.
         self.smart_server = server.SmartTCPServer_for_testing()
-        self.smart_server.setUp(self.get_server())
-        self.addCleanup(self.smart_server.tearDown)
+        self.start_server(self.smart_server, self.get_server())
         # Log all HPSS calls into self.hpss_calls.
         client._SmartClient.hooks.install_named_hook(
             'call', self.capture_hpss_call, None)
@@ -1014,8 +1067,8 @@
         tree.branch.push(remote_branch)
         autopack_calls = len([call for call in self.hpss_calls if call ==
             'PackRepository.autopack'])
-        streaming_calls = len([call for call in self.hpss_calls if call ==
-            'Repository.insert_stream'])
+        streaming_calls = len([call for call in self.hpss_calls if call in
+            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
         if autopack_calls:
             # Non streaming server
             self.assertEqual(1, autopack_calls)
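The smart protocol grew a versioned streaming verb, so the test now accepts either spelling when counting streaming calls:

    # Sketch: count both the original and the 1.19 variant of the verb.
    verbs = ('Repository.insert_stream', 'Repository.insert_stream_1.19')
    streaming_calls = len([c for c in self.hpss_calls if c in verbs])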
@@ -1061,7 +1114,7 @@
              format_supports_external_lookups=True,
              index_class=BTreeGraphIndex),
-        dict(format_name='development6-rich-root',
-             format_string='Bazaar development format - group compression '
-                 'and chk inventory (needs bzr.dev from 1.14)\n',
+        dict(format_name='2a',
+             format_string="Bazaar repository format 2a "
+                 "(needs bzr 1.16 or later)\n",
              format_supports_external_lookups=True,
              index_class=BTreeGraphIndex),