        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_commit_write_group_returns_new_pack_names(self):
        # This test doesn't need real disk.
        self.vfs_transport_factory = memory.MemoryServer
        format = self.get_format()
        repo = self.make_repository('foo', format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        # All current pack repository styles autopack at 10 revisions; and
        # autopack as well as regular commit write group needs to return
        # the new pack name. Looping is a little ugly, but we don't have a
        # clean way to test both the autopack logic and the normal code
        # path without doing this loop.
        for pos in range(10):
            revid = str(pos)
            repo.start_write_group()
            try:
                inv = inventory.Inventory(revision_id=revid)
                inv.root.revision = revid
                repo.texts.add_lines((inv.root.file_id, revid), [], [])
                rev = _mod_revision.Revision(timestamp=0, timezone=None,
                    committer="Foo Bar <foo@example.com>", message="Message",
                    revision_id=revid)
                rev.parent_ids = ()
                repo.add_revision(revid, rev, inv=inv)
            except:
                repo.abort_write_group()
                raise
            else:
                old_names = repo._pack_collection._names.keys()
                result = repo.commit_write_group()
                cur_names = repo._pack_collection._names.keys()
                new_names = list(set(cur_names) - set(old_names))
                self.assertEqual(new_names, result)
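    # On the tenth iteration the commit also triggers an autopack, so the
    # names returned there cover the pack created by combining the earlier
    # packs, not just the pack holding the new revision.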

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = test_server.FakeNFSServer()
        self.start_server(server)
        transport = get_transport(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(transport)
        repo = bzrdir.create_repository()
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_preserves_all_inventories(self):
        # This is related to bug:
        # https://bugs.launchpad.net/bzr/+bug/412198
        # Stacked repositories need to keep the inventory for parents, even
        # after a pack operation. However, it is harder to test that, than
        # just test that all inventory texts are preserved.
        format = self.get_format()
        builder = self.make_branch_builder('source', format=format)
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', None, [
            ('add', ('file', 'file-id', 'file', 'B content\n'))])
        builder.build_snapshot('C-id', None, [
            ('modify', ('file-id', 'C content\n'))])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo = self.make_repository('repo', shared=True, format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.fetch(b.repository, revision_id='B-id')
        inv = b.repository.iter_inventories(['C-id']).next()
        repo.start_write_group()
        repo.add_inventory('C-id', inv, ['B-id'])
        repo.commit_write_group()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        repo.pack()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        # Content should be preserved as well
        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())
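    # (A stacked repository fills in 'B-id' as the parent inventory of
    # 'C-id' locally, which is why packing must not garbage-collect it even
    # though 'C-id' is the only revision added to this repo directly.)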

    def test_pack_layout(self):
        # Test that the ordering of revisions in pack repositories is
        # tip->ancestor
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)

    def test_concurrent_pack_triggers_reload(self):
        # create 2 packs, which we will then collapse
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        self.addCleanup(tree.unlock)
        rev1 = tree.commit('one')
        rev2 = tree.commit('two')
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        self.addCleanup(r2.unlock)
        # Now r2 has read the pack-names file, but will need to reload
        # it after r1 has repacked
        tree.branch.repository.pack()
        self.assertEqual({rev2: (rev1,)}, r2.get_parent_map([rev2]))
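    # (The reload happens in the pack access layer: the read against the
    # repacked-away file fails, pack-names is re-read, and the request is
    # retried against the new pack.)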

    def test_concurrent_pack_during_get_record_reloads(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        self.addCleanup(tree.unlock)
        rev1 = tree.commit('one')
        rev2 = tree.commit('two')
        keys = [(rev1,), (rev2,)]
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        self.addCleanup(r2.unlock)
        # At this point, we will start grabbing a record stream, and
        # trigger a repack mid-way
        result = {}
        record_stream = r2.revisions.get_record_stream(keys,
            'unordered', False)
        for record in record_stream:
            result[record.key] = record
            if len(result) == 1:
                # Repack after the first record has been fetched
                tree.branch.repository.pack()
        # The first record will be found in the original location, but
        # after the pack, we have to reload to find the next record
        self.assertEqual(sorted(keys), sorted(result.keys()))

    def test_concurrent_pack_during_autopack(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        self.addCleanup(tree.unlock)
        for i in range(9):
            tree.commit('rev %d' % (i,))
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        self.addCleanup(r2.unlock)
        # Monkey patch so that pack occurs while the other repo is
        # autopacking. This is slightly bad, but all current pack
        # repository implementations have a _pack_collection, and we
        # test that it gets triggered. So if a future format changes
        # things, the test will fail rather than succeed accidentally.
        # autopack_count is a one-element list so the closure below can
        # mutate it (Python 2 has no 'nonlocal').
        autopack_count = [0]
        r1 = tree.branch.repository
        orig = r1._pack_collection.pack_distribution
        def trigger_during_auto(*args, **kwargs):
            ret = orig(*args, **kwargs)
            if not autopack_count[0]:
                r2.pack()
            autopack_count[0] += 1
            return ret
        r1._pack_collection.pack_distribution = trigger_during_auto
        tree.commit('autopack-rev')
        # This triggers 2 autopacks. The first one causes r2.pack() to
        # fire, but r2 doesn't see the new pack file yet. The
        # autopack restarts and sees there are 2 files and there
        # should be only 1 for 10 commits. So it goes ahead and
        # finishes autopacking.
        self.assertEqual([2], autopack_count)

    def test_lock_write_does_not_physically_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def make_write_ready_repo(self):
        format = self.get_format()
        if isinstance(format.repository_format, RepositoryFormat2a):
            raise TestNotApplicable("No missing compression parents")
        repo = self.make_repository('.', format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        self.addCleanup(repo.abort_write_group)
        return repo

    def test_missing_inventories_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.inventories._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_revisions_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.revisions._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_signatures_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.signatures._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_text_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('some', 'junk')
        repo.texts._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
                         repo._format.supports_external_lookups)

    def test_abort_write_group_does_not_raise_when_suppressed(self):
        """Similar to per_repository.test_write_group's test of the same name.

        Also requires that the exception is logged.
        """
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # abort_write_group will not raise an error
        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
        # But it does log an error
        log = self.get_log()
        self.assertContainsRe(log, 'abort_write_group failed')
        self.assertContainsRe(log, r'INFO bzr: ERROR \(ignored\):')
        if token is not None:
            repo.leave_lock_in_place()

    def test_abort_write_group_does_raise_when_not_suppressed(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # abort_write_group will raise an error
        self.assertRaises(Exception, repo.abort_write_group)
        if token is not None:
            repo.leave_lock_in_place()

    def test_suspend_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                          ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        expected_names.append(expected_pack_name)
        upload_transport = repo._pack_collection._upload_transport
        limbo_files = upload_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(limbo_files))
        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
        self.assertEqual(wg_tokens[0], md5.hexdigest())
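    # The suspend token is the md5 of the pack's contents (asserted above),
    # and the pack plus its index files stay in the upload ("limbo")
    # directory until the group is resumed or committed. A minimal manual
    # check of the same invariant would be:
    #   data = upload_transport.get_bytes(wg_tokens[0] + '.pack')
    #   assert osutils.md5(data).hexdigest() == wg_tokens[0]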

    def test_resume_chk_bytes(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        if repo.chk_bytes is None:
            raise TestNotApplicable('no chk_bytes for this repository')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text = 'a bit of text\n'
        key = ('sha1:' + osutils.sha_string(text),)
        repo.chk_bytes.add_lines(key, (), [text])
        wg_tokens = repo.suspend_write_group()
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        self.assertEqual([key], list(same_repo.chk_bytes.keys()))
        self.assertEqual(
            text, same_repo.chk_bytes.get_record_stream([key],
                'unordered', True).next().get_bytes_as('fulltext'))
        same_repo.abort_write_group()
        self.assertEqual([], list(same_repo.chk_bytes.keys()))

    def test_resume_write_group_then_abort(self):
        # Create a repo, start a write group, insert some data, suspend.
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        # Resume the write group, then abort it.
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.abort_write_group()
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        self.assertEqual(
            [], same_repo._pack_collection._pack_transport.list_dir(''))

    def test_commit_resumed_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        # Resume the write group, then commit it.
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.commit_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                          ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        index_names = repo._pack_collection._index_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(index_names))
        pack_names = repo._pack_collection._pack_transport.list_dir('')
        self.assertEqual([expected_pack_name], pack_names)

    def test_resume_malformed_token(self):
        self.vfs_transport_factory = memory.MemoryServer
        # Make a repository with a suspended write group
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Make a new repository
        new_repo = self.make_repository('new_repo', format=self.get_format())
        token = new_repo.lock_write()
        self.addCleanup(new_repo.unlock)
        hacked_wg_token = (
            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
        self.assertRaises(
            errors.UnresumableWriteGroup,
            new_repo.resume_write_group, [hacked_wg_token])
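    # The token names a file relative to this repository's own upload
    # directory, so a path-traversal token pointing at another repository's
    # suspended pack must be rejected rather than resumed.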


class TestPackRepositoryStacking(TestCaseWithTransport):
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test run.
        local_tree = tree.branch.create_checkout('local')
        for x in range(9):
            local_tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        local_tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        local_tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)


class TestKeyDependencies(TestCaseWithTransport):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def create_source_and_target(self):
        builder = self.make_branch_builder('source', format=self.get_format())
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
        builder.finish_series()
        repo = self.make_repository('target', format=self.get_format())
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        return b.repository, repo

    def test_key_dependencies_cleared_on_abort(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.abort_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_suspend(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.suspend_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_commit(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            # Copy all texts, inventories, and chks so that nothing is missing
            # for revision B-id.
            for vf_name in ['texts', 'chk_bytes', 'inventories']:
                source_vf = getattr(source_repo, vf_name, None)
                if source_vf is None:
                    continue
                target_vf = getattr(target_repo, vf_name)
                stream = source_vf.get_record_stream(
                    source_vf.keys(), 'unordered', True)
                target_vf.insert_record_stream(stream)
            # Copy just revision B-id
            stream = source_repo.revisions.get_record_stream(
                [('B-id',)], 'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.commit_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))
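    # In all three cases the index's _key_dependencies map (which records
    # 'B-id' because its parent 'ghost-id' is absent) must be empty once the
    # write group is no longer active, whichever way it ends.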


class TestSmartServerAutopack(TestCaseWithTransport):

    def setUp(self):
        super(TestSmartServerAutopack, self).setUp()
        # Create a smart server that publishes whatever the backing VFS server
        # does.
        self.smart_server = test_server.SmartTCPServer_for_testing()
        self.start_server(self.smart_server, self.get_server())
        # Log all HPSS calls into self.hpss_calls.
        client._SmartClient.hooks.install_named_hook(
            'call', self.capture_hpss_call, None)
        self.hpss_calls = []

    def capture_hpss_call(self, params):
        self.hpss_calls.append(params.method)

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
        # Make local and remote repos
        format = self.get_format()
        tree = self.make_branch_and_tree('local', format=format)
        self.make_branch_and_tree('remote', format=format)
        remote_branch_url = self.smart_server.get_url() + 'remote'
        remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
        # Make 9 local revisions, and push them one at a time to the remote
        # repo to produce 9 pack files.
        for x in range(9):
            tree.commit('commit %s' % x)
            tree.branch.push(remote_branch)
        # Make one more push to trigger an autopack
        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
        autopack_calls = len([call for call in self.hpss_calls if call ==
            'PackRepository.autopack'])
        streaming_calls = len([call for call in self.hpss_calls if call in
            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
        if autopack_calls:
            # Non streaming server
            self.assertEqual(1, autopack_calls)
            self.assertEqual(0, streaming_calls)
        else:
            # Streaming was used, which autopacks on the remote end.
            self.assertEqual(0, autopack_calls)
            # NB: The 2 calls are because of the sanity check that the server
            # supports the verb (see remote.py:RemoteSink.insert_stream for
            # details).
            self.assertEqual(2, streaming_calls)


def load_tests(basic_tests, module, loader):
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
    scenarios_params = [
        dict(format_name='1.6.1-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
             "(bzr 1.6.1)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.9',
             format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='1.9-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
             "(bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='2a',
             format_string="Bazaar repository format 2a "
             "(needs bzr 1.16 or later)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        ]
    # name of the scenario is the format name
    scenarios = [(s['format_name'], s) for s in scenarios_params]
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())
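# multiply_tests runs every test in this module once per scenario, setting
# each scenario dict's keys (format_name, format_string,
# format_supports_external_lookups, index_class) as attributes on the test
# instance - which is what self.get_format() and self.index_class rely on
# in the tests above.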