        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)
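
    # The "coalesce to 1 of 10" behaviour above follows bzr's digit-based
    # pack distribution: for N revisions there is one pack per decimal digit
    # of N, sized by that digit's magnitude. A minimal illustrative sketch
    # follows; the helper name is ours, not bzrlib API, and the real policy
    # lives in RepositoryPackCollection.pack_distribution.
    def _pack_distribution_sketch(self, revision_count):
        """Return the pack sizes a digit-based autopack policy implies."""
        sizes = []
        magnitude = 1
        while revision_count:
            digit = revision_count % 10
            # one pack of this magnitude per digit, e.g. 9 -> nine 1-packs
            sizes.extend([magnitude] * digit)
            revision_count //= 10
            magnitude *= 10
        return sorted(sizes, reverse=True)  # 10 commits -> [10], 11 -> [10, 1]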

    def test_commit_write_group_returns_new_pack_names(self):
        # This test doesn't need real disk.
        self.vfs_transport_factory = memory.MemoryServer
        format = self.get_format()
        repo = self.make_repository('foo', format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        # All current pack repository styles autopack at 10 revisions; and
        # autopack as well as regular commit write group needs to return
        # the new pack name. Looping is a little ugly, but we don't have a
        # clean way to test both the autopack logic and the normal code
        # path without doing this loop.
        for pos in range(10):
            revid = str(pos)
            repo.start_write_group()
            try:
                inv = inventory.Inventory(revision_id=revid)
                inv.root.revision = revid
                repo.texts.add_lines((inv.root.file_id, revid), [], [])
                rev = _mod_revision.Revision(timestamp=0, timezone=None,
                    committer="Foo Bar <foo@example.com>", message="Message",
                    revision_id=revid)
                rev.parent_ids = ()
                repo.add_revision(revid, rev, inv=inv)
            except:
                repo.abort_write_group()
                raise
            old_names = repo._pack_collection._names.keys()
            result = repo.commit_write_group()
            cur_names = repo._pack_collection._names.keys()
            new_names = list(set(cur_names) - set(old_names))
            self.assertEqual(new_names, result)
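
    # A hedged sketch of the capture-the-difference pattern used above, as a
    # reusable helper (our name, not bzrlib API): snapshot the pack-name set
    # around an operation and report what the operation added.
    def _added_pack_names_sketch(self, repo, operation):
        before = set(repo._pack_collection._names.keys())
        operation()
        after = set(repo._pack_collection._names.keys())
        return sorted(after - before)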

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = test_server.FakeNFSServer()
        self.start_server(server)
        t = transport.get_transport_from_url(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(t)
        repo = bzrdir.create_repository()
        repo_transport = bzrdir.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))

        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_preserves_all_inventories(self):
        # This is related to bug:
        # https://bugs.launchpad.net/bzr/+bug/412198
        # Stacked repositories need to keep the inventory for parents, even
        # after a pack operation. However, it is harder to test that than to
        # just test that all inventory texts are preserved.
        format = self.get_format()
        builder = self.make_branch_builder('source', format=format)
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', None, [
            ('add', ('file', 'file-id', 'file', 'B content\n'))])
        builder.build_snapshot('C-id', None, [
            ('modify', ('file-id', 'C content\n'))])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo = self.make_repository('repo', shared=True, format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.fetch(b.repository, revision_id='B-id')
        inv = b.repository.iter_inventories(['C-id']).next()
        repo.start_write_group()
        repo.add_inventory('C-id', inv, ['B-id'])
        repo.commit_write_group()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        repo.pack()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        # Content should be preserved as well
        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())

    def test_pack_layout(self):
        # Test that the ordering of revisions in pack repositories is
        # tip->ancestor.
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)

    def test_concurrent_pack_triggers_reload(self):
        # create 2 packs, which we will then collapse
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        self.addCleanup(tree.unlock)
        rev1 = tree.commit('one')
        rev2 = tree.commit('two')
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        self.addCleanup(r2.unlock)
        # Now r2 has read the pack-names file, but will need to reload
        # it after r1 has repacked
        tree.branch.repository.pack()
        self.assertEqual({rev2: (rev1,)}, r2.get_parent_map([rev2]))
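
    # Illustrative only: the reload behaviour exercised above boils down to
    # "if a pack file has vanished, re-read pack-names and retry once". The
    # helper below is our sketch of that pattern, not bzrlib's actual retry
    # machinery (which lives in the pack access layer).
    def _reload_and_retry_sketch(self, reload_func, func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except errors.NoSuchFile:
            reload_func()  # e.g. re-read the pack-names index
            return func(*args, **kwargs)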

    def test_concurrent_pack_during_get_record_reloads(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        self.addCleanup(tree.unlock)
        rev1 = tree.commit('one')
        rev2 = tree.commit('two')
        keys = [(rev1,), (rev2,)]
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        self.addCleanup(r2.unlock)
        # At this point, we will start grabbing a record stream, and
        # trigger a repack mid-way
        packed = False
        result = {}
        record_stream = r2.revisions.get_record_stream(keys,
                                                       'unordered', False)
        for record in record_stream:
            result[record.key] = record
            if not packed:
                tree.branch.repository.pack()
                packed = True
        # The first record will be found in the original location, but
        # after the pack, we have to reload to find the next record
        self.assertEqual(sorted(keys), sorted(result.keys()))

    def test_concurrent_pack_during_autopack(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        self.addCleanup(tree.unlock)
        for i in range(9):
            tree.commit('rev %d' % (i,))
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        self.addCleanup(r2.unlock)
        # Monkey patch so that pack occurs while the other repo is
        # autopacking. This is slightly bad, but all current pack
        # repository implementations have a _pack_collection, and we
        # test that it gets triggered. So if a future format changes
        # things, the test will fail rather than succeed accidentally.
        autopack_count = [0]
        r1 = tree.branch.repository
        orig = r1._pack_collection.pack_distribution
        def trigger_during_auto(*args, **kwargs):
            ret = orig(*args, **kwargs)
            if not autopack_count[0]:
                r2.pack()
            autopack_count[0] += 1
            return ret
        r1._pack_collection.pack_distribution = trigger_during_auto
        tree.commit('autopack-rev')
        # This triggers 2 autopacks. The first one causes r2.pack() to
        # fire, but r2 doesn't see the new pack file yet. The
        # autopack restarts and sees there are 2 files and there
        # should be only 1 for 10 commits. So it goes ahead and
        # finishes autopacking.
        self.assertEqual([2], autopack_count)
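
    # (Design note, ours: pack_distribution is the hook patched above because
    # autopack calls it while deciding how to repack, which makes it a
    # convenient point to interleave a concurrent r2.pack() without reaching
    # into private pack-writing code paths.)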

    def test_lock_write_does_not_physically_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

        self.assertRaises(errors.NoSuchRevision,
                          missing_ghost.get_inventory, 'ghost')

    def make_write_ready_repo(self):
        format = self.get_format()
        if isinstance(format.repository_format, RepositoryFormat2a):
            raise TestNotApplicable("No missing compression parents")
        repo = self.make_repository('.', format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        self.addCleanup(repo.abort_write_group)
        return repo

    def test_missing_inventories_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.inventories._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_revisions_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.revisions._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_signatures_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.signatures._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_text_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('some', 'junk')
        repo.texts._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
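
    # Background for the four tests above (our summary): pack/knit storage
    # may record a text as a delta whose base is a "compression parent". A
    # record whose compression parent has not been inserted yet cannot be
    # reconstructed, so commit_write_group must keep refusing to make the
    # write group permanent until the parent arrives or the group is aborted.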

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
                         repo._format.supports_external_lookups)

    def _lock_write(self, write_lockable):
        """Lock write_lockable, add a cleanup and return the result.

        :param write_lockable: An object with a lock_write method.
        :return: The result of write_lockable.lock_write().
        """
        result = write_lockable.lock_write()
        self.addCleanup(result.unlock)
        return result

    def test_abort_write_group_does_not_raise_when_suppressed(self):
        """Similar to per_repository.test_write_group's test of the same name.

        Also requires that the exception is logged.
        """
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # abort_write_group will not raise an error
        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
        # But it does log an error
        log = self.get_log()
        self.assertContainsRe(log, 'abort_write_group failed')
        self.assertContainsRe(log, r'INFO bzr: ERROR \(ignored\):')
        if token is not None:
            repo.leave_lock_in_place()

    def test_abort_write_group_does_raise_when_not_suppressed(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # abort_write_group will raise an error
        self.assertRaises(Exception, repo.abort_write_group)
        if token is not None:
            repo.leave_lock_in_place()

    def test_suspend_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                          ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        expected_names.append(expected_pack_name)
        upload_transport = repo._pack_collection._upload_transport
        limbo_files = upload_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(limbo_files))
        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
        self.assertEqual(wg_tokens[0], md5.hexdigest())

    def test_resume_chk_bytes(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        if repo.chk_bytes is None:
            raise TestNotApplicable('no chk_bytes for this repository')
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text = 'a bit of text\n'
        key = ('sha1:' + osutils.sha_string(text),)
        repo.chk_bytes.add_lines(key, (), [text])
        wg_tokens = repo.suspend_write_group()
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        self.assertEqual([key], list(same_repo.chk_bytes.keys()))
        self.assertEqual(
            text, same_repo.chk_bytes.get_record_stream([key],
                'unordered', True).next().get_bytes_as('fulltext'))
        same_repo.abort_write_group()
        self.assertEqual([], list(same_repo.chk_bytes.keys()))

    def test_resume_write_group_then_abort(self):
        # Create a repo, start a write group, insert some data, suspend.
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.abort_write_group()
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        self.assertEqual(
            [], same_repo._pack_collection._pack_transport.list_dir(''))

    def test_commit_resumed_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.commit_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                          ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        index_names = repo._pack_collection._index_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(index_names))
        pack_names = repo._pack_collection._pack_transport.list_dir('')
        self.assertEqual([expected_pack_name], pack_names)

    def test_resume_malformed_token(self):
        self.vfs_transport_factory = memory.MemoryServer
        # Make a repository with a suspended write group
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Make a new repository
        new_repo = self.make_repository('new_repo', format=self.get_format())
        token = self._lock_write(new_repo).repository_token
        hacked_wg_token = (
            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
        self.assertRaises(
            errors.UnresumableWriteGroup,
            new_repo.resume_write_group, [hacked_wg_token])


class TestPackRepositoryStacking(TestCaseWithTransport):

        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        local_tree = tree.branch.create_checkout('local')
        for x in range(9):
            local_tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        local_tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        local_tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)


class TestKeyDependencies(TestCaseWithTransport):

    def get_format(self):
        return controldir.format_registry.make_bzrdir(self.format_name)

    def create_source_and_target(self):
        builder = self.make_branch_builder('source', format=self.get_format())
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
        builder.finish_series()
        repo = self.make_repository('target', format=self.get_format())
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        return b.repository, repo

    def test_key_dependencies_cleared_on_abort(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.abort_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_suspend(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.suspend_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_commit(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            # Copy all texts, inventories, and chks so that nothing is missing
            # for revision B-id.
            for vf_name in ['texts', 'chk_bytes', 'inventories']:
                source_vf = getattr(source_repo, vf_name, None)
                if source_vf is None:
                    continue
                target_vf = getattr(target_repo, vf_name)
                stream = source_vf.get_record_stream(
                    source_vf.keys(), 'unordered', True)
                target_vf.insert_record_stream(stream)
            # Copy just revision B-id
            stream = source_repo.revisions.get_record_stream(
                [('B-id',)], 'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.commit_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))
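
    # (Our summary of the three tests above: the revisions index keeps a
    # _key_dependencies map of inserted revisions whose parents, such as
    # 'ghost-id' here, are not yet present. Every way a write group can end,
    # whether abort, suspend, or commit, must leave no stale referrers
    # behind.)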


class TestSmartServerAutopack(TestCaseWithTransport):

    def setUp(self):
        super(TestSmartServerAutopack, self).setUp()
        # Create a smart server that publishes whatever the backing VFS server
        # does.
        self.smart_server = test_server.SmartTCPServer_for_testing()
        self.start_server(self.smart_server, self.get_server())
        # Log all HPSS calls into self.hpss_calls.
        client._SmartClient.hooks.install_named_hook(
            'call', self.capture_hpss_call, None)
        self.hpss_calls = []

    def capture_hpss_call(self, params):
        self.hpss_calls.append(params.method)

    def get_format(self):
        return controldir.format_registry.make_bzrdir(self.format_name)

    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
        # Make local and remote repos
        format = self.get_format()
        tree = self.make_branch_and_tree('local', format=format)
        self.make_branch_and_tree('remote', format=format)
        remote_branch_url = self.smart_server.get_url() + 'remote'
        remote_branch = controldir.ControlDir.open(
            remote_branch_url).open_branch()
        # Make 9 local revisions, and push them one at a time to the remote
        # repo to produce 9 pack files.
        for x in range(9):
            tree.commit('commit %s' % x)
            tree.branch.push(remote_branch)
        # Make one more push to trigger an autopack
        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
        autopack_calls = len([call for call in self.hpss_calls if call ==
            'PackRepository.autopack'])
        streaming_calls = len([call for call in self.hpss_calls if call in
            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
        if autopack_calls:
            # Non streaming server
            self.assertEqual(1, autopack_calls)
            self.assertEqual(0, streaming_calls)
        else:
            # Streaming was used, which autopacks on the remote end.
            self.assertEqual(0, autopack_calls)
            # NB: The 2 calls are because of the sanity check that the server
            # supports the verb (see remote.py:RemoteSink.insert_stream for
            # details).
            self.assertEqual(2, streaming_calls)


def load_tests(basic_tests, module, loader):
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
    scenarios_params = [
        dict(format_name='pack-0.92',
             format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='pack-0.92-subtree',
             format_string="Bazaar pack repository format 1 "
                 "with subtree support (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='1.6',
             format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.6.1-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
                 "(bzr 1.6.1)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.9',
             format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='1.9-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
                 "(bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='2a',
             format_string="Bazaar repository format 2a "
                 "(needs bzr 1.16 or later)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        ]
    # name of the scenario is the format name
    scenarios = [(s['format_name'], s) for s in scenarios_params]
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())
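

# For reference, a sketch (ours, not used by the suite) of what scenario
# multiplication does with each (name, params) pair above: clone the test
# and set the parameters as attributes, so self.format_name,
# self.format_string, self.format_supports_external_lookups and
# self.index_class vary per cloned test. bzrlib.tests.multiply_tests does
# the real work; this only illustrates the mechanism.
def _apply_scenario_sketch(test, scenario):
    import copy
    name, params = scenario
    new_test = copy.copy(test)
    for key, value in params.items():
        setattr(new_test, key, value)
    # Give the clone a distinct id so failures name the scenario.
    new_test.id = lambda: '%s(%s)' % (test.id(), name)
    return new_test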