677
739
self.assertRaises(errors.RevisionNotPresent, empty_repo.fetch, broken_repo)
680
class TestKnitPackNoSubtrees(TestCaseWithTransport):
    """Tests for the default pack repository format ('pack-0.92')."""

    def get_format(self):
        """Return the bzrdir format under test (no subtree support)."""
        return bzrdir.format_registry.make_bzrdir('pack-0.92')
def test_disk_layout(self):
686
format = self.get_format()
687
repo = self.make_repository('.', format=format)
688
# in case of side effects of locking.
691
t = repo.bzrdir.get_repository_transport(None)
693
# XXX: no locks left when unlocked at the moment
694
# self.assertEqualDiff('', t.get('lock').read())
695
self.check_databases(t)
697
def check_format(self, t):
698
self.assertEqualDiff(
699
"Bazaar pack repository format 1 (needs bzr 0.92)\n",
700
t.get('format').read())
702
def assertHasNoKndx(self, t, knit_name):
703
"""Assert that knit_name has no index on t."""
704
self.assertFalse(t.has(knit_name + '.kndx'))
706
def assertHasNoKnit(self, t, knit_name):
707
"""Assert that knit_name exists on t."""
709
self.assertFalse(t.has(knit_name + '.knit'))
711
def check_databases(self, t):
712
"""check knit content for a repository."""
713
# check conversion worked
714
self.assertHasNoKndx(t, 'inventory')
715
self.assertHasNoKnit(t, 'inventory')
716
self.assertHasNoKndx(t, 'revisions')
717
self.assertHasNoKnit(t, 'revisions')
718
self.assertHasNoKndx(t, 'signatures')
719
self.assertHasNoKnit(t, 'signatures')
720
self.assertFalse(t.has('knits'))
721
# revision-indexes file-container directory
723
list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
724
self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
725
self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
726
self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
727
self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))
729
def test_shared_disk_layout(self):
730
format = self.get_format()
731
repo = self.make_repository('.', shared=True, format=format)
733
t = repo.bzrdir.get_repository_transport(None)
735
# XXX: no locks left when unlocked at the moment
736
# self.assertEqualDiff('', t.get('lock').read())
737
# We should have a 'shared-storage' marker file.
738
self.assertEqualDiff('', t.get('shared-storage').read())
739
self.check_databases(t)
741
def test_shared_no_tree_disk_layout(self):
742
format = self.get_format()
743
repo = self.make_repository('.', shared=True, format=format)
744
repo.set_make_working_trees(False)
746
t = repo.bzrdir.get_repository_transport(None)
748
# XXX: no locks left when unlocked at the moment
749
# self.assertEqualDiff('', t.get('lock').read())
750
# We should have a 'shared-storage' marker file.
751
self.assertEqualDiff('', t.get('shared-storage').read())
752
# We should have a marker for the no-working-trees flag.
753
self.assertEqualDiff('', t.get('no-working-trees').read())
754
# The marker should go when we toggle the setting.
755
repo.set_make_working_trees(True)
756
self.assertFalse(t.has('no-working-trees'))
757
self.check_databases(t)
759
def test_adding_revision_creates_pack_indices(self):
760
format = self.get_format()
761
tree = self.make_branch_and_tree('.', format=format)
762
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
764
list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
765
tree.commit('foobarbaz')
766
index = GraphIndex(trans, 'pack-names', None)
767
index_nodes = list(index.iter_all_entries())
768
self.assertEqual(1, len(index_nodes))
769
node = index_nodes[0]
771
# the pack sizes should be listed in the index
773
sizes = [int(digits) for digits in pack_value.split(' ')]
774
for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
775
stat = trans.stat('indices/%s%s' % (name, suffix))
776
self.assertEqual(size, stat.st_size)
778
def test_pulling_nothing_leads_to_no_new_names(self):
779
format = self.get_format()
780
tree1 = self.make_branch_and_tree('1', format=format)
781
tree2 = self.make_branch_and_tree('2', format=format)
782
tree1.branch.repository.fetch(tree2.branch.repository)
783
trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
785
list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
787
def test_commit_across_pack_shape_boundary_autopacks(self):
788
format = self.get_format()
789
tree = self.make_branch_and_tree('.', format=format)
790
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
791
# This test could be a little cheaper by replacing the packs
792
# attribute on the repository to allow a different pack distribution
793
# and max packs policy - so we are checking the policy is honoured
794
# in the test. But for now 11 commits is not a big deal in a single
797
tree.commit('commit %s' % x)
798
# there should be 9 packs:
799
index = GraphIndex(trans, 'pack-names', None)
800
self.assertEqual(9, len(list(index.iter_all_entries())))
801
# insert some files in obsolete_packs which should be removed by pack.
802
trans.put_bytes('obsolete_packs/foo', '123')
803
trans.put_bytes('obsolete_packs/bar', '321')
804
# committing one more should coalesce to 1 of 10.
805
tree.commit('commit triggering pack')
806
index = GraphIndex(trans, 'pack-names', None)
807
self.assertEqual(1, len(list(index.iter_all_entries())))
808
# packing should not damage data
809
tree = tree.bzrdir.open_workingtree()
810
check_result = tree.branch.repository.check(
811
[tree.branch.last_revision()])
812
# We should have 50 (10x5) files in the obsolete_packs directory.
813
obsolete_files = list(trans.list_dir('obsolete_packs'))
814
self.assertFalse('foo' in obsolete_files)
815
self.assertFalse('bar' in obsolete_files)
816
self.assertEqual(50, len(obsolete_files))
817
# XXX: Todo check packs obsoleted correctly - old packs and indices
818
# in the obsolete_packs directory.
819
large_pack_name = list(index.iter_all_entries())[0][1][0]
820
# finally, committing again should not touch the large pack.
821
tree.commit('commit not triggering pack')
822
index = GraphIndex(trans, 'pack-names', None)
823
self.assertEqual(2, len(list(index.iter_all_entries())))
824
pack_names = [node[1][0] for node in index.iter_all_entries()]
825
self.assertTrue(large_pack_name in pack_names)
827
def test_fail_obsolete_deletion(self):
828
# failing to delete obsolete packs is not fatal
829
format = self.get_format()
830
server = fakenfs.FakeNFSServer()
832
self.addCleanup(server.tearDown)
833
transport = get_transport(server.get_url())
834
bzrdir = self.get_format().initialize_on_transport(transport)
835
repo = bzrdir.create_repository()
836
repo_transport = bzrdir.get_repository_transport(None)
837
self.assertTrue(repo_transport.has('obsolete_packs'))
838
# these files are in use by another client and typically can't be deleted
839
repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
840
repo._pack_collection._clear_obsolete_packs()
841
self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))
843
def test_pack_after_two_commits_packs_everything(self):
844
format = self.get_format()
845
tree = self.make_branch_and_tree('.', format=format)
846
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
848
tree.commit('more work')
849
tree.branch.repository.pack()
850
# there should be 1 pack:
851
index = GraphIndex(trans, 'pack-names', None)
852
self.assertEqual(1, len(list(index.iter_all_entries())))
853
self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
855
def test_pack_layout(self):
856
format = self.get_format()
857
tree = self.make_branch_and_tree('.', format=format)
858
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
859
tree.commit('start', rev_id='1')
860
tree.commit('more work', rev_id='2')
861
tree.branch.repository.pack()
863
self.addCleanup(tree.unlock)
864
pack = tree.branch.repository._pack_collection.get_pack_by_name(
865
tree.branch.repository._pack_collection.names()[0])
866
# revision access tends to be tip->ancestor, so ordering that way on
867
# disk is a good idea.
868
for _1, key, val, refs in pack.revision_index.iter_all_entries():
870
pos_1 = int(val[1:].split()[0])
872
pos_2 = int(val[1:].split()[0])
873
self.assertTrue(pos_2 < pos_1)
875
def test_pack_repositories_support_multiple_write_locks(self):
876
format = self.get_format()
877
self.make_repository('.', shared=True, format=format)
878
r1 = repository.Repository.open('.')
879
r2 = repository.Repository.open('.')
881
self.addCleanup(r1.unlock)
885
def _add_text(self, repo, fileid):
886
"""Add a text to the repository within a write group."""
887
repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])
889
def test_concurrent_writers_merge_new_packs(self):
890
format = self.get_format()
891
self.make_repository('.', shared=True, format=format)
892
r1 = repository.Repository.open('.')
893
r2 = repository.Repository.open('.')
896
# access enough data to load the names list
897
list(r1.all_revision_ids())
900
# access enough data to load the names list
901
list(r2.all_revision_ids())
902
r1.start_write_group()
904
r2.start_write_group()
906
self._add_text(r1, 'fileidr1')
907
self._add_text(r2, 'fileidr2')
909
r2.abort_write_group()
912
r1.abort_write_group()
914
# both r1 and r2 have open write groups with data in them
915
# created while the other's write group was open.
916
# Commit both which requires a merge to the pack-names.
918
r1.commit_write_group()
920
r1.abort_write_group()
921
r2.abort_write_group()
923
r2.commit_write_group()
924
# tell r1 to reload from disk
925
r1._pack_collection.reset()
926
# Now both repositories should know about both names
927
r1._pack_collection.ensure_loaded()
928
r2._pack_collection.ensure_loaded()
929
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
930
self.assertEqual(2, len(r1._pack_collection.names()))
936
def test_concurrent_writer_second_preserves_dropping_a_pack(self):
937
format = self.get_format()
938
self.make_repository('.', shared=True, format=format)
939
r1 = repository.Repository.open('.')
940
r2 = repository.Repository.open('.')
944
r1.start_write_group()
946
self._add_text(r1, 'fileidr1')
948
r1.abort_write_group()
951
r1.commit_write_group()
952
r1._pack_collection.ensure_loaded()
953
name_to_drop = r1._pack_collection.all_packs()[0].name
958
# access enough data to load the names list
959
list(r1.all_revision_ids())
962
# access enough data to load the names list
963
list(r2.all_revision_ids())
964
r1._pack_collection.ensure_loaded()
966
r2.start_write_group()
968
# in r1, drop the pack
969
r1._pack_collection._remove_pack_from_memory(
970
r1._pack_collection.get_pack_by_name(name_to_drop))
972
self._add_text(r2, 'fileidr2')
974
r2.abort_write_group()
977
r1._pack_collection.reset()
979
# r1 has a changed names list, and r2 an open write groups with
981
# save r1, and then commit the r2 write group, which requires a
982
# merge to the pack-names, which should not reinstate
985
r1._pack_collection._save_pack_names()
986
r1._pack_collection.reset()
988
r2.abort_write_group()
991
r2.commit_write_group()
993
r2.abort_write_group()
995
# Now both repositories should now about just one name.
996
r1._pack_collection.ensure_loaded()
997
r2._pack_collection.ensure_loaded()
998
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
999
self.assertEqual(1, len(r1._pack_collection.names()))
1000
self.assertFalse(name_to_drop in r1._pack_collection.names())
1006
def test_lock_write_does_not_physically_lock(self):
1007
repo = self.make_repository('.', format=self.get_format())
1009
self.addCleanup(repo.unlock)
1010
self.assertFalse(repo.get_physical_lock_status())
1012
def prepare_for_break_lock(self):
1013
# Setup the global ui factory state so that a break-lock method call
1014
# will find usable input in the input stream.
1015
old_factory = bzrlib.ui.ui_factory
1016
def restoreFactory():
1017
bzrlib.ui.ui_factory = old_factory
1018
self.addCleanup(restoreFactory)
1019
bzrlib.ui.ui_factory = bzrlib.ui.SilentUIFactory()
1020
bzrlib.ui.ui_factory.stdin = StringIO("y\n")
1022
def test_break_lock_breaks_physical_lock(self):
1023
repo = self.make_repository('.', format=self.get_format())
1024
repo._pack_collection.lock_names()
1025
repo2 = repository.Repository.open('.')
1026
self.assertTrue(repo.get_physical_lock_status())
1027
self.prepare_for_break_lock()
1029
self.assertFalse(repo.get_physical_lock_status())
1031
def test_broken_physical_locks_error_on__unlock_names_lock(self):
1032
repo = self.make_repository('.', format=self.get_format())
1033
repo._pack_collection.lock_names()
1034
self.assertTrue(repo.get_physical_lock_status())
1035
repo2 = repository.Repository.open('.')
1036
self.prepare_for_break_lock()
1038
self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)
1040
def test_fetch_without_find_ghosts_ignores_ghosts(self):
1041
# we want two repositories at this point:
1042
# one with a revision that is a ghost in the other
1044
# 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
1045
# 'references' is present in both repositories, and 'tip' is present
1046
# just in has_ghost.
1047
# has_ghost missing_ghost
1048
#------------------------------
1050
# 'references' 'references'
1052
# In this test we fetch 'tip' which should not fetch 'ghost'
1053
has_ghost = self.make_repository('has_ghost', format=self.get_format())
1054
missing_ghost = self.make_repository('missing_ghost',
1055
format=self.get_format())
1057
def add_commit(repo, revision_id, parent_ids):
1059
repo.start_write_group()
1060
inv = inventory.Inventory(revision_id=revision_id)
1061
inv.root.revision = revision_id
1062
root_id = inv.root.file_id
1063
sha1 = repo.add_inventory(revision_id, inv, [])
1064
repo.texts.add_lines((root_id, revision_id), [], [])
1065
rev = bzrlib.revision.Revision(timestamp=0,
1067
committer="Foo Bar <foo@example.com>",
1069
inventory_sha1=sha1,
1070
revision_id=revision_id)
1071
rev.parent_ids = parent_ids
1072
repo.add_revision(revision_id, rev)
1073
repo.commit_write_group()
1075
add_commit(has_ghost, 'ghost', [])
1076
add_commit(has_ghost, 'references', ['ghost'])
1077
add_commit(missing_ghost, 'references', ['ghost'])
1078
add_commit(has_ghost, 'tip', ['references'])
1079
missing_ghost.fetch(has_ghost, 'tip')
1080
# missing ghost now has tip and not ghost.
1081
rev = missing_ghost.get_revision('tip')
1082
inv = missing_ghost.get_inventory('tip')
1083
self.assertRaises(errors.NoSuchRevision,
1084
missing_ghost.get_revision, 'ghost')
1085
self.assertRaises(errors.NoSuchRevision,
1086
missing_ghost.get_inventory, 'ghost')
1088
def test_supports_external_lookups(self):
1089
repo = self.make_repository('.', format=self.get_format())
1090
self.assertFalse(repo._format.supports_external_lookups)
1093
class TestKnitPackSubtrees(TestKnitPackNoSubtrees):
    """Run the pack repository tests against the subtree pack format."""

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'pack-0.92-subtree')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n",
            t.get('format').read())
class TestDevelopment0(TestKnitPackNoSubtrees):
    """Run the pack repository tests against development format 0."""

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'development0')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar development format 0 (needs bzr.dev from before 1.3)\n",
            t.get('format').read())
class TestDevelopment0Subtree(TestKnitPackNoSubtrees):
    """Run the pack repository tests against development format 0 + subtrees."""

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'development0-subtree')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar development format 0 with subtree support "
            "(needs bzr.dev from before 1.3)\n",
            t.get('format').read())
class TestExternalDevelopment1(object):
    """Mixin class for testing stack-supporting development formats."""

    def test_compatible_cross_formats(self):
        # early versions of the packing code relied on pack internals to
        # stack, but the current version should be able to stack on any
        # format.
        repo = self.make_repository('repo', format=self.get_format())
        if repo.supports_rich_root():
            # can only stack on repositories that have compatible internal
            # metadata
            matching_format_name = 'pack-0.92-subtree'
            mismatching_format_name = 'pack-0.92'
        else:
            matching_format_name = 'pack-0.92'
            mismatching_format_name = 'pack-0.92-subtree'
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
1156
base = self.make_branch_and_tree('base', format=self.get_format())
1158
referencing = self.make_branch_and_tree('repo', format=self.get_format())
1159
referencing.branch.repository.add_fallback_repository(base.branch.repository)
1160
referencing.commit('bar')
1161
new_instance = referencing.bzrdir.open_repository()
1162
new_instance.lock_read()
1163
self.addCleanup(new_instance.unlock)
1164
new_instance._pack_collection.ensure_loaded()
1165
self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
1167
def test_autopack_only_considers_main_repo_packs(self):
1168
base = self.make_branch_and_tree('base', format=self.get_format())
1170
tree = self.make_branch_and_tree('repo', format=self.get_format())
1171
tree.branch.repository.add_fallback_repository(base.branch.repository)
1172
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
1173
# This test could be a little cheaper by replacing the packs
1174
# attribute on the repository to allow a different pack distribution
1175
# and max packs policy - so we are checking the policy is honoured
1176
# in the test. But for now 11 commits is not a big deal in a single
1179
tree.commit('commit %s' % x)
1180
# there should be 9 packs:
1181
index = GraphIndex(trans, 'pack-names', None)
1182
self.assertEqual(9, len(list(index.iter_all_entries())))
1183
# committing one more should coalesce to 1 of 10.
1184
tree.commit('commit triggering pack')
1185
index = GraphIndex(trans, 'pack-names', None)
1186
self.assertEqual(1, len(list(index.iter_all_entries())))
1187
# packing should not damage data
1188
tree = tree.bzrdir.open_workingtree()
1189
check_result = tree.branch.repository.check(
1190
[tree.branch.last_revision()])
1191
# We should have 50 (10x5) files in the obsolete_packs directory.
1192
obsolete_files = list(trans.list_dir('obsolete_packs'))
1193
self.assertFalse('foo' in obsolete_files)
1194
self.assertFalse('bar' in obsolete_files)
1195
self.assertEqual(50, len(obsolete_files))
1196
# XXX: Todo check packs obsoleted correctly - old packs and indices
1197
# in the obsolete_packs directory.
1198
large_pack_name = list(index.iter_all_entries())[0][1][0]
1199
# finally, committing again should not touch the large pack.
1200
tree.commit('commit not triggering pack')
1201
index = GraphIndex(trans, 'pack-names', None)
1202
self.assertEqual(2, len(list(index.iter_all_entries())))
1203
pack_names = [node[1][0] for node in index.iter_all_entries()]
1204
self.assertTrue(large_pack_name in pack_names)
1207
class TestDevelopment1(TestKnitPackNoSubtrees, TestExternalDevelopment1):
    """Run the pack and stacking tests against development format 1."""

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'development')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar development format 1 (needs bzr.dev from before 1.6)\n",
            t.get('format').read())

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertTrue(repo._format.supports_external_lookups)
class TestDevelopment1Subtree(TestKnitPackNoSubtrees, TestExternalDevelopment1):
    """Run the pack and stacking tests against development format 1 + subtrees."""

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'development-subtree')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar development format 1 with subtree support "
            "(needs bzr.dev from before 1.6)\n",
            t.get('format').read())

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertTrue(repo._format.supports_external_lookups)
742
class TestRepositoryPackCollection(TestCaseWithTransport):
1242
744
def get_format(self):