3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
1 |
# Copyright (C) 2008 Canonical Ltd
|
2 |
#
|
|
3 |
# This program is free software; you can redistribute it and/or modify
|
|
4 |
# it under the terms of the GNU General Public License as published by
|
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
7 |
#
|
|
8 |
# This program is distributed in the hope that it will be useful,
|
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
12 |
#
|
|
13 |
# You should have received a copy of the GNU General Public License
|
|
14 |
# along with this program; if not, write to the Free Software
|
|
4183.7.1
by Sabin Iacob
update FSF mailing address |
15 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
16 |
|
17 |
"""Tests for pack repositories.
|
|
18 |
||
19 |
These tests are repeated for all pack-based repository formats.
|
|
20 |
"""
|
|
21 |
||
3582.3.4
by Martin Pool
Use cStringIO rather than StringIO |
22 |
from cStringIO import StringIO |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
23 |
from stat import S_ISDIR |
24 |
||
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
25 |
from bzrlib.btree_index import BTreeGraphIndex |
26 |
from bzrlib.index import GraphIndex |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
27 |
from bzrlib import ( |
28 |
bzrdir, |
|
29 |
errors, |
|
30 |
inventory, |
|
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
31 |
osutils, |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
32 |
progress, |
33 |
repository, |
|
34 |
revision as _mod_revision, |
|
35 |
symbol_versioning, |
|
36 |
tests, |
|
37 |
ui, |
|
38 |
upgrade, |
|
39 |
workingtree, |
|
40 |
)
|
|
4360.4.6
by John Arbash Meinel
Change how 'missing.*parent_prevents_commit' determines what to skip. |
41 |
from bzrlib.repofmt import ( |
42 |
pack_repo, |
|
43 |
groupcompress_repo, |
|
44 |
)
|
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
45 |
from bzrlib.repofmt.groupcompress_repo import RepositoryFormatCHK1 |
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
46 |
from bzrlib.smart import ( |
47 |
client, |
|
48 |
server, |
|
49 |
)
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
50 |
from bzrlib.tests import ( |
51 |
TestCase, |
|
52 |
TestCaseWithTransport, |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
53 |
TestNotApplicable, |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
54 |
TestSkipped, |
55 |
)
|
|
56 |
from bzrlib.transport import ( |
|
57 |
fakenfs, |
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
58 |
memory, |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
59 |
get_transport, |
60 |
)
|
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
61 |
from bzrlib.tests.per_repository import TestCaseWithRepository |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
62 |
|
63 |
||
64 |
class TestPackRepository(TestCaseWithTransport): |
|
65 |
"""Tests to be repeated across all pack-based formats.
|
|
66 |
||
67 |
The following are populated from the test scenario:
|
|
68 |
||
69 |
:ivar format_name: Registered name fo the format to test.
|
|
70 |
:ivar format_string: On-disk format marker.
|
|
71 |
:ivar format_supports_external_lookups: Boolean.
|
|
72 |
"""
|
|
73 |
||
74 |
def get_format(self): |
|
75 |
return bzrdir.format_registry.make_bzrdir(self.format_name) |
|
76 |
||
77 |
def test_attribute__fetch_order(self): |
|
3606.7.3
by John Arbash Meinel
We don't have to fetch in topological order, as long as we fix all of the delta logic pieces. |
78 |
"""Packs do not need ordered data retrieval."""
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
79 |
format = self.get_format() |
80 |
repo = self.make_repository('.', format=format) |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
81 |
self.assertEqual('unordered', repo._format._fetch_order) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
82 |
|
83 |
def test_attribute__fetch_uses_deltas(self): |
|
84 |
"""Packs reuse deltas."""
|
|
85 |
format = self.get_format() |
|
86 |
repo = self.make_repository('.', format=format) |
|
4265.1.4
by John Arbash Meinel
Special case the CHK1 format to allow it to not fetch using deltas. |
87 |
if isinstance(format.repository_format, RepositoryFormatCHK1): |
88 |
# TODO: This is currently a workaround. CHK format repositories
|
|
89 |
# ignore the 'deltas' flag, but during conversions, we can't
|
|
90 |
# do unordered delta fetches. Remove this clause once we
|
|
91 |
# improve the inter-format fetching.
|
|
92 |
self.assertEqual(False, repo._format._fetch_uses_deltas) |
|
93 |
else: |
|
94 |
self.assertEqual(True, repo._format._fetch_uses_deltas) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
95 |
|
96 |
def test_disk_layout(self): |
|
97 |
format = self.get_format() |
|
98 |
repo = self.make_repository('.', format=format) |
|
99 |
# in case of side effects of locking.
|
|
100 |
repo.lock_write() |
|
101 |
repo.unlock() |
|
102 |
t = repo.bzrdir.get_repository_transport(None) |
|
103 |
self.check_format(t) |
|
104 |
# XXX: no locks left when unlocked at the moment
|
|
105 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
106 |
self.check_databases(t) |
|
107 |
||
108 |
def check_format(self, t): |
|
109 |
self.assertEqualDiff( |
|
110 |
self.format_string, # from scenario |
|
111 |
t.get('format').read()) |
|
112 |
||
113 |
def assertHasNoKndx(self, t, knit_name): |
|
114 |
"""Assert that knit_name has no index on t."""
|
|
115 |
self.assertFalse(t.has(knit_name + '.kndx')) |
|
116 |
||
117 |
def assertHasNoKnit(self, t, knit_name): |
|
118 |
"""Assert that knit_name exists on t."""
|
|
119 |
# no default content
|
|
120 |
self.assertFalse(t.has(knit_name + '.knit')) |
|
121 |
||
122 |
def check_databases(self, t): |
|
123 |
"""check knit content for a repository."""
|
|
124 |
# check conversion worked
|
|
125 |
self.assertHasNoKndx(t, 'inventory') |
|
126 |
self.assertHasNoKnit(t, 'inventory') |
|
127 |
self.assertHasNoKndx(t, 'revisions') |
|
128 |
self.assertHasNoKnit(t, 'revisions') |
|
129 |
self.assertHasNoKndx(t, 'signatures') |
|
130 |
self.assertHasNoKnit(t, 'signatures') |
|
131 |
self.assertFalse(t.has('knits')) |
|
132 |
# revision-indexes file-container directory
|
|
133 |
self.assertEqual([], |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
134 |
list(self.index_class(t, 'pack-names', None).iter_all_entries())) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
135 |
self.assertTrue(S_ISDIR(t.stat('packs').st_mode)) |
136 |
self.assertTrue(S_ISDIR(t.stat('upload').st_mode)) |
|
137 |
self.assertTrue(S_ISDIR(t.stat('indices').st_mode)) |
|
138 |
self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode)) |
|
139 |
||
140 |
def test_shared_disk_layout(self): |
|
141 |
format = self.get_format() |
|
142 |
repo = self.make_repository('.', shared=True, format=format) |
|
143 |
# we want:
|
|
144 |
t = repo.bzrdir.get_repository_transport(None) |
|
145 |
self.check_format(t) |
|
146 |
# XXX: no locks left when unlocked at the moment
|
|
147 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
148 |
# We should have a 'shared-storage' marker file.
|
|
149 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
|
150 |
self.check_databases(t) |
|
151 |
||
152 |
def test_shared_no_tree_disk_layout(self): |
|
153 |
format = self.get_format() |
|
154 |
repo = self.make_repository('.', shared=True, format=format) |
|
155 |
repo.set_make_working_trees(False) |
|
156 |
# we want:
|
|
157 |
t = repo.bzrdir.get_repository_transport(None) |
|
158 |
self.check_format(t) |
|
159 |
# XXX: no locks left when unlocked at the moment
|
|
160 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
161 |
# We should have a 'shared-storage' marker file.
|
|
162 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
|
163 |
# We should have a marker for the no-working-trees flag.
|
|
164 |
self.assertEqualDiff('', t.get('no-working-trees').read()) |
|
165 |
# The marker should go when we toggle the setting.
|
|
166 |
repo.set_make_working_trees(True) |
|
167 |
self.assertFalse(t.has('no-working-trees')) |
|
168 |
self.check_databases(t) |
|
169 |
||
170 |
def test_adding_revision_creates_pack_indices(self): |
|
171 |
format = self.get_format() |
|
172 |
tree = self.make_branch_and_tree('.', format=format) |
|
173 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
174 |
self.assertEqual([], |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
175 |
list(self.index_class(trans, 'pack-names', None).iter_all_entries())) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
176 |
tree.commit('foobarbaz') |
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
177 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
178 |
index_nodes = list(index.iter_all_entries()) |
179 |
self.assertEqual(1, len(index_nodes)) |
|
180 |
node = index_nodes[0] |
|
181 |
name = node[1][0] |
|
182 |
# the pack sizes should be listed in the index
|
|
183 |
pack_value = node[2] |
|
184 |
sizes = [int(digits) for digits in pack_value.split(' ')] |
|
185 |
for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']): |
|
186 |
stat = trans.stat('indices/%s%s' % (name, suffix)) |
|
187 |
self.assertEqual(size, stat.st_size) |
|
188 |
||
189 |
def test_pulling_nothing_leads_to_no_new_names(self): |
|
190 |
format = self.get_format() |
|
191 |
tree1 = self.make_branch_and_tree('1', format=format) |
|
192 |
tree2 = self.make_branch_and_tree('2', format=format) |
|
193 |
tree1.branch.repository.fetch(tree2.branch.repository) |
|
194 |
trans = tree1.branch.repository.bzrdir.get_repository_transport(None) |
|
195 |
self.assertEqual([], |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
196 |
list(self.index_class(trans, 'pack-names', None).iter_all_entries())) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
197 |
|
198 |
def test_commit_across_pack_shape_boundary_autopacks(self): |
|
199 |
format = self.get_format() |
|
200 |
tree = self.make_branch_and_tree('.', format=format) |
|
201 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
202 |
# This test could be a little cheaper by replacing the packs
|
|
203 |
# attribute on the repository to allow a different pack distribution
|
|
204 |
# and max packs policy - so we are checking the policy is honoured
|
|
205 |
# in the test. But for now 11 commits is not a big deal in a single
|
|
206 |
# test.
|
|
207 |
for x in range(9): |
|
208 |
tree.commit('commit %s' % x) |
|
209 |
# there should be 9 packs:
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
210 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
211 |
self.assertEqual(9, len(list(index.iter_all_entries()))) |
212 |
# insert some files in obsolete_packs which should be removed by pack.
|
|
213 |
trans.put_bytes('obsolete_packs/foo', '123') |
|
214 |
trans.put_bytes('obsolete_packs/bar', '321') |
|
215 |
# committing one more should coalesce to 1 of 10.
|
|
216 |
tree.commit('commit triggering pack') |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
217 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
218 |
self.assertEqual(1, len(list(index.iter_all_entries()))) |
219 |
# packing should not damage data
|
|
220 |
tree = tree.bzrdir.open_workingtree() |
|
221 |
check_result = tree.branch.repository.check( |
|
222 |
[tree.branch.last_revision()]) |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
223 |
nb_files = 5 # .pack, .rix, .iix, .tix, .six |
224 |
if tree.branch.repository._format.supports_chks: |
|
225 |
nb_files += 1 # .cix |
|
226 |
# We should have 10 x nb_files files in the obsolete_packs directory.
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
227 |
obsolete_files = list(trans.list_dir('obsolete_packs')) |
228 |
self.assertFalse('foo' in obsolete_files) |
|
229 |
self.assertFalse('bar' in obsolete_files) |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
230 |
self.assertEqual(10 * nb_files, len(obsolete_files)) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
231 |
# XXX: Todo check packs obsoleted correctly - old packs and indices
|
232 |
# in the obsolete_packs directory.
|
|
233 |
large_pack_name = list(index.iter_all_entries())[0][1][0] |
|
234 |
# finally, committing again should not touch the large pack.
|
|
235 |
tree.commit('commit not triggering pack') |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
236 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
237 |
self.assertEqual(2, len(list(index.iter_all_entries()))) |
238 |
pack_names = [node[1][0] for node in index.iter_all_entries()] |
|
239 |
self.assertTrue(large_pack_name in pack_names) |
|
240 |
||
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
241 |
def test_commit_write_group_returns_new_pack_names(self): |
242 |
format = self.get_format() |
|
243 |
tree = self.make_branch_and_tree('foo', format=format) |
|
244 |
tree.commit('first post') |
|
245 |
repo = tree.branch.repository |
|
246 |
repo.lock_write() |
|
247 |
try: |
|
248 |
repo.start_write_group() |
|
249 |
try: |
|
250 |
inv = inventory.Inventory(revision_id="A") |
|
251 |
inv.root.revision = "A" |
|
252 |
repo.texts.add_lines((inv.root.file_id, "A"), [], []) |
|
253 |
rev = _mod_revision.Revision(timestamp=0, timezone=None, |
|
254 |
committer="Foo Bar <foo@example.com>", message="Message", |
|
255 |
revision_id="A") |
|
256 |
rev.parent_ids = () |
|
257 |
repo.add_revision("A", rev, inv=inv) |
|
258 |
except: |
|
259 |
repo.abort_write_group() |
|
260 |
raise
|
|
261 |
else: |
|
262 |
old_names = repo._pack_collection._names.keys() |
|
263 |
result = repo.commit_write_group() |
|
264 |
cur_names = repo._pack_collection._names.keys() |
|
265 |
new_names = list(set(cur_names) - set(old_names)) |
|
266 |
self.assertEqual(new_names, result) |
|
267 |
finally: |
|
268 |
repo.unlock() |
|
269 |
||
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
270 |
def test_fail_obsolete_deletion(self): |
271 |
# failing to delete obsolete packs is not fatal
|
|
272 |
format = self.get_format() |
|
273 |
server = fakenfs.FakeNFSServer() |
|
274 |
server.setUp() |
|
275 |
self.addCleanup(server.tearDown) |
|
276 |
transport = get_transport(server.get_url()) |
|
277 |
bzrdir = self.get_format().initialize_on_transport(transport) |
|
278 |
repo = bzrdir.create_repository() |
|
279 |
repo_transport = bzrdir.get_repository_transport(None) |
|
280 |
self.assertTrue(repo_transport.has('obsolete_packs')) |
|
281 |
# these files are in use by another client and typically can't be deleted
|
|
282 |
repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents') |
|
283 |
repo._pack_collection._clear_obsolete_packs() |
|
284 |
self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah')) |
|
285 |
||
286 |
def test_pack_after_two_commits_packs_everything(self): |
|
287 |
format = self.get_format() |
|
288 |
tree = self.make_branch_and_tree('.', format=format) |
|
289 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
290 |
tree.commit('start') |
|
291 |
tree.commit('more work') |
|
292 |
tree.branch.repository.pack() |
|
293 |
# there should be 1 pack:
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
294 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
295 |
self.assertEqual(1, len(list(index.iter_all_entries()))) |
296 |
self.assertEqual(2, len(tree.branch.repository.all_revision_ids())) |
|
297 |
||
298 |
def test_pack_layout(self): |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
299 |
# Test that the ordering of revisions in pack repositories is
|
300 |
# tip->ancestor
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
301 |
format = self.get_format() |
302 |
tree = self.make_branch_and_tree('.', format=format) |
|
303 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
304 |
tree.commit('start', rev_id='1') |
|
305 |
tree.commit('more work', rev_id='2') |
|
306 |
tree.branch.repository.pack() |
|
307 |
tree.lock_read() |
|
308 |
self.addCleanup(tree.unlock) |
|
309 |
pack = tree.branch.repository._pack_collection.get_pack_by_name( |
|
310 |
tree.branch.repository._pack_collection.names()[0]) |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
311 |
# revision access tends to be tip->ancestor, so ordering that way on
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
312 |
# disk is a good idea.
|
313 |
for _1, key, val, refs in pack.revision_index.iter_all_entries(): |
|
4350.2.1
by John Arbash Meinel
Update a test to support CHK formats. |
314 |
if type(format.repository_format) is RepositoryFormatCHK1: |
315 |
# group_start, group_len, internal_start, internal_len
|
|
316 |
pos = map(int, val.split()) |
|
317 |
else: |
|
318 |
# eol_flag, start, len
|
|
319 |
pos = int(val[1:].split()[0]) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
320 |
if key == ('1',): |
4350.2.1
by John Arbash Meinel
Update a test to support CHK formats. |
321 |
pos_1 = pos |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
322 |
else: |
4350.2.1
by John Arbash Meinel
Update a test to support CHK formats. |
323 |
pos_2 = pos |
324 |
self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s' |
|
325 |
% (pos_1, pos_2)) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
326 |
|
327 |
def test_pack_repositories_support_multiple_write_locks(self): |
|
328 |
format = self.get_format() |
|
329 |
self.make_repository('.', shared=True, format=format) |
|
330 |
r1 = repository.Repository.open('.') |
|
331 |
r2 = repository.Repository.open('.') |
|
332 |
r1.lock_write() |
|
333 |
self.addCleanup(r1.unlock) |
|
334 |
r2.lock_write() |
|
335 |
r2.unlock() |
|
336 |
||
337 |
def _add_text(self, repo, fileid): |
|
338 |
"""Add a text to the repository within a write group."""
|
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
339 |
repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], |
340 |
['smaplerev+'+fileid]) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
341 |
|
342 |
def test_concurrent_writers_merge_new_packs(self): |
|
343 |
format = self.get_format() |
|
344 |
self.make_repository('.', shared=True, format=format) |
|
345 |
r1 = repository.Repository.open('.') |
|
346 |
r2 = repository.Repository.open('.') |
|
347 |
r1.lock_write() |
|
348 |
try: |
|
349 |
# access enough data to load the names list
|
|
350 |
list(r1.all_revision_ids()) |
|
351 |
r2.lock_write() |
|
352 |
try: |
|
353 |
# access enough data to load the names list
|
|
354 |
list(r2.all_revision_ids()) |
|
355 |
r1.start_write_group() |
|
356 |
try: |
|
357 |
r2.start_write_group() |
|
358 |
try: |
|
359 |
self._add_text(r1, 'fileidr1') |
|
360 |
self._add_text(r2, 'fileidr2') |
|
361 |
except: |
|
362 |
r2.abort_write_group() |
|
363 |
raise
|
|
364 |
except: |
|
365 |
r1.abort_write_group() |
|
366 |
raise
|
|
367 |
# both r1 and r2 have open write groups with data in them
|
|
368 |
# created while the other's write group was open.
|
|
369 |
# Commit both which requires a merge to the pack-names.
|
|
370 |
try: |
|
371 |
r1.commit_write_group() |
|
372 |
except: |
|
373 |
r1.abort_write_group() |
|
374 |
r2.abort_write_group() |
|
375 |
raise
|
|
376 |
r2.commit_write_group() |
|
377 |
# tell r1 to reload from disk
|
|
378 |
r1._pack_collection.reset() |
|
379 |
# Now both repositories should know about both names
|
|
380 |
r1._pack_collection.ensure_loaded() |
|
381 |
r2._pack_collection.ensure_loaded() |
|
382 |
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names()) |
|
383 |
self.assertEqual(2, len(r1._pack_collection.names())) |
|
384 |
finally: |
|
385 |
r2.unlock() |
|
386 |
finally: |
|
387 |
r1.unlock() |
|
388 |
||
389 |
def test_concurrent_writer_second_preserves_dropping_a_pack(self): |
|
390 |
format = self.get_format() |
|
391 |
self.make_repository('.', shared=True, format=format) |
|
392 |
r1 = repository.Repository.open('.') |
|
393 |
r2 = repository.Repository.open('.') |
|
394 |
# add a pack to drop
|
|
395 |
r1.lock_write() |
|
396 |
try: |
|
397 |
r1.start_write_group() |
|
398 |
try: |
|
399 |
self._add_text(r1, 'fileidr1') |
|
400 |
except: |
|
401 |
r1.abort_write_group() |
|
402 |
raise
|
|
403 |
else: |
|
404 |
r1.commit_write_group() |
|
405 |
r1._pack_collection.ensure_loaded() |
|
406 |
name_to_drop = r1._pack_collection.all_packs()[0].name |
|
407 |
finally: |
|
408 |
r1.unlock() |
|
409 |
r1.lock_write() |
|
410 |
try: |
|
411 |
# access enough data to load the names list
|
|
412 |
list(r1.all_revision_ids()) |
|
413 |
r2.lock_write() |
|
414 |
try: |
|
415 |
# access enough data to load the names list
|
|
416 |
list(r2.all_revision_ids()) |
|
417 |
r1._pack_collection.ensure_loaded() |
|
418 |
try: |
|
419 |
r2.start_write_group() |
|
420 |
try: |
|
421 |
# in r1, drop the pack
|
|
422 |
r1._pack_collection._remove_pack_from_memory( |
|
423 |
r1._pack_collection.get_pack_by_name(name_to_drop)) |
|
424 |
# in r2, add a pack
|
|
425 |
self._add_text(r2, 'fileidr2') |
|
426 |
except: |
|
427 |
r2.abort_write_group() |
|
428 |
raise
|
|
429 |
except: |
|
430 |
r1._pack_collection.reset() |
|
431 |
raise
|
|
432 |
# r1 has a changed names list, and r2 an open write groups with
|
|
433 |
# changes.
|
|
434 |
# save r1, and then commit the r2 write group, which requires a
|
|
435 |
# merge to the pack-names, which should not reinstate
|
|
436 |
# name_to_drop
|
|
437 |
try: |
|
438 |
r1._pack_collection._save_pack_names() |
|
439 |
r1._pack_collection.reset() |
|
440 |
except: |
|
441 |
r2.abort_write_group() |
|
442 |
raise
|
|
443 |
try: |
|
444 |
r2.commit_write_group() |
|
445 |
except: |
|
446 |
r2.abort_write_group() |
|
447 |
raise
|
|
448 |
# Now both repositories should now about just one name.
|
|
449 |
r1._pack_collection.ensure_loaded() |
|
450 |
r2._pack_collection.ensure_loaded() |
|
451 |
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names()) |
|
452 |
self.assertEqual(1, len(r1._pack_collection.names())) |
|
453 |
self.assertFalse(name_to_drop in r1._pack_collection.names()) |
|
454 |
finally: |
|
455 |
r2.unlock() |
|
456 |
finally: |
|
457 |
r1.unlock() |
|
458 |
||
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
459 |
def test_concurrent_pack_triggers_reload(self): |
460 |
# create 2 packs, which we will then collapse
|
|
461 |
tree = self.make_branch_and_tree('tree') |
|
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
462 |
tree.lock_write() |
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
463 |
try: |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
464 |
rev1 = tree.commit('one') |
465 |
rev2 = tree.commit('two') |
|
466 |
r2 = repository.Repository.open('tree') |
|
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
467 |
r2.lock_read() |
468 |
try: |
|
469 |
# Now r2 has read the pack-names file, but will need to reload
|
|
470 |
# it after r1 has repacked
|
|
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
471 |
tree.branch.repository.pack() |
472 |
self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2])) |
|
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
473 |
finally: |
474 |
r2.unlock() |
|
475 |
finally: |
|
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
476 |
tree.unlock() |
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
477 |
|
3789.2.8
by John Arbash Meinel
Add a test that KnitPackRepository.get_record_stream retries when appropriate. |
478 |
def test_concurrent_pack_during_get_record_reloads(self): |
479 |
tree = self.make_branch_and_tree('tree') |
|
480 |
tree.lock_write() |
|
481 |
try: |
|
482 |
rev1 = tree.commit('one') |
|
483 |
rev2 = tree.commit('two') |
|
3789.2.14
by John Arbash Meinel
Update AggregateIndex to pass the reload_func into _DirectPackAccess |
484 |
keys = [(rev1,), (rev2,)] |
3789.2.8
by John Arbash Meinel
Add a test that KnitPackRepository.get_record_stream retries when appropriate. |
485 |
r2 = repository.Repository.open('tree') |
486 |
r2.lock_read() |
|
487 |
try: |
|
488 |
# At this point, we will start grabbing a record stream, and
|
|
489 |
# trigger a repack mid-way
|
|
490 |
packed = False |
|
491 |
result = {} |
|
492 |
record_stream = r2.revisions.get_record_stream(keys, |
|
493 |
'unordered', False) |
|
494 |
for record in record_stream: |
|
495 |
result[record.key] = record |
|
496 |
if not packed: |
|
497 |
tree.branch.repository.pack() |
|
498 |
packed = True |
|
499 |
# The first record will be found in the original location, but
|
|
500 |
# after the pack, we have to reload to find the next record
|
|
3789.2.14
by John Arbash Meinel
Update AggregateIndex to pass the reload_func into _DirectPackAccess |
501 |
self.assertEqual(sorted(keys), sorted(result.keys())) |
3789.2.8
by John Arbash Meinel
Add a test that KnitPackRepository.get_record_stream retries when appropriate. |
502 |
finally: |
503 |
r2.unlock() |
|
504 |
finally: |
|
505 |
tree.unlock() |
|
506 |
||
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
507 |
def test_lock_write_does_not_physically_lock(self): |
508 |
repo = self.make_repository('.', format=self.get_format()) |
|
509 |
repo.lock_write() |
|
510 |
self.addCleanup(repo.unlock) |
|
511 |
self.assertFalse(repo.get_physical_lock_status()) |
|
512 |
||
513 |
def prepare_for_break_lock(self): |
|
514 |
# Setup the global ui factory state so that a break-lock method call
|
|
515 |
# will find usable input in the input stream.
|
|
516 |
old_factory = ui.ui_factory |
|
517 |
def restoreFactory(): |
|
518 |
ui.ui_factory = old_factory |
|
519 |
self.addCleanup(restoreFactory) |
|
4449.3.27
by Martin Pool
More test updates to use CannedInputUIFactory |
520 |
ui.ui_factory = ui.CannedInputUIFactory([True]) |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
521 |
|
522 |
def test_break_lock_breaks_physical_lock(self): |
|
523 |
repo = self.make_repository('.', format=self.get_format()) |
|
524 |
repo._pack_collection.lock_names() |
|
3650.4.1
by Aaron Bentley
Fix test kipple in test_break_lock_breaks_physical_lock |
525 |
repo.control_files.leave_in_place() |
526 |
repo.unlock() |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
527 |
repo2 = repository.Repository.open('.') |
528 |
self.assertTrue(repo.get_physical_lock_status()) |
|
529 |
self.prepare_for_break_lock() |
|
530 |
repo2.break_lock() |
|
531 |
self.assertFalse(repo.get_physical_lock_status()) |
|
532 |
||
533 |
def test_broken_physical_locks_error_on__unlock_names_lock(self): |
|
534 |
repo = self.make_repository('.', format=self.get_format()) |
|
535 |
repo._pack_collection.lock_names() |
|
536 |
self.assertTrue(repo.get_physical_lock_status()) |
|
537 |
repo2 = repository.Repository.open('.') |
|
538 |
self.prepare_for_break_lock() |
|
539 |
repo2.break_lock() |
|
540 |
self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names) |
|
541 |
||
542 |
def test_fetch_without_find_ghosts_ignores_ghosts(self): |
|
543 |
# we want two repositories at this point:
|
|
544 |
# one with a revision that is a ghost in the other
|
|
545 |
# repository.
|
|
546 |
# 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
|
|
547 |
# 'references' is present in both repositories, and 'tip' is present
|
|
548 |
# just in has_ghost.
|
|
549 |
# has_ghost missing_ghost
|
|
550 |
#------------------------------
|
|
551 |
# 'ghost' -
|
|
552 |
# 'references' 'references'
|
|
553 |
# 'tip' -
|
|
554 |
# In this test we fetch 'tip' which should not fetch 'ghost'
|
|
555 |
has_ghost = self.make_repository('has_ghost', format=self.get_format()) |
|
556 |
missing_ghost = self.make_repository('missing_ghost', |
|
557 |
format=self.get_format()) |
|
558 |
||
559 |
def add_commit(repo, revision_id, parent_ids): |
|
560 |
repo.lock_write() |
|
561 |
repo.start_write_group() |
|
562 |
inv = inventory.Inventory(revision_id=revision_id) |
|
563 |
inv.root.revision = revision_id |
|
564 |
root_id = inv.root.file_id |
|
565 |
sha1 = repo.add_inventory(revision_id, inv, []) |
|
566 |
repo.texts.add_lines((root_id, revision_id), [], []) |
|
567 |
rev = _mod_revision.Revision(timestamp=0, |
|
568 |
timezone=None, |
|
569 |
committer="Foo Bar <foo@example.com>", |
|
570 |
message="Message", |
|
571 |
inventory_sha1=sha1, |
|
572 |
revision_id=revision_id) |
|
573 |
rev.parent_ids = parent_ids |
|
574 |
repo.add_revision(revision_id, rev) |
|
575 |
repo.commit_write_group() |
|
576 |
repo.unlock() |
|
577 |
add_commit(has_ghost, 'ghost', []) |
|
578 |
add_commit(has_ghost, 'references', ['ghost']) |
|
579 |
add_commit(missing_ghost, 'references', ['ghost']) |
|
580 |
add_commit(has_ghost, 'tip', ['references']) |
|
581 |
missing_ghost.fetch(has_ghost, 'tip') |
|
582 |
# missing ghost now has tip and not ghost.
|
|
583 |
rev = missing_ghost.get_revision('tip') |
|
584 |
inv = missing_ghost.get_inventory('tip') |
|
585 |
self.assertRaises(errors.NoSuchRevision, |
|
586 |
missing_ghost.get_revision, 'ghost') |
|
587 |
self.assertRaises(errors.NoSuchRevision, |
|
588 |
missing_ghost.get_inventory, 'ghost') |
|
589 |
||
4011.5.6
by Andrew Bennetts
Make sure it's not possible to commit a pack write group when any versioned file has missing compression parents. |
590 |
def make_write_ready_repo(self): |
4360.4.6
by John Arbash Meinel
Change how 'missing.*parent_prevents_commit' determines what to skip. |
591 |
format = self.get_format() |
592 |
if isinstance(format.repository_format, RepositoryFormatCHK1): |
|
593 |
raise TestNotApplicable("No missing compression parents") |
|
594 |
repo = self.make_repository('.', format=format) |
|
4011.5.6
by Andrew Bennetts
Make sure it's not possible to commit a pack write group when any versioned file has missing compression parents. |
595 |
repo.lock_write() |
4360.4.6
by John Arbash Meinel
Change how 'missing.*parent_prevents_commit' determines what to skip. |
596 |
self.addCleanup(repo.unlock) |
4011.5.6
by Andrew Bennetts
Make sure it's not possible to commit a pack write group when any versioned file has missing compression parents. |
597 |
repo.start_write_group() |
4360.4.6
by John Arbash Meinel
Change how 'missing.*parent_prevents_commit' determines what to skip. |
598 |
self.addCleanup(repo.abort_write_group) |
4011.5.6
by Andrew Bennetts
Make sure it's not possible to commit a pack write group when any versioned file has missing compression parents. |
599 |
return repo |
600 |
||
601 |
def test_missing_inventories_compression_parent_prevents_commit(self): |
|
602 |
repo = self.make_write_ready_repo() |
|
603 |
key = ('junk',) |
|
604 |
repo.inventories._index._missing_compression_parents.add(key) |
|
605 |
self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
606 |
self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
607 |
||
608 |
def test_missing_revisions_compression_parent_prevents_commit(self): |
|
609 |
repo = self.make_write_ready_repo() |
|
610 |
key = ('junk',) |
|
611 |
repo.revisions._index._missing_compression_parents.add(key) |
|
612 |
self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
613 |
self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
614 |
||
615 |
def test_missing_signatures_compression_parent_prevents_commit(self): |
|
616 |
repo = self.make_write_ready_repo() |
|
617 |
key = ('junk',) |
|
618 |
repo.signatures._index._missing_compression_parents.add(key) |
|
619 |
self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
620 |
self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
621 |
||
622 |
def test_missing_text_compression_parent_prevents_commit(self): |
|
623 |
repo = self.make_write_ready_repo() |
|
624 |
key = ('some', 'junk') |
|
625 |
repo.texts._index._missing_compression_parents.add(key) |
|
626 |
self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
627 |
e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group) |
|
628 |
||
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
629 |
def test_supports_external_lookups(self): |
630 |
repo = self.make_repository('.', format=self.get_format()) |
|
631 |
self.assertEqual(self.format_supports_external_lookups, |
|
632 |
repo._format.supports_external_lookups) |
|
633 |
||
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
634 |
def test_abort_write_group_does_not_raise_when_suppressed(self): |
635 |
"""Similar to per_repository.test_write_group's test of the same name.
|
|
636 |
||
637 |
Also requires that the exception is logged.
|
|
638 |
"""
|
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
639 |
self.vfs_transport_factory = memory.MemoryServer |
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
640 |
repo = self.make_repository('repo', format=self.get_format()) |
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
641 |
token = repo.lock_write() |
642 |
self.addCleanup(repo.unlock) |
|
643 |
repo.start_write_group() |
|
644 |
# Damage the repository on the filesystem
|
|
645 |
self.get_transport('').rename('repo', 'foo') |
|
646 |
# abort_write_group will not raise an error
|
|
647 |
self.assertEqual(None, repo.abort_write_group(suppress_errors=True)) |
|
648 |
# But it does log an error
|
|
649 |
log_file = self._get_log(keep_log_file=True) |
|
650 |
self.assertContainsRe(log_file, 'abort_write_group failed') |
|
651 |
self.assertContainsRe(log_file, r'INFO bzr: ERROR \(ignored\):') |
|
652 |
if token is not None: |
|
653 |
repo.leave_lock_in_place() |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
654 |
|
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
655 |
def test_abort_write_group_does_raise_when_not_suppressed(self): |
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
656 |
self.vfs_transport_factory = memory.MemoryServer |
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
657 |
repo = self.make_repository('repo', format=self.get_format()) |
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
658 |
token = repo.lock_write() |
659 |
self.addCleanup(repo.unlock) |
|
660 |
repo.start_write_group() |
|
661 |
# Damage the repository on the filesystem
|
|
662 |
self.get_transport('').rename('repo', 'foo') |
|
663 |
# abort_write_group will not raise an error
|
|
664 |
self.assertRaises(Exception, repo.abort_write_group) |
|
665 |
if token is not None: |
|
666 |
repo.leave_lock_in_place() |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
667 |
|
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
668 |
def test_suspend_write_group(self): |
669 |
self.vfs_transport_factory = memory.MemoryServer |
|
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
670 |
repo = self.make_repository('repo', format=self.get_format()) |
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
671 |
token = repo.lock_write() |
672 |
self.addCleanup(repo.unlock) |
|
673 |
repo.start_write_group() |
|
674 |
repo.texts.add_lines(('file-id', 'revid'), (), ['lines']) |
|
675 |
wg_tokens = repo.suspend_write_group() |
|
676 |
expected_pack_name = wg_tokens[0] + '.pack' |
|
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
677 |
expected_names = [wg_tokens[0] + ext for ext in |
678 |
('.rix', '.iix', '.tix', '.six')] |
|
679 |
if repo.chk_bytes is not None: |
|
680 |
expected_names.append(wg_tokens[0] + '.cix') |
|
681 |
expected_names.append(expected_pack_name) |
|
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
682 |
upload_transport = repo._pack_collection._upload_transport |
683 |
limbo_files = upload_transport.list_dir('') |
|
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
684 |
self.assertEqual(sorted(expected_names), sorted(limbo_files)) |
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
685 |
md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name)) |
686 |
self.assertEqual(wg_tokens[0], md5.hexdigest()) |
|
687 |
||
4343.3.8
by John Arbash Meinel
Some cleanup passes. |
688 |
def test_resume_chk_bytes(self): |
689 |
self.vfs_transport_factory = memory.MemoryServer |
|
690 |
repo = self.make_repository('repo', format=self.get_format()) |
|
691 |
if repo.chk_bytes is None: |
|
692 |
raise TestNotApplicable('no chk_bytes for this repository') |
|
693 |
token = repo.lock_write() |
|
694 |
self.addCleanup(repo.unlock) |
|
695 |
repo.start_write_group() |
|
696 |
text = 'a bit of text\n' |
|
697 |
key = ('sha1:' + osutils.sha_string(text),) |
|
698 |
repo.chk_bytes.add_lines(key, (), [text]) |
|
699 |
wg_tokens = repo.suspend_write_group() |
|
700 |
same_repo = repo.bzrdir.open_repository() |
|
701 |
same_repo.lock_write() |
|
702 |
self.addCleanup(same_repo.unlock) |
|
703 |
same_repo.resume_write_group(wg_tokens) |
|
704 |
self.assertEqual([key], list(same_repo.chk_bytes.keys())) |
|
705 |
self.assertEqual( |
|
706 |
text, same_repo.chk_bytes.get_record_stream([key], |
|
707 |
'unordered', True).next().get_bytes_as('fulltext')) |
|
708 |
same_repo.abort_write_group() |
|
709 |
self.assertEqual([], list(same_repo.chk_bytes.keys())) |
|
710 |
||
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
711 |
def test_resume_write_group_then_abort(self): |
712 |
# Create a repo, start a write group, insert some data, suspend.
|
|
713 |
self.vfs_transport_factory = memory.MemoryServer |
|
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
714 |
repo = self.make_repository('repo', format=self.get_format()) |
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
715 |
token = repo.lock_write() |
716 |
self.addCleanup(repo.unlock) |
|
717 |
repo.start_write_group() |
|
718 |
text_key = ('file-id', 'revid') |
|
719 |
repo.texts.add_lines(text_key, (), ['lines']) |
|
720 |
wg_tokens = repo.suspend_write_group() |
|
721 |
# Get a fresh repository object for the repo on the filesystem.
|
|
722 |
same_repo = repo.bzrdir.open_repository() |
|
723 |
# Resume
|
|
724 |
same_repo.lock_write() |
|
725 |
self.addCleanup(same_repo.unlock) |
|
726 |
same_repo.resume_write_group(wg_tokens) |
|
727 |
same_repo.abort_write_group() |
|
728 |
self.assertEqual( |
|
729 |
[], same_repo._pack_collection._upload_transport.list_dir('')) |
|
730 |
self.assertEqual( |
|
731 |
[], same_repo._pack_collection._pack_transport.list_dir('')) |
|
732 |
||
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
733 |
def test_commit_resumed_write_group(self): |
734 |
self.vfs_transport_factory = memory.MemoryServer |
|
735 |
repo = self.make_repository('repo', format=self.get_format()) |
|
736 |
token = repo.lock_write() |
|
737 |
self.addCleanup(repo.unlock) |
|
738 |
repo.start_write_group() |
|
739 |
text_key = ('file-id', 'revid') |
|
740 |
repo.texts.add_lines(text_key, (), ['lines']) |
|
741 |
wg_tokens = repo.suspend_write_group() |
|
742 |
# Get a fresh repository object for the repo on the filesystem.
|
|
743 |
same_repo = repo.bzrdir.open_repository() |
|
744 |
# Resume
|
|
745 |
same_repo.lock_write() |
|
746 |
self.addCleanup(same_repo.unlock) |
|
747 |
same_repo.resume_write_group(wg_tokens) |
|
748 |
same_repo.commit_write_group() |
|
749 |
expected_pack_name = wg_tokens[0] + '.pack' |
|
750 |
expected_names = [wg_tokens[0] + ext for ext in |
|
751 |
('.rix', '.iix', '.tix', '.six')] |
|
752 |
if repo.chk_bytes is not None: |
|
753 |
expected_names.append(wg_tokens[0] + '.cix') |
|
754 |
self.assertEqual( |
|
755 |
[], same_repo._pack_collection._upload_transport.list_dir('')) |
|
756 |
index_names = repo._pack_collection._index_transport.list_dir('') |
|
757 |
self.assertEqual(sorted(expected_names), sorted(index_names)) |
|
758 |
pack_names = repo._pack_collection._pack_transport.list_dir('') |
|
759 |
self.assertEqual([expected_pack_name], pack_names) |
|
760 |
||
4002.1.5
by Andrew Bennetts
Fix possible security issue with resuming write groups: make sure the token is well-formed so that it's not possible to steal a write group from another repo. |
761 |
def test_resume_malformed_token(self): |
762 |
self.vfs_transport_factory = memory.MemoryServer |
|
763 |
# Make a repository with a suspended write group
|
|
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
764 |
repo = self.make_repository('repo', format=self.get_format()) |
4002.1.5
by Andrew Bennetts
Fix possible security issue with resuming write groups: make sure the token is well-formed so that it's not possible to steal a write group from another repo. |
765 |
token = repo.lock_write() |
766 |
self.addCleanup(repo.unlock) |
|
767 |
repo.start_write_group() |
|
768 |
text_key = ('file-id', 'revid') |
|
769 |
repo.texts.add_lines(text_key, (), ['lines']) |
|
770 |
wg_tokens = repo.suspend_write_group() |
|
771 |
# Make a new repository
|
|
4343.3.7
by John Arbash Meinel
Update the suspend/resume/commit/abort_write_group tests for CHK1. |
772 |
new_repo = self.make_repository('new_repo', format=self.get_format()) |
4002.1.5
by Andrew Bennetts
Fix possible security issue with resuming write groups: make sure the token is well-formed so that it's not possible to steal a write group from another repo. |
773 |
token = new_repo.lock_write() |
774 |
self.addCleanup(new_repo.unlock) |
|
775 |
hacked_wg_token = ( |
|
776 |
'../../../../repo/.bzr/repository/upload/' + wg_tokens[0]) |
|
777 |
self.assertRaises( |
|
4002.1.7
by Andrew Bennetts
Rename UnresumableWriteGroups to UnresumableWriteGroup. |
778 |
errors.UnresumableWriteGroup, |
4002.1.5
by Andrew Bennetts
Fix possible security issue with resuming write groups: make sure the token is well-formed so that it's not possible to steal a write group from another repo. |
779 |
new_repo.resume_write_group, [hacked_wg_token]) |
780 |
||
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
781 |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
782 |
class TestPackRepositoryStacking(TestCaseWithTransport): |
783 |
||
784 |
"""Tests for stacking pack repositories"""
|
|
785 |
||
786 |
def setUp(self): |
|
787 |
if not self.format_supports_external_lookups: |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
788 |
raise TestNotApplicable("%r doesn't support stacking" |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
789 |
% (self.format_name,)) |
790 |
super(TestPackRepositoryStacking, self).setUp() |
|
791 |
||
792 |
def get_format(self): |
|
793 |
return bzrdir.format_registry.make_bzrdir(self.format_name) |
|
794 |
||
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
795 |
def test_stack_checks_rich_root_compatibility(self): |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
796 |
# early versions of the packing code relied on pack internals to
|
797 |
# stack, but the current version should be able to stack on any
|
|
798 |
# format.
|
|
799 |
#
|
|
800 |
# TODO: Possibly this should be run per-repository-format and raise
|
|
801 |
# TestNotApplicable on formats that don't support stacking. -- mbp
|
|
802 |
# 20080729
|
|
803 |
repo = self.make_repository('repo', format=self.get_format()) |
|
804 |
if repo.supports_rich_root(): |
|
805 |
# can only stack on repositories that have compatible internal
|
|
806 |
# metadata
|
|
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
807 |
if getattr(repo._format, 'supports_tree_reference', False): |
4343.3.27
by John Arbash Meinel
Now that dev6 supports external references, the tests for |
808 |
matching_format_name = 'pack-0.92-subtree' |
809 |
else: |
|
3735.2.9
by Robert Collins
Get a working chk_map using inventory implementation bootstrapped. |
810 |
if repo._format.supports_chks: |
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
811 |
matching_format_name = 'development6-rich-root' |
3735.2.9
by Robert Collins
Get a working chk_map using inventory implementation bootstrapped. |
812 |
else: |
4343.3.27
by John Arbash Meinel
Now that dev6 supports external references, the tests for |
813 |
matching_format_name = 'rich-root-pack' |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
814 |
mismatching_format_name = 'pack-0.92' |
815 |
else: |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
816 |
# We don't have a non-rich-root CHK format.
|
3735.2.9
by Robert Collins
Get a working chk_map using inventory implementation bootstrapped. |
817 |
if repo._format.supports_chks: |
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
818 |
raise AssertionError("no non-rich-root CHK formats known") |
3735.2.9
by Robert Collins
Get a working chk_map using inventory implementation bootstrapped. |
819 |
else: |
820 |
matching_format_name = 'pack-0.92' |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
821 |
mismatching_format_name = 'pack-0.92-subtree' |
822 |
base = self.make_repository('base', format=matching_format_name) |
|
823 |
repo.add_fallback_repository(base) |
|
824 |
# you can't stack on something with incompatible data
|
|
825 |
bad_repo = self.make_repository('mismatch', |
|
826 |
format=mismatching_format_name) |
|
827 |
e = self.assertRaises(errors.IncompatibleRepositories, |
|
828 |
repo.add_fallback_repository, bad_repo) |
|
829 |
self.assertContainsRe(str(e), |
|
830 |
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n' |
|
3735.2.9
by Robert Collins
Get a working chk_map using inventory implementation bootstrapped. |
831 |
r'.*Repository.*/repo/.*\n' |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
832 |
r'different rich-root support') |
833 |
||
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
834 |
def test_stack_checks_serializers_compatibility(self): |
835 |
repo = self.make_repository('repo', format=self.get_format()) |
|
836 |
if getattr(repo._format, 'supports_tree_reference', False): |
|
837 |
# can only stack on repositories that have compatible internal
|
|
838 |
# metadata
|
|
4343.3.27
by John Arbash Meinel
Now that dev6 supports external references, the tests for |
839 |
matching_format_name = 'pack-0.92-subtree' |
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
840 |
mismatching_format_name = 'rich-root-pack' |
841 |
else: |
|
842 |
if repo.supports_rich_root(): |
|
4343.3.27
by John Arbash Meinel
Now that dev6 supports external references, the tests for |
843 |
if repo._format.supports_chks: |
844 |
matching_format_name = 'development6-rich-root' |
|
845 |
else: |
|
846 |
matching_format_name = 'rich-root-pack' |
|
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
847 |
mismatching_format_name = 'pack-0.92-subtree' |
848 |
else: |
|
849 |
raise TestNotApplicable('No formats use non-v5 serializer' |
|
850 |
' without having rich-root also set') |
|
851 |
base = self.make_repository('base', format=matching_format_name) |
|
852 |
repo.add_fallback_repository(base) |
|
853 |
# you can't stack on something with incompatible data
|
|
854 |
bad_repo = self.make_repository('mismatch', |
|
855 |
format=mismatching_format_name) |
|
856 |
e = self.assertRaises(errors.IncompatibleRepositories, |
|
857 |
repo.add_fallback_repository, bad_repo) |
|
858 |
self.assertContainsRe(str(e), |
|
859 |
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n' |
|
3735.2.9
by Robert Collins
Get a working chk_map using inventory implementation bootstrapped. |
860 |
r'.*Repository.*/repo/.*\n' |
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
861 |
r'different serializers') |
862 |
||
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
863 |
def test_adding_pack_does_not_record_pack_names_from_other_repositories(self): |
864 |
base = self.make_branch_and_tree('base', format=self.get_format()) |
|
865 |
base.commit('foo') |
|
866 |
referencing = self.make_branch_and_tree('repo', format=self.get_format()) |
|
867 |
referencing.branch.repository.add_fallback_repository(base.branch.repository) |
|
868 |
referencing.commit('bar') |
|
869 |
new_instance = referencing.bzrdir.open_repository() |
|
870 |
new_instance.lock_read() |
|
871 |
self.addCleanup(new_instance.unlock) |
|
872 |
new_instance._pack_collection.ensure_loaded() |
|
873 |
self.assertEqual(1, len(new_instance._pack_collection.all_packs())) |
|
874 |
||
875 |
def test_autopack_only_considers_main_repo_packs(self): |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
876 |
format = self.get_format() |
877 |
base = self.make_branch_and_tree('base', format=format) |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
878 |
base.commit('foo') |
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
879 |
tree = self.make_branch_and_tree('repo', format=format) |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
880 |
tree.branch.repository.add_fallback_repository(base.branch.repository) |
881 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
882 |
# This test could be a little cheaper by replacing the packs
|
|
883 |
# attribute on the repository to allow a different pack distribution
|
|
884 |
# and max packs policy - so we are checking the policy is honoured
|
|
885 |
# in the test. But for now 11 commits is not a big deal in a single
|
|
886 |
# test.
|
|
887 |
for x in range(9): |
|
888 |
tree.commit('commit %s' % x) |
|
889 |
# there should be 9 packs:
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
890 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
891 |
self.assertEqual(9, len(list(index.iter_all_entries()))) |
892 |
# committing one more should coalesce to 1 of 10.
|
|
893 |
tree.commit('commit triggering pack') |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
894 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
895 |
self.assertEqual(1, len(list(index.iter_all_entries()))) |
896 |
# packing should not damage data
|
|
897 |
tree = tree.bzrdir.open_workingtree() |
|
898 |
check_result = tree.branch.repository.check( |
|
899 |
[tree.branch.last_revision()]) |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
900 |
nb_files = 5 # .pack, .rix, .iix, .tix, .six |
901 |
if tree.branch.repository._format.supports_chks: |
|
902 |
nb_files += 1 # .cix |
|
903 |
# We should have 10 x nb_files files in the obsolete_packs directory.
|
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
904 |
obsolete_files = list(trans.list_dir('obsolete_packs')) |
905 |
self.assertFalse('foo' in obsolete_files) |
|
906 |
self.assertFalse('bar' in obsolete_files) |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
907 |
self.assertEqual(10 * nb_files, len(obsolete_files)) |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
908 |
# XXX: Todo check packs obsoleted correctly - old packs and indices
|
909 |
# in the obsolete_packs directory.
|
|
910 |
large_pack_name = list(index.iter_all_entries())[0][1][0] |
|
911 |
# finally, committing again should not touch the large pack.
|
|
912 |
tree.commit('commit not triggering pack') |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
913 |
index = self.index_class(trans, 'pack-names', None) |
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
914 |
self.assertEqual(2, len(list(index.iter_all_entries()))) |
915 |
pack_names = [node[1][0] for node in index.iter_all_entries()] |
|
916 |
self.assertTrue(large_pack_name in pack_names) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
917 |
|
918 |
||
4343.3.33
by John Arbash Meinel
Clear KeyDependencies on abort/suspend/commit_write_group. |
919 |
class TestKeyDependencies(TestCaseWithTransport): |
920 |
||
921 |
def get_format(self): |
|
922 |
return bzrdir.format_registry.make_bzrdir(self.format_name) |
|
923 |
||
924 |
def create_source_and_target(self): |
|
925 |
builder = self.make_branch_builder('source', format=self.get_format()) |
|
926 |
builder.start_series() |
|
927 |
builder.build_snapshot('A-id', None, [ |
|
928 |
('add', ('', 'root-id', 'directory', None))]) |
|
929 |
builder.build_snapshot('B-id', ['A-id', 'ghost-id'], []) |
|
930 |
builder.finish_series() |
|
931 |
repo = self.make_repository('target') |
|
932 |
b = builder.get_branch() |
|
933 |
b.lock_read() |
|
934 |
self.addCleanup(b.unlock) |
|
935 |
repo.lock_write() |
|
936 |
self.addCleanup(repo.unlock) |
|
937 |
return b.repository, repo |
|
938 |
||
939 |
def test_key_dependencies_cleared_on_abort(self): |
|
940 |
source_repo, target_repo = self.create_source_and_target() |
|
941 |
target_repo.start_write_group() |
|
942 |
try: |
|
943 |
stream = source_repo.revisions.get_record_stream([('B-id',)], |
|
944 |
'unordered', True) |
|
945 |
target_repo.revisions.insert_record_stream(stream) |
|
946 |
key_refs = target_repo.revisions._index._key_dependencies |
|
947 |
self.assertEqual([('B-id',)], sorted(key_refs.get_referrers())) |
|
948 |
finally: |
|
949 |
target_repo.abort_write_group() |
|
950 |
self.assertEqual([], sorted(key_refs.get_referrers())) |
|
951 |
||
952 |
def test_key_dependencies_cleared_on_suspend(self): |
|
953 |
source_repo, target_repo = self.create_source_and_target() |
|
954 |
target_repo.start_write_group() |
|
955 |
try: |
|
956 |
stream = source_repo.revisions.get_record_stream([('B-id',)], |
|
957 |
'unordered', True) |
|
958 |
target_repo.revisions.insert_record_stream(stream) |
|
959 |
key_refs = target_repo.revisions._index._key_dependencies |
|
960 |
self.assertEqual([('B-id',)], sorted(key_refs.get_referrers())) |
|
961 |
finally: |
|
962 |
target_repo.suspend_write_group() |
|
963 |
self.assertEqual([], sorted(key_refs.get_referrers())) |
|
964 |
||
965 |
def test_key_dependencies_cleared_on_commit(self): |
|
966 |
source_repo, target_repo = self.create_source_and_target() |
|
967 |
target_repo.start_write_group() |
|
968 |
try: |
|
969 |
stream = source_repo.revisions.get_record_stream([('B-id',)], |
|
970 |
'unordered', True) |
|
971 |
target_repo.revisions.insert_record_stream(stream) |
|
972 |
key_refs = target_repo.revisions._index._key_dependencies |
|
973 |
self.assertEqual([('B-id',)], sorted(key_refs.get_referrers())) |
|
974 |
finally: |
|
975 |
target_repo.commit_write_group() |
|
976 |
self.assertEqual([], sorted(key_refs.get_referrers())) |
|
977 |
||
978 |
||
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
979 |
class TestSmartServerAutopack(TestCaseWithTransport): |
980 |
||
981 |
def setUp(self): |
|
982 |
super(TestSmartServerAutopack, self).setUp() |
|
983 |
# Create a smart server that publishes whatever the backing VFS server
|
|
984 |
# does.
|
|
985 |
self.smart_server = server.SmartTCPServer_for_testing() |
|
986 |
self.smart_server.setUp(self.get_server()) |
|
987 |
self.addCleanup(self.smart_server.tearDown) |
|
988 |
# Log all HPSS calls into self.hpss_calls.
|
|
989 |
client._SmartClient.hooks.install_named_hook( |
|
990 |
'call', self.capture_hpss_call, None) |
|
991 |
self.hpss_calls = [] |
|
992 |
||
993 |
def capture_hpss_call(self, params): |
|
994 |
self.hpss_calls.append(params.method) |
|
995 |
||
996 |
def get_format(self): |
|
997 |
return bzrdir.format_registry.make_bzrdir(self.format_name) |
|
998 |
||
4029.2.1
by Robert Collins
Support streaming push to stacked branches. |
999 |
def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self): |
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
1000 |
# Make local and remote repos
|
3735.2.98
by John Arbash Meinel
Merge bzr.dev 4032. Resolve the new streaming fetch. |
1001 |
format = self.get_format() |
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
1002 |
tree = self.make_branch_and_tree('local', format=format) |
1003 |
self.make_branch_and_tree('remote', format=format) |
|
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
1004 |
remote_branch_url = self.smart_server.get_url() + 'remote' |
1005 |
remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch() |
|
1006 |
# Make 9 local revisions, and push them one at a time to the remote
|
|
1007 |
# repo to produce 9 pack files.
|
|
1008 |
for x in range(9): |
|
1009 |
tree.commit('commit %s' % x) |
|
1010 |
tree.branch.push(remote_branch) |
|
1011 |
# Make one more push to trigger an autopack
|
|
1012 |
self.hpss_calls = [] |
|
1013 |
tree.commit('commit triggering pack') |
|
1014 |
tree.branch.push(remote_branch) |
|
4029.2.1
by Robert Collins
Support streaming push to stacked branches. |
1015 |
autopack_calls = len([call for call in self.hpss_calls if call == |
1016 |
'PackRepository.autopack']) |
|
1017 |
streaming_calls = len([call for call in self.hpss_calls if call == |
|
1018 |
'Repository.insert_stream']) |
|
1019 |
if autopack_calls: |
|
1020 |
# Non streaming server
|
|
1021 |
self.assertEqual(1, autopack_calls) |
|
1022 |
self.assertEqual(0, streaming_calls) |
|
1023 |
else: |
|
1024 |
# Streaming was used, which autopacks on the remote end.
|
|
1025 |
self.assertEqual(0, autopack_calls) |
|
1026 |
# NB: The 2 calls are because of the sanity check that the server
|
|
1027 |
# supports the verb (see remote.py:RemoteSink.insert_stream for
|
|
1028 |
# details).
|
|
1029 |
self.assertEqual(2, streaming_calls) |
|
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
1030 |
|
1031 |
||
4084.5.1
by Robert Collins
Bulk update all test adaptation into a single approach, using multiply_tests rather than test adapters. |
1032 |
def load_tests(basic_tests, module, loader):
    """Multiply the pack repository tests across all pack-based formats.

    Each scenario carries the bzrdir canned format name, the repository
    on-disk format string, whether the format supports external (stacked)
    lookups, and the index implementation class the format uses.  The
    scenario id is the format name.
    """
    # (format_name, on-disk format string, supports_external_lookups,
    #  index_class) for every pack-based repository format under test.
    format_data = [
        ('pack-0.92',
         "Bazaar pack repository format 1 (needs bzr 0.92)\n",
         False, GraphIndex),
        ('pack-0.92-subtree',
         "Bazaar pack repository format 1 "
         "with subtree support (needs bzr 0.92)\n",
         False, GraphIndex),
        ('1.6',
         "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
         True, GraphIndex),
        ('1.6.1-rich-root',
         "Bazaar RepositoryFormatKnitPack5RichRoot "
         "(bzr 1.6.1)\n",
         True, GraphIndex),
        ('1.9',
         "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
         True, BTreeGraphIndex),
        ('1.9-rich-root',
         "Bazaar RepositoryFormatKnitPack6RichRoot "
         "(bzr 1.9)\n",
         True, BTreeGraphIndex),
        ('development6-rich-root',
         'Bazaar development format - group compression '
         'and chk inventory (needs bzr.dev from 1.14)\n',
         True, BTreeGraphIndex),
        ]
    scenarios = []
    for name, disk_string, external, index in format_data:
        scenarios.append((name,
            dict(format_name=name,
                 format_string=disk_string,
                 format_supports_external_lookups=external,
                 index_class=index)))
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())