3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
1 |
# Copyright (C) 2008 Canonical Ltd
|
2 |
#
|
|
3 |
# This program is free software; you can redistribute it and/or modify
|
|
4 |
# it under the terms of the GNU General Public License as published by
|
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
7 |
#
|
|
8 |
# This program is distributed in the hope that it will be useful,
|
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
12 |
#
|
|
13 |
# You should have received a copy of the GNU General Public License
|
|
14 |
# along with this program; if not, write to the Free Software
|
|
15 |
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
16 |
||
17 |
"""Tests for pack repositories.
|
|
18 |
||
19 |
These tests are repeated for all pack-based repository formats.
|
|
20 |
"""
|
|
21 |
||
3582.3.4
by Martin Pool
Use cStringIO rather than StringIO |
22 |
from cStringIO import StringIO |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
23 |
from stat import S_ISDIR |
24 |
||
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
25 |
from bzrlib.btree_index import BTreeGraphIndex |
26 |
from bzrlib.index import GraphIndex |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
27 |
from bzrlib import ( |
28 |
bzrdir, |
|
29 |
errors, |
|
30 |
inventory, |
|
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
31 |
osutils, |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
32 |
progress, |
33 |
repository, |
|
34 |
revision as _mod_revision, |
|
35 |
symbol_versioning, |
|
36 |
tests, |
|
37 |
ui, |
|
38 |
upgrade, |
|
39 |
workingtree, |
|
40 |
)
|
|
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
41 |
from bzrlib.smart import ( |
42 |
client, |
|
43 |
server, |
|
44 |
)
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
45 |
from bzrlib.tests import ( |
46 |
TestCase, |
|
47 |
TestCaseWithTransport, |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
48 |
TestNotApplicable, |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
49 |
TestSkipped, |
50 |
)
|
|
51 |
from bzrlib.transport import ( |
|
52 |
fakenfs, |
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
53 |
memory, |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
54 |
get_transport, |
55 |
)
|
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
56 |
from bzrlib.tests.per_repository import TestCaseWithRepository |
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
57 |
|
58 |
||
59 |
class TestPackRepository(TestCaseWithTransport): |
|
60 |
"""Tests to be repeated across all pack-based formats.
|
|
61 |
||
62 |
The following are populated from the test scenario:
|
|
63 |
||
64 |
:ivar format_name: Registered name fo the format to test.
|
|
65 |
:ivar format_string: On-disk format marker.
|
|
66 |
:ivar format_supports_external_lookups: Boolean.
|
|
67 |
"""
|
|
68 |
||
69 |
def get_format(self):
    """Return the BzrDir format under test, built from the scenario name."""
    registry = bzrdir.format_registry
    return registry.make_bzrdir(self.format_name)
|
71 |
||
72 |
def test_attribute__fetch_order(self):
    """Packs do not need ordered data retrieval."""
    repo = self.make_repository('.', format=self.get_format())
    # Pack formats declare unordered fetch on the format object.
    self.assertEqual('unordered', repo._format._fetch_order)
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
77 |
|
78 |
def test_attribute__fetch_uses_deltas(self):
    """Packs reuse deltas."""
    repo = self.make_repository('.', format=self.get_format())
    # The format object advertises delta reuse during fetch.
    self.assertEqual(True, repo._format._fetch_uses_deltas)
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
83 |
|
84 |
def test_disk_layout(self):
    """A freshly created pack repository has the expected on-disk layout."""
    repo = self.make_repository('.', format=self.get_format())
    # Cycle a write lock once, in case locking has on-disk side effects.
    repo.lock_write()
    repo.unlock()
    t = repo.bzrdir.get_repository_transport(None)
    self.check_format(t)
    # XXX: no locks left when unlocked at the moment
    # self.assertEqualDiff('', t.get('lock').read())
    self.check_databases(t)
|
95 |
||
96 |
def check_format(self, t):
    """Check the repository's format file against the scenario's marker."""
    actual = t.get('format').read()
    self.assertEqualDiff(self.format_string, actual)  # from scenario
|
100 |
||
101 |
def assertHasNoKndx(self, t, knit_name):
    """Assert that knit_name has no index on t."""
    kndx_path = knit_name + '.kndx'
    self.assertFalse(t.has(kndx_path))
|
104 |
||
105 |
def assertHasNoKnit(self, t, knit_name):
    """Assert that knit_name has no knit data file on t.

    The previous docstring claimed the opposite ("exists on t"), but the
    assertion checks the '.knit' file is absent — pack repositories keep
    no knit-format data.
    """
    # no default content
    self.assertFalse(t.has(knit_name + '.knit'))
|
109 |
||
110 |
def check_databases(self, t):
    """check knit content for a repository."""
    # The pack conversion must leave no knit indices or data behind.
    for store in ('inventory', 'revisions', 'signatures'):
        self.assertHasNoKndx(t, store)
        self.assertHasNoKnit(t, store)
    self.assertFalse(t.has('knits'))
    # revision-indexes file-container directory
    names_index = self.index_class(t, 'pack-names', None)
    self.assertEqual([], list(names_index.iter_all_entries()))
    # The pack directory structure must exist.
    for dirname in ('packs', 'upload', 'indices', 'obsolete_packs'):
        self.assertTrue(S_ISDIR(t.stat(dirname).st_mode))
|
127 |
||
128 |
def test_shared_disk_layout(self):
    """A shared pack repository also carries the 'shared-storage' marker."""
    repo = self.make_repository('.', shared=True, format=self.get_format())
    # we want:
    t = repo.bzrdir.get_repository_transport(None)
    self.check_format(t)
    # XXX: no locks left when unlocked at the moment
    # self.assertEqualDiff('', t.get('lock').read())
    # We should have a 'shared-storage' marker file.
    self.assertEqualDiff('', t.get('shared-storage').read())
    self.check_databases(t)
|
139 |
||
140 |
def test_shared_no_tree_disk_layout(self):
    """Toggling working-tree creation adds/removes the marker file."""
    repo = self.make_repository('.', shared=True, format=self.get_format())
    repo.set_make_working_trees(False)
    # we want:
    t = repo.bzrdir.get_repository_transport(None)
    self.check_format(t)
    # XXX: no locks left when unlocked at the moment
    # self.assertEqualDiff('', t.get('lock').read())
    # We should have a 'shared-storage' marker file.
    self.assertEqualDiff('', t.get('shared-storage').read())
    # We should have a marker for the no-working-trees flag.
    self.assertEqualDiff('', t.get('no-working-trees').read())
    # The marker should go when we toggle the setting.
    repo.set_make_working_trees(True)
    self.assertFalse(t.has('no-working-trees'))
    self.check_databases(t)
|
157 |
||
158 |
def test_adding_revision_creates_pack_indices(self):
    """Committing one revision creates a pack with correctly-sized indices."""
    format = self.get_format()
    tree = self.make_branch_and_tree('.', format=format)
    trans = tree.branch.repository.bzrdir.get_repository_transport(None)
    # A fresh repository has no packs listed at all.
    self.assertEqual([],
        list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
    tree.commit('foobarbaz')
    index = self.index_class(trans, 'pack-names', None)
    index_nodes = list(index.iter_all_entries())
    self.assertEqual(1, len(index_nodes))
    node = index_nodes[0]
    name = node[1][0]
    # the pack sizes should be listed in the index
    pack_value = node[2]
    sizes = [int(digits) for digits in pack_value.split(' ')]
    # Each recorded size must match the on-disk index file size.
    # Suffix order presumably mirrors the value's field order
    # (.rix/.iix/.tix/.six) — TODO confirm against pack_repo.
    for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
        stat = trans.stat('indices/%s%s' % (name, suffix))
        self.assertEqual(size, stat.st_size)
|
176 |
||
177 |
def test_pulling_nothing_leads_to_no_new_names(self):
    """Fetching from an empty repository must not create any packs."""
    format = self.get_format()
    target = self.make_branch_and_tree('1', format=format)
    source = self.make_branch_and_tree('2', format=format)
    target.branch.repository.fetch(source.branch.repository)
    trans = target.branch.repository.bzrdir.get_repository_transport(None)
    names_index = self.index_class(trans, 'pack-names', None)
    self.assertEqual([], list(names_index.iter_all_entries()))
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
185 |
|
186 |
def test_commit_across_pack_shape_boundary_autopacks(self):
    """The tenth commit triggers an autopack that coalesces packs to one."""
    format = self.get_format()
    tree = self.make_branch_and_tree('.', format=format)
    trans = tree.branch.repository.bzrdir.get_repository_transport(None)
    # This test could be a little cheaper by replacing the packs
    # attribute on the repository to allow a different pack distribution
    # and max packs policy - so we are checking the policy is honoured
    # in the test. But for now 11 commits is not a big deal in a single
    # test.
    for x in range(9):
        tree.commit('commit %s' % x)
    # there should be 9 packs:
    index = self.index_class(trans, 'pack-names', None)
    self.assertEqual(9, len(list(index.iter_all_entries())))
    # insert some files in obsolete_packs which should be removed by pack.
    trans.put_bytes('obsolete_packs/foo', '123')
    trans.put_bytes('obsolete_packs/bar', '321')
    # committing one more should coalesce to 1 of 10.
    tree.commit('commit triggering pack')
    index = self.index_class(trans, 'pack-names', None)
    self.assertEqual(1, len(list(index.iter_all_entries())))
    # packing should not damage data
    tree = tree.bzrdir.open_workingtree()
    check_result = tree.branch.repository.check(
        [tree.branch.last_revision()])
    # We should have 50 (10x5) files in the obsolete_packs directory.
    # NOTE(review): presumably 10 obsoleted packs x 5 files each
    # (.pack + 4 indices) — confirm against the pack collection code.
    obsolete_files = list(trans.list_dir('obsolete_packs'))
    self.assertFalse('foo' in obsolete_files)
    self.assertFalse('bar' in obsolete_files)
    self.assertEqual(50, len(obsolete_files))
    # XXX: Todo check packs obsoleted correctly - old packs and indices
    # in the obsolete_packs directory.
    large_pack_name = list(index.iter_all_entries())[0][1][0]
    # finally, committing again should not touch the large pack.
    tree.commit('commit not triggering pack')
    index = self.index_class(trans, 'pack-names', None)
    self.assertEqual(2, len(list(index.iter_all_entries())))
    pack_names = [node[1][0] for node in index.iter_all_entries()]
    self.assertTrue(large_pack_name in pack_names)
|
225 |
||
226 |
def test_fail_obsolete_deletion(self):
    """Failing to delete obsolete pack files is not fatal.

    On NFS-like filesystems, files still open by another client cannot be
    deleted; _clear_obsolete_packs must leave them behind without raising.
    """
    # Renamed locals: the originals shadowed the module-level imports
    # 'server' (bzrlib.smart) and 'bzrdir'; also dropped the unused
    # 'format' local (the format was fetched again on the next line).
    nfs_server = fakenfs.FakeNFSServer()
    nfs_server.setUp()
    self.addCleanup(nfs_server.tearDown)
    transport = get_transport(nfs_server.get_url())
    control = self.get_format().initialize_on_transport(transport)
    repo = control.create_repository()
    repo_transport = control.get_repository_transport(None)
    self.assertTrue(repo_transport.has('obsolete_packs'))
    # these files are in use by another client and typically can't be deleted
    repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
    repo._pack_collection._clear_obsolete_packs()
    self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))
|
241 |
||
242 |
def test_pack_after_two_commits_packs_everything(self):
    """An explicit pack() collapses all existing packs into a single one."""
    tree = self.make_branch_and_tree('.', format=self.get_format())
    trans = tree.branch.repository.bzrdir.get_repository_transport(None)
    tree.commit('start')
    tree.commit('more work')
    tree.branch.repository.pack()
    # there should be 1 pack:
    index = self.index_class(trans, 'pack-names', None)
    self.assertEqual(1, len(list(index.iter_all_entries())))
    # ... and no revisions may be lost by the repack.
    self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
|
253 |
||
254 |
def test_pack_layout(self):
    """After pack(), revisions are laid out newest-first inside the pack."""
    format = self.get_format()
    tree = self.make_branch_and_tree('.', format=format)
    # NOTE(review): 'trans' is unused in this test — kept for symmetry
    # with the sibling tests; confirm before removing.
    trans = tree.branch.repository.bzrdir.get_repository_transport(None)
    tree.commit('start', rev_id='1')
    tree.commit('more work', rev_id='2')
    tree.branch.repository.pack()
    tree.lock_read()
    self.addCleanup(tree.unlock)
    pack = tree.branch.repository._pack_collection.get_pack_by_name(
        tree.branch.repository._pack_collection.names()[0])
    # revision access tends to be tip->ancestor, so ordering that way on
    # disk is a good idea.
    for _1, key, val, refs in pack.revision_index.iter_all_entries():
        if key == ('1',):
            # index value is '<flags><offset> <length>'; [1:] skips flags
            pos_1 = int(val[1:].split()[0])
        else:
            pos_2 = int(val[1:].split()[0])
    # tip ('2') must come before its ancestor ('1') in the pack file
    self.assertTrue(pos_2 < pos_1)
|
273 |
||
274 |
def test_pack_repositories_support_multiple_write_locks(self):
    """Two clients may hold write locks on the same pack repo at once."""
    format = self.get_format()
    self.make_repository('.', shared=True, format=format)
    r1 = repository.Repository.open('.')
    r2 = repository.Repository.open('.')
    r1.lock_write()
    self.addCleanup(r1.unlock)
    # A second writer must neither block nor raise: pack repositories
    # take no exclusive physical lock for writing.
    r2.lock_write()
    r2.unlock()
|
283 |
||
284 |
def _add_text(self, repo, fileid): |
|
285 |
"""Add a text to the repository within a write group."""
|
|
286 |
repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], []) |
|
287 |
||
288 |
def test_concurrent_writers_merge_new_packs(self):
    """Two concurrent write groups both commit; pack-names merges cleanly."""
    format = self.get_format()
    self.make_repository('.', shared=True, format=format)
    r1 = repository.Repository.open('.')
    r2 = repository.Repository.open('.')
    r1.lock_write()
    try:
        # access enough data to load the names list
        list(r1.all_revision_ids())
        r2.lock_write()
        try:
            # access enough data to load the names list
            list(r2.all_revision_ids())
            r1.start_write_group()
            try:
                r2.start_write_group()
                try:
                    self._add_text(r1, 'fileidr1')
                    self._add_text(r2, 'fileidr2')
                except:
                    r2.abort_write_group()
                    raise
            except:
                r1.abort_write_group()
                raise
            # both r1 and r2 have open write groups with data in them
            # created while the other's write group was open.
            # Commit both which requires a merge to the pack-names.
            try:
                r1.commit_write_group()
            except:
                r1.abort_write_group()
                r2.abort_write_group()
                raise
            r2.commit_write_group()
            # tell r1 to reload from disk
            r1._pack_collection.reset()
            # Now both repositories should know about both names
            r1._pack_collection.ensure_loaded()
            r2._pack_collection.ensure_loaded()
            self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
            self.assertEqual(2, len(r1._pack_collection.names()))
        finally:
            r2.unlock()
    finally:
        r1.unlock()
|
334 |
||
335 |
def test_concurrent_writer_second_preserves_dropping_a_pack(self):
    """A concurrent commit must not reinstate a pack another writer dropped."""
    format = self.get_format()
    self.make_repository('.', shared=True, format=format)
    r1 = repository.Repository.open('.')
    r2 = repository.Repository.open('.')
    # add a pack to drop
    r1.lock_write()
    try:
        r1.start_write_group()
        try:
            self._add_text(r1, 'fileidr1')
        except:
            r1.abort_write_group()
            raise
        else:
            r1.commit_write_group()
        r1._pack_collection.ensure_loaded()
        name_to_drop = r1._pack_collection.all_packs()[0].name
    finally:
        r1.unlock()
    r1.lock_write()
    try:
        # access enough data to load the names list
        list(r1.all_revision_ids())
        r2.lock_write()
        try:
            # access enough data to load the names list
            list(r2.all_revision_ids())
            r1._pack_collection.ensure_loaded()
            try:
                r2.start_write_group()
                try:
                    # in r1, drop the pack
                    r1._pack_collection._remove_pack_from_memory(
                        r1._pack_collection.get_pack_by_name(name_to_drop))
                    # in r2, add a pack
                    self._add_text(r2, 'fileidr2')
                except:
                    r2.abort_write_group()
                    raise
            except:
                r1._pack_collection.reset()
                raise
            # r1 has a changed names list, and r2 an open write group with
            # changes.
            # save r1, and then commit the r2 write group, which requires a
            # merge to the pack-names, which should not reinstate
            # name_to_drop
            try:
                r1._pack_collection._save_pack_names()
                r1._pack_collection.reset()
            except:
                r2.abort_write_group()
                raise
            try:
                r2.commit_write_group()
            except:
                r2.abort_write_group()
                raise
            # Now both repositories should know about just one name.
            r1._pack_collection.ensure_loaded()
            r2._pack_collection.ensure_loaded()
            self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
            self.assertEqual(1, len(r1._pack_collection.names()))
            self.assertFalse(name_to_drop in r1._pack_collection.names())
        finally:
            r2.unlock()
    finally:
        r1.unlock()
|
404 |
||
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
405 |
def test_concurrent_pack_triggers_reload(self):
    """A reader survives a concurrent repack by reloading pack-names."""
    # create 2 packs, which we will then collapse
    tree = self.make_branch_and_tree('tree')
    tree.lock_write()
    try:
        rev1 = tree.commit('one')
        rev2 = tree.commit('two')
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        try:
            # Now r2 has read the pack-names file, but will need to reload
            # it after r1 has repacked
            tree.branch.repository.pack()
            # get_parent_map must still succeed post-repack.
            self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2]))
        finally:
            r2.unlock()
    finally:
        tree.unlock()
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
423 |
|
3789.2.8
by John Arbash Meinel
Add a test that KnitPackRepository.get_record_stream retries when appropriate. |
424 |
def test_concurrent_pack_during_get_record_reloads(self):
    """A record stream retries after a repack happens mid-iteration."""
    tree = self.make_branch_and_tree('tree')
    tree.lock_write()
    try:
        rev1 = tree.commit('one')
        rev2 = tree.commit('two')
        keys = [(rev1,), (rev2,)]
        r2 = repository.Repository.open('tree')
        r2.lock_read()
        try:
            # At this point, we will start grabbing a record stream, and
            # trigger a repack mid-way
            packed = False
            result = {}
            record_stream = r2.revisions.get_record_stream(keys,
                'unordered', False)
            for record in record_stream:
                result[record.key] = record
                if not packed:
                    # Repack after the first record is read, moving the
                    # remaining records to a new pack file.
                    tree.branch.repository.pack()
                    packed = True
            # The first record will be found in the original location, but
            # after the pack, we have to reload to find the next record
            self.assertEqual(sorted(keys), sorted(result.keys()))
        finally:
            r2.unlock()
    finally:
        tree.unlock()
|
452 |
||
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
453 |
def test_lock_write_does_not_physically_lock(self):
    """Write-locking a pack repository takes no on-disk (physical) lock."""
    repo = self.make_repository('.', format=self.get_format())
    repo.lock_write()
    self.addCleanup(repo.unlock)
    self.assertFalse(repo.get_physical_lock_status())
|
458 |
||
459 |
def prepare_for_break_lock(self):
    """Install a silent UI factory whose stdin answers 'y' to break-lock."""
    # Setup the global ui factory state so that a break-lock method call
    # will find usable input in the input stream.
    saved_factory = ui.ui_factory
    def restoreFactory():
        ui.ui_factory = saved_factory
    self.addCleanup(restoreFactory)
    ui.ui_factory = ui.SilentUIFactory()
    ui.ui_factory.stdin = StringIO("y\n")
|
468 |
||
469 |
def test_break_lock_breaks_physical_lock(self):
    """break_lock from a second client releases the pack-names lock."""
    repo = self.make_repository('.', format=self.get_format())
    repo._pack_collection.lock_names()
    # Keep the physical lock alive past unlock() so repo2 has one to break.
    repo.control_files.leave_in_place()
    repo.unlock()
    repo2 = repository.Repository.open('.')
    self.assertTrue(repo.get_physical_lock_status())
    self.prepare_for_break_lock()
    repo2.break_lock()
    self.assertFalse(repo.get_physical_lock_status())
|
479 |
||
480 |
def test_broken_physical_locks_error_on__unlock_names_lock(self):
    """After break_lock by another client, _unlock_names raises LockBroken."""
    repo = self.make_repository('.', format=self.get_format())
    repo._pack_collection.lock_names()
    self.assertTrue(repo.get_physical_lock_status())
    repo2 = repository.Repository.open('.')
    self.prepare_for_break_lock()
    repo2.break_lock()
    # The original holder must get an error rather than silently
    # releasing a lock it no longer owns.
    self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)
|
488 |
||
489 |
def test_fetch_without_find_ghosts_ignores_ghosts(self):
    """Fetching 'tip' must not pull in a ghost ancestor."""
    # we want two repositories at this point:
    # one with a revision that is a ghost in the other
    # repository.
    # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
    # 'references' is present in both repositories, and 'tip' is present
    # just in has_ghost.
    # has_ghost       missing_ghost
    #------------------------------
    # 'ghost'             -
    # 'references'    'references'
    # 'tip'               -
    # In this test we fetch 'tip' which should not fetch 'ghost'
    has_ghost = self.make_repository('has_ghost', format=self.get_format())
    missing_ghost = self.make_repository('missing_ghost',
        format=self.get_format())

    def add_commit(repo, revision_id, parent_ids):
        # Build a minimal revision (root inventory + root text) by hand,
        # bypassing the commit machinery so ghost parents are accepted.
        repo.lock_write()
        repo.start_write_group()
        inv = inventory.Inventory(revision_id=revision_id)
        inv.root.revision = revision_id
        root_id = inv.root.file_id
        sha1 = repo.add_inventory(revision_id, inv, [])
        repo.texts.add_lines((root_id, revision_id), [], [])
        rev = _mod_revision.Revision(timestamp=0,
                                     timezone=None,
                                     committer="Foo Bar <foo@example.com>",
                                     message="Message",
                                     inventory_sha1=sha1,
                                     revision_id=revision_id)
        rev.parent_ids = parent_ids
        repo.add_revision(revision_id, rev)
        repo.commit_write_group()
        repo.unlock()
    add_commit(has_ghost, 'ghost', [])
    add_commit(has_ghost, 'references', ['ghost'])
    add_commit(missing_ghost, 'references', ['ghost'])
    add_commit(has_ghost, 'tip', ['references'])
    missing_ghost.fetch(has_ghost, 'tip')
    # missing ghost now has tip and not ghost.
    rev = missing_ghost.get_revision('tip')
    inv = missing_ghost.get_inventory('tip')
    self.assertRaises(errors.NoSuchRevision,
        missing_ghost.get_revision, 'ghost')
    self.assertRaises(errors.NoSuchRevision,
        missing_ghost.get_inventory, 'ghost')
|
536 |
||
4011.5.6
by Andrew Bennetts
Make sure it's not possible to commit a pack write group when any versioned file has missing compression parents. |
537 |
def make_write_ready_repo(self):
    """Return a new repository, write-locked with an open write group.

    Callers are responsible for aborting/committing the write group and
    unlocking the repository.
    """
    repo = self.make_repository('.', format=self.get_format())
    repo.lock_write()
    repo.start_write_group()
    return repo
|
542 |
||
543 |
def test_missing_inventories_compression_parent_prevents_commit(self):
    """An inventory with a missing compression parent blocks commit."""
    repo = self.make_write_ready_repo()
    repo.inventories._index._missing_compression_parents.add(('junk',))
    # Committing must fail, and must keep failing on a second attempt.
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    repo.abort_write_group()
    repo.unlock()
|
551 |
||
552 |
def test_missing_revisions_compression_parent_prevents_commit(self):
    """A revision with a missing compression parent blocks commit."""
    repo = self.make_write_ready_repo()
    repo.revisions._index._missing_compression_parents.add(('junk',))
    # Committing must fail, and must keep failing on a second attempt.
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    repo.abort_write_group()
    repo.unlock()
|
560 |
||
561 |
def test_missing_signatures_compression_parent_prevents_commit(self):
    """A signature with a missing compression parent blocks commit."""
    repo = self.make_write_ready_repo()
    repo.signatures._index._missing_compression_parents.add(('junk',))
    # Committing must fail, and must keep failing on a second attempt.
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    repo.abort_write_group()
    repo.unlock()
|
569 |
||
570 |
def test_missing_text_compression_parent_prevents_commit(self):
    """A text with a missing compression parent blocks commit_write_group.

    The second commit attempt must fail the same way.  (The previous
    'e = self.assertRaises(...)' bound a result that was never used;
    dropped for consistency with the sibling tests above.)
    """
    repo = self.make_write_ready_repo()
    key = ('some', 'junk')
    repo.texts._index._missing_compression_parents.add(key)
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
    repo.abort_write_group()
    repo.unlock()
|
578 |
||
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
579 |
def test_supports_external_lookups(self):
    """The format advertises external-lookup support per the scenario."""
    repo = self.make_repository('.', format=self.get_format())
    expected = self.format_supports_external_lookups
    self.assertEqual(expected, repo._format.supports_external_lookups)
|
583 |
||
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
584 |
def test_abort_write_group_does_not_raise_when_suppressed(self):
    """Similar to per_repository.test_write_group's test of the same name.

    Also requires that the exception is logged.
    """
    # Memory transport: renaming the repo out from under an open write
    # group is not reliable on all real filesystems.
    self.vfs_transport_factory = memory.MemoryServer
    repo = self.make_repository('repo')
    token = repo.lock_write()
    self.addCleanup(repo.unlock)
    repo.start_write_group()
    # Damage the repository on the filesystem
    self.get_transport('').rename('repo', 'foo')
    # abort_write_group will not raise an error
    self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
    # But it does log an error
    log_file = self._get_log(keep_log_file=True)
    self.assertContainsRe(log_file, 'abort_write_group failed')
    self.assertContainsRe(log_file, r'INFO bzr: ERROR \(ignored\):')
    if token is not None:
        # Leave the lock in place: the damaged repo cannot be unlocked
        # normally by the cleanup.
        repo.leave_lock_in_place()
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
604 |
|
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
605 |
def test_abort_write_group_does_raise_when_not_suppressed(self): |
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
606 |
self.vfs_transport_factory = memory.MemoryServer |
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
607 |
repo = self.make_repository('repo') |
608 |
token = repo.lock_write() |
|
609 |
self.addCleanup(repo.unlock) |
|
610 |
repo.start_write_group() |
|
611 |
# Damage the repository on the filesystem
|
|
612 |
self.get_transport('').rename('repo', 'foo') |
|
613 |
# abort_write_group will not raise an error
|
|
614 |
self.assertRaises(Exception, repo.abort_write_group) |
|
615 |
if token is not None: |
|
616 |
repo.leave_lock_in_place() |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
617 |
|
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
618 |
def test_suspend_write_group(self): |
619 |
self.vfs_transport_factory = memory.MemoryServer |
|
620 |
repo = self.make_repository('repo') |
|
621 |
token = repo.lock_write() |
|
622 |
self.addCleanup(repo.unlock) |
|
623 |
repo.start_write_group() |
|
624 |
repo.texts.add_lines(('file-id', 'revid'), (), ['lines']) |
|
625 |
wg_tokens = repo.suspend_write_group() |
|
626 |
expected_pack_name = wg_tokens[0] + '.pack' |
|
627 |
upload_transport = repo._pack_collection._upload_transport |
|
628 |
limbo_files = upload_transport.list_dir('') |
|
629 |
self.assertTrue(expected_pack_name in limbo_files, limbo_files) |
|
630 |
md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name)) |
|
631 |
self.assertEqual(wg_tokens[0], md5.hexdigest()) |
|
632 |
||
633 |
def test_resume_write_group_then_abort(self): |
|
634 |
# Create a repo, start a write group, insert some data, suspend.
|
|
635 |
self.vfs_transport_factory = memory.MemoryServer |
|
636 |
repo = self.make_repository('repo') |
|
637 |
token = repo.lock_write() |
|
638 |
self.addCleanup(repo.unlock) |
|
639 |
repo.start_write_group() |
|
640 |
text_key = ('file-id', 'revid') |
|
641 |
repo.texts.add_lines(text_key, (), ['lines']) |
|
642 |
wg_tokens = repo.suspend_write_group() |
|
643 |
# Get a fresh repository object for the repo on the filesystem.
|
|
644 |
same_repo = repo.bzrdir.open_repository() |
|
645 |
# Resume
|
|
646 |
same_repo.lock_write() |
|
647 |
self.addCleanup(same_repo.unlock) |
|
648 |
same_repo.resume_write_group(wg_tokens) |
|
649 |
same_repo.abort_write_group() |
|
650 |
self.assertEqual( |
|
651 |
[], same_repo._pack_collection._upload_transport.list_dir('')) |
|
652 |
self.assertEqual( |
|
653 |
[], same_repo._pack_collection._pack_transport.list_dir('')) |
|
654 |
||
4002.1.5
by Andrew Bennetts
Fix possible security issue with resuming write groups: make sure the token is well-formed so that it's not possible to steal a write group from another repo. |
655 |
def test_resume_malformed_token(self): |
656 |
self.vfs_transport_factory = memory.MemoryServer |
|
657 |
# Make a repository with a suspended write group
|
|
658 |
repo = self.make_repository('repo') |
|
659 |
token = repo.lock_write() |
|
660 |
self.addCleanup(repo.unlock) |
|
661 |
repo.start_write_group() |
|
662 |
text_key = ('file-id', 'revid') |
|
663 |
repo.texts.add_lines(text_key, (), ['lines']) |
|
664 |
wg_tokens = repo.suspend_write_group() |
|
665 |
# Make a new repository
|
|
666 |
new_repo = self.make_repository('new_repo') |
|
667 |
token = new_repo.lock_write() |
|
668 |
self.addCleanup(new_repo.unlock) |
|
669 |
hacked_wg_token = ( |
|
670 |
'../../../../repo/.bzr/repository/upload/' + wg_tokens[0]) |
|
671 |
self.assertRaises( |
|
4002.1.7
by Andrew Bennetts
Rename UnresumableWriteGroups to UnresumableWriteGroup. |
672 |
errors.UnresumableWriteGroup, |
4002.1.5
by Andrew Bennetts
Fix possible security issue with resuming write groups: make sure the token is well-formed so that it's not possible to steal a write group from another repo. |
673 |
new_repo.resume_write_group, [hacked_wg_token]) |
674 |
||
class TestPackRepositoryStacking(TestCaseWithTransport):

    """Tests for stacking pack repositories"""

    def setUp(self):
        # Skip (before doing any fixture work) for scenarios whose format
        # cannot do external lookups, i.e. cannot stack at all.
        if not self.format_supports_external_lookups:
            raise TestNotApplicable("%r doesn't support stacking"
                % (self.format_name,))
        super(TestPackRepositoryStacking, self).setUp()

    def get_format(self):
        """Return the BzrDirFormat for this scenario's canned format name."""
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_stack_checks_rich_root_compatibility(self):
        """Stacking is refused across differing rich-root support."""
        # early versions of the packing code relied on pack internals to
        # stack, but the current version should be able to stack on any
        # format.
        #
        # TODO: Possibly this should be run per-repository-format and raise
        # TestNotApplicable on formats that don't support stacking. -- mbp
        # 20080729
        repo = self.make_repository('repo', format=self.get_format())
        if repo.supports_rich_root():
            # can only stack on repositories that have compatible internal
            # metadata
            if getattr(repo._format, 'supports_tree_reference', False):
                matching_format_name = 'pack-0.92-subtree'
            else:
                matching_format_name = 'rich-root-pack'
            mismatching_format_name = 'pack-0.92'
        else:
            matching_format_name = 'pack-0.92'
            mismatching_format_name = 'pack-0.92-subtree'
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        # The error message must name both repositories and the reason.
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'KnitPackRepository.*/repo/.*\n'
            r'different rich-root support')

    def test_stack_checks_serializers_compatibility(self):
        """Stacking is refused across differing inventory serializers."""
        repo = self.make_repository('repo', format=self.get_format())
        if getattr(repo._format, 'supports_tree_reference', False):
            # can only stack on repositories that have compatible internal
            # metadata
            matching_format_name = 'pack-0.92-subtree'
            mismatching_format_name = 'rich-root-pack'
        else:
            if repo.supports_rich_root():
                matching_format_name = 'rich-root-pack'
                mismatching_format_name = 'pack-0.92-subtree'
            else:
                raise TestNotApplicable('No formats use non-v5 serializer'
                    ' without having rich-root also set')
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'KnitPackRepository.*/repo/.*\n'
            r'different serializers')

    def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
        """A commit in a stacked repo records only its own new pack."""
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        referencing = self.make_branch_and_tree('repo', format=self.get_format())
        referencing.branch.repository.add_fallback_repository(base.branch.repository)
        referencing.commit('bar')
        # Re-open from disk so we read the persisted pack-names, not cached
        # in-memory state.
        new_instance = referencing.bzrdir.open_repository()
        new_instance.lock_read()
        self.addCleanup(new_instance.unlock)
        new_instance._pack_collection.ensure_loaded()
        self.assertEqual(1, len(new_instance._pack_collection.all_packs()))

    def test_autopack_only_considers_main_repo_packs(self):
        """Autopack coalesces this repo's packs, ignoring fallback packs."""
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        tree = self.make_branch_and_tree('repo', format=self.get_format())
        tree.branch.repository.add_fallback_repository(base.branch.repository)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)
class TestSmartServerAutopack(TestCaseWithTransport):
    """Check that pushing over HPSS autopacks via an RPC, not via VFS ops."""

    def setUp(self):
        super(TestSmartServerAutopack, self).setUp()
        # Create a smart server that publishes whatever the backing VFS server
        # does.
        self.smart_server = server.SmartTCPServer_for_testing()
        self.smart_server.setUp(self.get_server())
        self.addCleanup(self.smart_server.tearDown)
        # Log all HPSS calls into self.hpss_calls.
        client._SmartClient.hooks.install_named_hook(
            'call', self.capture_hpss_call, None)
        self.hpss_calls = []

    def capture_hpss_call(self, params):
        # Hook callback: record just the RPC method name of each call.
        self.hpss_calls.append(params.method)

    def get_format(self):
        """Return the BzrDirFormat for this scenario's canned format name."""
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
        """The tenth push autopacks via RPC (or streams, which autopacks)."""
        # Make local and remote repos
        tree = self.make_branch_and_tree('local', format=self.get_format())
        self.make_branch_and_tree('remote', format=self.get_format())
        remote_branch_url = self.smart_server.get_url() + 'remote'
        remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
        # Make 9 local revisions, and push them one at a time to the remote
        # repo to produce 9 pack files.
        for x in range(9):
            tree.commit('commit %s' % x)
            tree.branch.push(remote_branch)
        # Make one more push to trigger an autopack
        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
        autopack_calls = len([call for call in self.hpss_calls if call ==
            'PackRepository.autopack'])
        streaming_calls = len([call for call in self.hpss_calls if call ==
            'Repository.insert_stream'])
        if autopack_calls:
            # Non streaming server
            self.assertEqual(1, autopack_calls)
            self.assertEqual(0, streaming_calls)
        else:
            # Streaming was used, which autopacks on the remote end.
            self.assertEqual(0, autopack_calls)
            # NB: The 2 calls are because of the sanity check that the server
            # supports the verb (see remote.py:RemoteSink.insert_stream for
            # details).
            self.assertEqual(2, streaming_calls)
def load_tests(basic_tests, module, loader):
    """Multiply the basic tests by one scenario per pack-based format.

    Each scenario supplies the bzrdir canned format name, the repository
    on-disk format string, whether the format supports external lookups
    (stacking), and the pack-names index class it uses.
    """
    scenarios_params = [
        dict(format_name='pack-0.92',
             format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='pack-0.92-subtree',
             format_string="Bazaar pack repository format 1 "
             "with subtree support (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='1.6',
             format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.6.1-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
             "(bzr 1.6.1)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.9',
             format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='1.9-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
             "(bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='development2',
             format_string="Bazaar development format 2 "
             "(needs bzr.dev from before 1.8)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='development2-subtree',
             format_string="Bazaar development format 2 "
             "with subtree support (needs bzr.dev from before 1.8)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        ]
    # Each scenario is named after its format.
    scenarios = []
    for params in scenarios_params:
        scenarios.append((params['format_name'], params))
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())