4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1 |
# Copyright (C) 2006-2010 Canonical Ltd
|
1685.1.63
by Martin Pool
Small Transport fixups |
2 |
#
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
3 |
# This program is free software; you can redistribute it and/or modify
|
4 |
# it under the terms of the GNU General Public License as published by
|
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
1685.1.63
by Martin Pool
Small Transport fixups |
7 |
#
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
8 |
# This program is distributed in the hope that it will be useful,
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
1685.1.63
by Martin Pool
Small Transport fixups |
12 |
#
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
13 |
# You should have received a copy of the GNU General Public License
|
14 |
# along with this program; if not, write to the Free Software
|
|
4183.7.1
by Sabin Iacob
update FSF mailing address |
15 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
16 |
|
17 |
"""Tests for the Repository facility that are not interface tests.
|
|
18 |
||
3689.1.4
by John Arbash Meinel
Doc strings that reference repository_implementations |
19 |
For interface tests see tests/per_repository/*.py.
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
20 |
|
21 |
For concrete class tests see this file, and for storage formats tests
|
|
22 |
also see this file.
|
|
23 |
"""
|
|
24 |
||
1773.4.1
by Martin Pool
Add pyflakes makefile target; fix many warnings |
25 |
from stat import S_ISDIR |
4789.25.4
by John Arbash Meinel
Turn a repository format 7 failure into a KnownFailure. |
26 |
import sys |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
27 |
|
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
28 |
import bzrlib |
5121.2.2
by Jelmer Vernooij
Remove more unused imports in the tests. |
29 |
from bzrlib.errors import (NoSuchFile, |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
30 |
UnknownFormatError, |
31 |
UnsupportedFormatError, |
|
32 |
)
|
|
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
33 |
from bzrlib import ( |
5365.5.20
by John Arbash Meinel
Add some tests that check the leaf factory is correct. |
34 |
btree_index, |
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
35 |
graph, |
36 |
tests, |
|
37 |
)
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
38 |
from bzrlib.btree_index import BTreeBuilder, BTreeGraphIndex |
5121.2.2
by Jelmer Vernooij
Remove more unused imports in the tests. |
39 |
from bzrlib.index import GraphIndex |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
40 |
from bzrlib.repository import RepositoryFormat |
2670.3.5
by Andrew Bennetts
Remove get_stream_as_bytes from KnitVersionedFile's API, make it a function in knitrepo.py instead. |
41 |
from bzrlib.tests import ( |
42 |
TestCase, |
|
43 |
TestCaseWithTransport, |
|
44 |
)
|
|
3446.2.1
by Martin Pool
Failure to delete an obsolete pack file should not be fatal. |
45 |
from bzrlib.transport import ( |
46 |
get_transport, |
|
47 |
)
|
|
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
48 |
from bzrlib import ( |
2535.3.41
by Andrew Bennetts
Add tests for InterRemoteToOther.is_compatible. |
49 |
bzrdir, |
50 |
errors, |
|
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
51 |
inventory, |
2929.3.5
by Vincent Ladeuil
New files, same warnings, same fixes. |
52 |
osutils, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
53 |
repository, |
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
54 |
revision as _mod_revision, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
55 |
upgrade, |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
56 |
versionedfile, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
57 |
workingtree, |
58 |
)
|
|
3735.42.5
by John Arbash Meinel
Change the tests so we now just use a direct test that _get_source is |
59 |
from bzrlib.repofmt import ( |
60 |
groupcompress_repo, |
|
61 |
knitrepo, |
|
62 |
pack_repo, |
|
63 |
weaverepo, |
|
64 |
)
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
65 |
|
66 |
||
67 |
class TestDefaultFormat(TestCase):
    """Tests for the default-repository-format registry plumbing."""

    def test_get_set_default_format(self):
        # The repository format class reported as default must track the
        # 'default' entry of the bzrdir format registry.
        old_default = bzrdir.format_registry.get('default')
        private_default = old_default().repository_format.__class__
        old_format = repository.RepositoryFormat.get_default_format()
        self.assertTrue(isinstance(old_format, private_default))
        def make_sample_bzrdir():
            # Factory for a metadir whose repository format is the
            # instrumented SampleRepositoryFormat defined in this file.
            my_bzrdir = bzrdir.BzrDirMetaFormat1()
            my_bzrdir.repository_format = SampleRepositoryFormat()
            return my_bzrdir
        bzrdir.format_registry.remove('default')
        bzrdir.format_registry.register('sample', make_sample_bzrdir, '')
        bzrdir.format_registry.set_default('sample')
        # creating a repository should now create an instrumented dir.
        try:
            # the default branch format is used by the meta dir format
            # which is not the default bzrdir format at this point
            dir = bzrdir.BzrDirMetaFormat1().initialize('memory:///')
            result = dir.create_repository()
            # SampleRepositoryFormat.initialize returns this marker string.
            self.assertEqual(result, 'A bzr repository dir')
        finally:
            # Restore the registry so later tests see the real default.
            bzrdir.format_registry.remove('default')
            bzrdir.format_registry.remove('sample')
            bzrdir.format_registry.register('default', old_default, '')
        self.assertIsInstance(repository.RepositoryFormat.get_default_format(),
                              old_format.__class__)
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
94 |
|
95 |
||
96 |
class SampleRepositoryFormat(repository.RepositoryFormat):
    """A sample format

    this format is initializable, unsupported to aid in testing the
    open and open(unsupported=True) routines.
    """

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Sample .bzr repository format."

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository in a BzrDir"""
        # Write only the format marker file; no real repository content.
        t = a_bzrdir.get_repository_transport(self)
        t.put_bytes('format', self.get_format_string())
        # Marker return value asserted by TestDefaultFormat.
        return 'A bzr repository dir'

    def is_supported(self):
        # Deliberately unsupported so Repository.open() refuses this format.
        return False

    def open(self, a_bzrdir, _found=False):
        # Returns a marker string instead of a repository object; tests
        # assert on this exact value.
        return "opened repository."
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
118 |
|
119 |
||
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
120 |
class TestRepositoryFormat(TestCaseWithTransport):
    """Tests for the Repository format detection used by the bzr meta dir facility."""

    def test_find_format(self):
        # is the right format object found for a repository?
        # create a branch with a few known format objects.
        self.build_tree(["foo/", "bar/"])
        def check_format(format, url):
            # Initialize a repository of the given format at url, then
            # verify find_format detects the same format class.
            dir = format._matchingbzrdir.initialize(url)
            format.initialize(dir)
            t = get_transport(url)
            found_format = repository.RepositoryFormat.find_format(dir)
            # assertTrue replaces the deprecated failUnless alias.
            self.assertTrue(isinstance(found_format, format.__class__))
        check_format(weaverepo.RepositoryFormat7(), "bar")

    def test_find_format_no_repository(self):
        # A control dir with no repository raises NoRepositoryPresent.
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        self.assertRaises(errors.NoRepositoryPresent,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_find_format_unknown_format(self):
        # An unregistered format string raises UnknownFormatError.
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        SampleRepositoryFormat().initialize(dir)
        self.assertRaises(UnknownFormatError,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_register_unregister_format(self):
        format = SampleRepositoryFormat()
        # make a control dir
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        # make a repo
        format.initialize(dir)
        # register a format for it.
        repository.RepositoryFormat.register_format(format)
        # which repository.Open will refuse (not supported)
        self.assertRaises(UnsupportedFormatError, repository.Repository.open,
                          self.get_url())
        # but open(unsupported) will work
        self.assertEqual(format.open(dir), "opened repository.")
        # unregister the format
        repository.RepositoryFormat.unregister_format(format)
|
163 |
||
164 |
||
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
165 |
class TestFormat6(TestCaseWithTransport):
    """On-disk behaviour tests for the format 6 weave repository."""

    def test_attribute__fetch_order(self):
        """Weaves need topological data insertion."""
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Weaves do not reuse deltas."""
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        self.assertEqual(False, repo._format._fetch_uses_deltas)

    def test_attribute__fetch_reconcile(self):
        """Weave repositories need a reconcile after fetch."""
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        self.assertEqual(True, repo._format._fetch_reconcile)

    def test_no_ancestry_weave(self):
        # Format 6 must not create an ancestry.weave file.
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        # We no longer need to create the ancestry.weave file
        # since it is *never* used.
        self.assertRaises(NoSuchFile,
                          control.transport.get,
                          'ancestry.weave')

    def test_supports_external_lookups(self):
        # Format 6 does not support stacking / external lookups.
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        self.assertFalse(repo._format.supports_external_lookups)
|
198 |
||
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
199 |
|
200 |
class TestFormat7(TestCaseWithTransport):
    """On-disk behaviour tests for the format 7 weave repository."""

    def test_attribute__fetch_order(self):
        """Weaves need topological data insertion."""
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Weaves do not reuse deltas."""
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        self.assertEqual(False, repo._format._fetch_uses_deltas)

    def test_attribute__fetch_reconcile(self):
        """Weave repositories need a reconcile after fetch."""
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        self.assertEqual(True, repo._format._fetch_reconcile)

    def test_disk_layout(self):
        # Verify the exact on-disk layout of a fresh format 7 repository.
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        # we want:
        # format 'Bazaar-NG Repository format 7'
        # lock ''
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())
        # Creating a file with id Foo:Bar results in a non-escaped file name on
        # disk.
        control.create_branch()
        tree = control.create_workingtree()
        tree.add(['foo'], ['Foo:Bar'], ['file'])
        tree.put_file_bytes_non_atomic('Foo:Bar', 'content\n')
        try:
            tree.commit('first post', rev_id='first')
        except errors.IllegalPath:
            # ':' is illegal in file names on Windows, so this file-id
            # cannot be stored there in repo format 7.
            if sys.platform != 'win32':
                raise
            self.knownFailure('Foo:Bar cannot be used as a file-id on windows'
                              ' in repo format 7')
            return
        self.assertEqualDiff(
            '# bzr weave file v5\n'
            'i\n'
            '1 7fe70820e08a1aac0ef224d9c66ab66831cc4ab1\n'
            'n first\n'
            '\n'
            'w\n'
            '{ 0\n'
            '. content\n'
            '}\n'
            'W\n',
            t.get('weaves/74/Foo%3ABar.weave').read())

    def test_shared_disk_layout(self):
        # A shared repository additionally carries a 'shared-storage' marker.
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        # we want:
        # format 'Bazaar-NG Repository format 7'
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        # lock is not present when unlocked
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())
        self.assertFalse(t.has('branch-lock'))

    def test_creates_lockdir(self):
        """Make sure it appears to be controlled by a LockDir existence"""
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        t = control.get_repository_transport(None)
        # TODO: Should check there is a 'lock' toplevel directory,
        # regardless of contents
        self.assertFalse(t.has('lock/held/info'))
        repo.lock_write()
        try:
            self.assertTrue(t.has('lock/held/info'))
        finally:
            # unlock so we don't get a warning about failing to do so
            repo.unlock()

    def test_uses_lockdir(self):
        """repo format 7 actually locks on lockdir"""
        base_url = self.get_url()
        control = bzrdir.BzrDirMetaFormat1().initialize(base_url)
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        t = control.get_repository_transport(None)
        repo.lock_write()
        repo.unlock()
        del repo
        # make sure the same lock is created by opening it
        repo = repository.Repository.open(base_url)
        repo.lock_write()
        self.assertTrue(t.has('lock/held/info'))
        repo.unlock()
        self.assertFalse(t.has('lock/held/info'))

    def test_shared_no_tree_disk_layout(self):
        # A shared repository with working trees disabled also carries a
        # 'no-working-trees' marker, removed again when trees are re-enabled.
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        repo.set_make_working_trees(False)
        # we want:
        # format 'Bazaar-NG Repository format 7'
        # lock ''
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        ## self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertEqualDiff('', t.get('no-working-trees').read())
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())

    def test_supports_external_lookups(self):
        # Format 7 does not support stacking / external lookups.
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        self.assertFalse(repo._format.supports_external_lookups)
|
351 |
||
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
352 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
353 |
class TestFormatKnit1(TestCaseWithTransport): |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
354 |
|
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
355 |
    def test_attribute__fetch_order(self):
        """Knits need topological data insertion."""
        repo = self.make_repository('.',
                format=bzrdir.format_registry.get('knit')())
        self.assertEqual('topological', repo._format._fetch_order)
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
360 |
|
361 |
    def test_attribute__fetch_uses_deltas(self):
        """Knits reuse deltas."""
        repo = self.make_repository('.',
                format=bzrdir.format_registry.get('knit')())
        self.assertEqual(True, repo._format._fetch_uses_deltas)
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
366 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
367 |
    def test_disk_layout(self):
        # Verify the exact on-disk layout of a fresh knit-format repository.
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = knitrepo.RepositoryFormatKnit1().initialize(control)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        # we want:
        # format 'Bazaar-NG Knit Repository Format 1'
        # lock: is a directory
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
                             t.get('format').read())
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
        self.check_knits(t)
        # Check per-file knits.
        branch = control.create_branch()
        tree = control.create_workingtree()
        tree.add(['foo'], ['Nasty-IdC:'], ['file'])
        tree.put_file_bytes_non_atomic('Nasty-IdC:', '')
        tree.commit('1st post', rev_id='foo')
        # The file-id is escaped in the knit file name on disk.
        self.assertHasKnit(t, 'knits/e8/%254easty-%2549d%2543%253a',
            '\nfoo fulltext 0 81 :')
|
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
394 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
395 |
    def assertHasKnit(self, t, knit_name, extra_content=''):
        """Assert that knit_name exists on t."""
        # A knit is present iff its .kndx index file exists with the
        # expected header (plus any expected index entries).
        self.assertEqualDiff('# bzr knit index 8\n' + extra_content,
                             t.get(knit_name + '.kndx').read())
399 |
||
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
400 |
    def check_knits(self, t):
        """check knit content for a repository."""
        # The three top-level knits every knit repository must carry.
        self.assertHasKnit(t, 'inventory')
        self.assertHasKnit(t, 'revisions')
        self.assertHasKnit(t, 'signatures')
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
405 |
|
406 |
    def test_shared_disk_layout(self):
        # A shared knit repository additionally carries a 'shared-storage'
        # marker file.
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True)
        # we want:
        # format 'Bazaar-NG Knit Repository Format 1'
        # lock: is a directory
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
                             t.get('format').read())
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
        self.check_knits(t)
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
424 |
|
425 |
def test_shared_no_tree_disk_layout(self): |
|
426 |
control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url()) |
|
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
427 |
repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
428 |
repo.set_make_working_trees(False) |
429 |
# we want:
|
|
430 |
# format 'Bazaar-NG Knit Repository Format 1'
|
|
431 |
# lock ''
|
|
432 |
# inventory.weave == empty_weave
|
|
433 |
# empty revision-store directory
|
|
434 |
# empty weaves directory
|
|
435 |
# a 'shared-storage' marker file.
|
|
436 |
t = control.get_repository_transport(None) |
|
437 |
self.assertEqualDiff('Bazaar-NG Knit Repository Format 1', |
|
438 |
t.get('format').read()) |
|
1553.5.57
by Martin Pool
[merge] sync from bzr.dev |
439 |
# XXX: no locks left when unlocked at the moment
|
440 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
441 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
442 |
self.assertEqualDiff('', t.get('no-working-trees').read()) |
|
443 |
repo.set_make_working_trees(True) |
|
444 |
self.assertFalse(t.has('no-working-trees')) |
|
445 |
self.assertTrue(S_ISDIR(t.stat('knits').st_mode)) |
|
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
446 |
self.check_knits(t) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
447 |
|
2917.2.1
by John Arbash Meinel
Fix bug #152360. The xml5 serializer should be using |
448 |
def test_deserialise_sets_root_revision(self): |
449 |
"""We must have a inventory.root.revision
|
|
450 |
||
451 |
Old versions of the XML5 serializer did not set the revision_id for
|
|
452 |
the whole inventory. So we grab the one from the expected text. Which
|
|
453 |
is valid when the api is not being abused.
|
|
454 |
"""
|
|
455 |
repo = self.make_repository('.', |
|
456 |
format=bzrdir.format_registry.get('knit')()) |
|
457 |
inv_xml = '<inventory format="5">\n</inventory>\n' |
|
4988.3.3
by Jelmer Vernooij
rename Repository.deserialise_inventory to Repository._deserialise_inventory. |
458 |
inv = repo._deserialise_inventory('test-rev-id', inv_xml) |
2917.2.1
by John Arbash Meinel
Fix bug #152360. The xml5 serializer should be using |
459 |
self.assertEqual('test-rev-id', inv.root.revision) |
460 |
||
461 |
def test_deserialise_uses_global_revision_id(self): |
|
462 |
"""If it is set, then we re-use the global revision id"""
|
|
463 |
repo = self.make_repository('.', |
|
464 |
format=bzrdir.format_registry.get('knit')()) |
|
465 |
inv_xml = ('<inventory format="5" revision_id="other-rev-id">\n' |
|
466 |
'</inventory>\n') |
|
467 |
# Arguably, the deserialise_inventory should detect a mismatch, and
|
|
468 |
# raise an error, rather than silently using one revision_id over the
|
|
469 |
# other.
|
|
4988.3.3
by Jelmer Vernooij
rename Repository.deserialise_inventory to Repository._deserialise_inventory. |
470 |
self.assertRaises(AssertionError, repo._deserialise_inventory, |
3169.2.2
by Robert Collins
Add a test to Repository.deserialise_inventory that the resulting ivnentory is the one asked for, and update relevant tests. Also tweak the model 1 to 2 regenerate inventories logic to use the revision trees parent marker which is more accurate in some cases. |
471 |
'test-rev-id', inv_xml) |
4988.3.3
by Jelmer Vernooij
rename Repository.deserialise_inventory to Repository._deserialise_inventory. |
472 |
inv = repo._deserialise_inventory('other-rev-id', inv_xml) |
2917.2.1
by John Arbash Meinel
Fix bug #152360. The xml5 serializer should be using |
473 |
self.assertEqual('other-rev-id', inv.root.revision) |
474 |
||
3221.3.1
by Robert Collins
* Repository formats have a new supported-feature attribute |
475 |
def test_supports_external_lookups(self): |
476 |
repo = self.make_repository('.', |
|
477 |
format=bzrdir.format_registry.get('knit')()) |
|
478 |
self.assertFalse(repo._format.supports_external_lookups) |
|
479 |
||
2535.3.53
by Andrew Bennetts
Remove get_stream_as_bytes from KnitVersionedFile's API, make it a function in knitrepo.py instead. |
480 |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
481 |
class DummyRepository(object): |
482 |
"""A dummy repository for testing."""
|
|
483 |
||
3452.2.11
by Andrew Bennetts
Merge thread. |
484 |
_format = None |
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
485 |
_serializer = None |
486 |
||
487 |
def supports_rich_root(self): |
|
4606.4.1
by Robert Collins
Prepare test_repository's inter_repository tests for 2a. |
488 |
if self._format is not None: |
489 |
return self._format.rich_root_data |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
490 |
return False |
491 |
||
3709.5.10
by Andrew Bennetts
Fix test failure caused by missing attributes on DummyRepository. |
492 |
def get_graph(self): |
493 |
raise NotImplementedError |
|
494 |
||
495 |
def get_parent_map(self, revision_ids): |
|
496 |
raise NotImplementedError |
|
497 |
||
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
498 |
|
499 |
class InterDummy(repository.InterRepository): |
|
500 |
"""An inter-repository optimised code path for DummyRepository.
|
|
501 |
||
502 |
This is for use during testing where we use DummyRepository as repositories
|
|
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
503 |
so that none of the default regsitered inter-repository classes will
|
2818.4.2
by Robert Collins
Review feedback. |
504 |
MATCH.
|
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
505 |
"""
|
506 |
||
507 |
@staticmethod
|
|
508 |
def is_compatible(repo_source, repo_target): |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
509 |
"""InterDummy is compatible with DummyRepository."""
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
510 |
return (isinstance(repo_source, DummyRepository) and |
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
511 |
isinstance(repo_target, DummyRepository)) |
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
512 |
|
513 |
||
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
514 |
class TestInterRepository(TestCaseWithTransport): |
515 |
||
516 |
def test_get_default_inter_repository(self): |
|
517 |
# test that the InterRepository.get(repo_a, repo_b) probes
|
|
518 |
# for a inter_repo class where is_compatible(repo_a, repo_b) returns
|
|
519 |
# true and returns a default inter_repo otherwise.
|
|
520 |
# This also tests that the default registered optimised interrepository
|
|
521 |
# classes do not barf inappropriately when a surprising repository type
|
|
522 |
# is handed to them.
|
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
523 |
dummy_a = DummyRepository() |
524 |
dummy_b = DummyRepository() |
|
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
525 |
self.assertGetsDefaultInterRepository(dummy_a, dummy_b) |
526 |
||
527 |
def assertGetsDefaultInterRepository(self, repo_a, repo_b): |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
528 |
"""Asserts that InterRepository.get(repo_a, repo_b) -> the default.
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
529 |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
530 |
The effective default is now InterSameDataRepository because there is
|
531 |
no actual sane default in the presence of incompatible data models.
|
|
532 |
"""
|
|
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
533 |
inter_repo = repository.InterRepository.get(repo_a, repo_b) |
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
534 |
self.assertEqual(repository.InterSameDataRepository, |
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
535 |
inter_repo.__class__) |
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
536 |
self.assertEqual(repo_a, inter_repo.source) |
537 |
self.assertEqual(repo_b, inter_repo.target) |
|
538 |
||
539 |
def test_register_inter_repository_class(self): |
|
540 |
# test that a optimised code path provider - a
|
|
541 |
# InterRepository subclass can be registered and unregistered
|
|
542 |
# and that it is correctly selected when given a repository
|
|
543 |
# pair that it returns true on for the is_compatible static method
|
|
544 |
# check
|
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
545 |
dummy_a = DummyRepository() |
4606.4.1
by Robert Collins
Prepare test_repository's inter_repository tests for 2a. |
546 |
dummy_a._format = RepositoryFormat() |
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
547 |
dummy_b = DummyRepository() |
4606.4.1
by Robert Collins
Prepare test_repository's inter_repository tests for 2a. |
548 |
dummy_b._format = RepositoryFormat() |
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
549 |
repo = self.make_repository('.') |
550 |
# hack dummies to look like repo somewhat.
|
|
551 |
dummy_a._serializer = repo._serializer |
|
4606.4.1
by Robert Collins
Prepare test_repository's inter_repository tests for 2a. |
552 |
dummy_a._format.supports_tree_reference = repo._format.supports_tree_reference |
553 |
dummy_a._format.rich_root_data = repo._format.rich_root_data |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
554 |
dummy_b._serializer = repo._serializer |
4606.4.1
by Robert Collins
Prepare test_repository's inter_repository tests for 2a. |
555 |
dummy_b._format.supports_tree_reference = repo._format.supports_tree_reference |
556 |
dummy_b._format.rich_root_data = repo._format.rich_root_data |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
557 |
repository.InterRepository.register_optimiser(InterDummy) |
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
558 |
try: |
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
559 |
# we should get the default for something InterDummy returns False
|
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
560 |
# to
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
561 |
self.assertFalse(InterDummy.is_compatible(dummy_a, repo)) |
562 |
self.assertGetsDefaultInterRepository(dummy_a, repo) |
|
563 |
# and we should get an InterDummy for a pair it 'likes'
|
|
564 |
self.assertTrue(InterDummy.is_compatible(dummy_a, dummy_b)) |
|
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
565 |
inter_repo = repository.InterRepository.get(dummy_a, dummy_b) |
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
566 |
self.assertEqual(InterDummy, inter_repo.__class__) |
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
567 |
self.assertEqual(dummy_a, inter_repo.source) |
568 |
self.assertEqual(dummy_b, inter_repo.target) |
|
569 |
finally: |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
570 |
repository.InterRepository.unregister_optimiser(InterDummy) |
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
571 |
# now we should get the default InterRepository object again.
|
572 |
self.assertGetsDefaultInterRepository(dummy_a, dummy_b) |
|
1534.1.33
by Robert Collins
Move copy_content_into into InterRepository and InterWeaveRepo, and disable the default codepath test as we have optimised paths for all current combinations. |
573 |
|
2241.1.17
by Martin Pool
Restore old InterWeave tests |
574 |
|
575 |
class TestInterWeaveRepo(TestCaseWithTransport): |
|
576 |
||
577 |
def test_is_compatible_and_registered(self): |
|
578 |
# InterWeaveRepo is compatible when either side
|
|
579 |
# is a format 5/6/7 branch
|
|
2241.1.20
by mbp at sourcefrog
update tests for new locations of weave repos |
580 |
from bzrlib.repofmt import knitrepo, weaverepo |
581 |
formats = [weaverepo.RepositoryFormat5(), |
|
582 |
weaverepo.RepositoryFormat6(), |
|
583 |
weaverepo.RepositoryFormat7()] |
|
584 |
incompatible_formats = [weaverepo.RepositoryFormat4(), |
|
585 |
knitrepo.RepositoryFormatKnit1(), |
|
2241.1.17
by Martin Pool
Restore old InterWeave tests |
586 |
]
|
587 |
repo_a = self.make_repository('a') |
|
588 |
repo_b = self.make_repository('b') |
|
589 |
is_compatible = repository.InterWeaveRepo.is_compatible |
|
590 |
for source in incompatible_formats: |
|
591 |
# force incompatible left then right
|
|
592 |
repo_a._format = source |
|
593 |
repo_b._format = formats[0] |
|
594 |
self.assertFalse(is_compatible(repo_a, repo_b)) |
|
595 |
self.assertFalse(is_compatible(repo_b, repo_a)) |
|
596 |
for source in formats: |
|
597 |
repo_a._format = source |
|
598 |
for target in formats: |
|
599 |
repo_b._format = target |
|
600 |
self.assertTrue(is_compatible(repo_a, repo_b)) |
|
601 |
self.assertEqual(repository.InterWeaveRepo, |
|
602 |
repository.InterRepository.get(repo_a, |
|
603 |
repo_b).__class__) |
|
604 |
||
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
605 |
|
606 |
class TestRepositoryConverter(TestCaseWithTransport): |
|
607 |
||
608 |
def test_convert_empty(self): |
|
609 |
t = get_transport(self.get_url('.')) |
|
610 |
t.mkdir('repository') |
|
611 |
repo_dir = bzrdir.BzrDirMetaFormat1().initialize('repository') |
|
2241.1.4
by Martin Pool
Moved old weave-based repository formats into bzrlib.repofmt.weaverepo. |
612 |
repo = weaverepo.RepositoryFormat7().initialize(repo_dir) |
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
613 |
target_format = knitrepo.RepositoryFormatKnit1() |
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
614 |
converter = repository.CopyConverter(target_format) |
1594.1.3
by Robert Collins
Fixup pb usage to use nested_progress_bar. |
615 |
pb = bzrlib.ui.ui_factory.nested_progress_bar() |
616 |
try: |
|
617 |
converter.convert(repo, pb) |
|
618 |
finally: |
|
619 |
pb.finished() |
|
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
620 |
repo = repo_dir.open_repository() |
621 |
self.assertTrue(isinstance(target_format, repo._format.__class__)) |
|
1843.2.5
by Aaron Bentley
Add test of _unescape_xml |
622 |
|
623 |
||
624 |
class TestMisc(TestCase): |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
625 |
|
1843.2.5
by Aaron Bentley
Add test of _unescape_xml |
626 |
def test_unescape_xml(self): |
627 |
"""We get some kind of error when malformed entities are passed"""
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
628 |
self.assertRaises(KeyError, repository._unescape_xml, 'foo&bar;') |
1910.2.13
by Aaron Bentley
Start work on converter |
629 |
|
630 |
||
2255.2.211
by Robert Collins
Remove knit2 repository format- it has never been supported. |
631 |
class TestRepositoryFormatKnit3(TestCaseWithTransport): |
1910.2.13
by Aaron Bentley
Start work on converter |
632 |
|
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
633 |
def test_attribute__fetch_order(self): |
634 |
"""Knits need topological data insertion."""
|
|
635 |
format = bzrdir.BzrDirMetaFormat1() |
|
636 |
format.repository_format = knitrepo.RepositoryFormatKnit3() |
|
637 |
repo = self.make_repository('.', format=format) |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
638 |
self.assertEqual('topological', repo._format._fetch_order) |
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
639 |
|
640 |
def test_attribute__fetch_uses_deltas(self): |
|
641 |
"""Knits reuse deltas."""
|
|
642 |
format = bzrdir.BzrDirMetaFormat1() |
|
643 |
format.repository_format = knitrepo.RepositoryFormatKnit3() |
|
644 |
repo = self.make_repository('.', format=format) |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
645 |
self.assertEqual(True, repo._format._fetch_uses_deltas) |
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
646 |
|
1910.2.13
by Aaron Bentley
Start work on converter |
647 |
def test_convert(self): |
648 |
"""Ensure the upgrade adds weaves for roots"""
|
|
1910.2.35
by Aaron Bentley
Better fix for convesion test |
649 |
format = bzrdir.BzrDirMetaFormat1() |
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
650 |
format.repository_format = knitrepo.RepositoryFormatKnit1() |
1910.2.35
by Aaron Bentley
Better fix for convesion test |
651 |
tree = self.make_branch_and_tree('.', format) |
1910.2.13
by Aaron Bentley
Start work on converter |
652 |
tree.commit("Dull commit", rev_id="dull") |
653 |
revision_tree = tree.branch.repository.revision_tree('dull') |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
654 |
revision_tree.lock_read() |
655 |
try: |
|
656 |
self.assertRaises(errors.NoSuchFile, revision_tree.get_file_lines, |
|
657 |
revision_tree.inventory.root.file_id) |
|
658 |
finally: |
|
659 |
revision_tree.unlock() |
|
1910.2.13
by Aaron Bentley
Start work on converter |
660 |
format = bzrdir.BzrDirMetaFormat1() |
2255.2.211
by Robert Collins
Remove knit2 repository format- it has never been supported. |
661 |
format.repository_format = knitrepo.RepositoryFormatKnit3() |
1910.2.13
by Aaron Bentley
Start work on converter |
662 |
upgrade.Convert('.', format) |
1910.2.27
by Aaron Bentley
Fixed conversion test |
663 |
tree = workingtree.WorkingTree.open('.') |
1910.2.13
by Aaron Bentley
Start work on converter |
664 |
revision_tree = tree.branch.repository.revision_tree('dull') |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
665 |
revision_tree.lock_read() |
666 |
try: |
|
667 |
revision_tree.get_file_lines(revision_tree.inventory.root.file_id) |
|
668 |
finally: |
|
669 |
revision_tree.unlock() |
|
1910.2.27
by Aaron Bentley
Fixed conversion test |
670 |
tree.commit("Another dull commit", rev_id='dull2') |
671 |
revision_tree = tree.branch.repository.revision_tree('dull2') |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
672 |
revision_tree.lock_read() |
673 |
self.addCleanup(revision_tree.unlock) |
|
1910.2.27
by Aaron Bentley
Fixed conversion test |
674 |
self.assertEqual('dull', revision_tree.inventory.root.revision) |
2220.2.2
by Martin Pool
Add tag command and basic implementation |
675 |
|
3221.3.1
by Robert Collins
* Repository formats have a new supported-feature attribute |
676 |
def test_supports_external_lookups(self): |
677 |
format = bzrdir.BzrDirMetaFormat1() |
|
678 |
format.repository_format = knitrepo.RepositoryFormatKnit3() |
|
679 |
repo = self.make_repository('.', format=format) |
|
680 |
self.assertFalse(repo._format.supports_external_lookups) |
|
681 |
||
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
682 |
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
683 |
class Test2a(tests.TestCaseWithMemoryTransport): |
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
684 |
|
5365.5.20
by John Arbash Meinel
Add some tests that check the leaf factory is correct. |
685 |
def test_chk_bytes_uses_custom_btree_parser(self): |
686 |
mt = self.make_branch_and_memory_tree('test', format='2a') |
|
687 |
mt.lock_write() |
|
688 |
self.addCleanup(mt.unlock) |
|
689 |
mt.add([''], ['root-id']) |
|
690 |
mt.commit('first') |
|
691 |
index = mt.branch.repository.chk_bytes._index._graph_index._indices[0] |
|
692 |
self.assertEqual(btree_index._gcchk_factory, index._leaf_factory) |
|
693 |
# It should also work if we re-open the repo
|
|
694 |
repo = mt.branch.repository.bzrdir.open_repository() |
|
695 |
repo.lock_read() |
|
696 |
self.addCleanup(repo.unlock) |
|
697 |
index = repo.chk_bytes._index._graph_index._indices[0] |
|
698 |
self.assertEqual(btree_index._gcchk_factory, index._leaf_factory) |
|
699 |
||
4634.20.1
by Robert Collins
Fix bug 402652 by recompressing all texts that are streamed - slightly slower at fetch, substantially faster and more compact at read. |
700 |
def test_fetch_combines_groups(self): |
701 |
builder = self.make_branch_builder('source', format='2a') |
|
702 |
builder.start_series() |
|
703 |
builder.build_snapshot('1', None, [ |
|
704 |
('add', ('', 'root-id', 'directory', '')), |
|
705 |
('add', ('file', 'file-id', 'file', 'content\n'))]) |
|
706 |
builder.build_snapshot('2', ['1'], [ |
|
707 |
('modify', ('file-id', 'content-2\n'))]) |
|
708 |
builder.finish_series() |
|
709 |
source = builder.get_branch() |
|
710 |
target = self.make_repository('target', format='2a') |
|
711 |
target.fetch(source.repository) |
|
712 |
target.lock_read() |
|
4665.3.2
by John Arbash Meinel
An alternative implementation that passes both tests. |
713 |
self.addCleanup(target.unlock) |
4634.20.1
by Robert Collins
Fix bug 402652 by recompressing all texts that are streamed - slightly slower at fetch, substantially faster and more compact at read. |
714 |
details = target.texts._index.get_build_details( |
715 |
[('file-id', '1',), ('file-id', '2',)]) |
|
716 |
file_1_details = details[('file-id', '1')] |
|
717 |
file_2_details = details[('file-id', '2')] |
|
718 |
# The index, and what to read off disk, should be the same for both
|
|
719 |
# versions of the file.
|
|
720 |
self.assertEqual(file_1_details[0][:3], file_2_details[0][:3]) |
|
721 |
||
4634.23.1
by Robert Collins
Cherrypick from bzr.dev: Fix bug 402652: recompress badly packed groups during fetch. (John Arbash Meinel, Robert Collins) |
722 |
def test_fetch_combines_groups(self): |
723 |
builder = self.make_branch_builder('source', format='2a') |
|
724 |
builder.start_series() |
|
725 |
builder.build_snapshot('1', None, [ |
|
726 |
('add', ('', 'root-id', 'directory', '')), |
|
727 |
('add', ('file', 'file-id', 'file', 'content\n'))]) |
|
728 |
builder.build_snapshot('2', ['1'], [ |
|
729 |
('modify', ('file-id', 'content-2\n'))]) |
|
730 |
builder.finish_series() |
|
731 |
source = builder.get_branch() |
|
732 |
target = self.make_repository('target', format='2a') |
|
733 |
target.fetch(source.repository) |
|
734 |
target.lock_read() |
|
735 |
self.addCleanup(target.unlock) |
|
736 |
details = target.texts._index.get_build_details( |
|
737 |
[('file-id', '1',), ('file-id', '2',)]) |
|
738 |
file_1_details = details[('file-id', '1')] |
|
739 |
file_2_details = details[('file-id', '2')] |
|
740 |
# The index, and what to read off disk, should be the same for both
|
|
741 |
# versions of the file.
|
|
742 |
self.assertEqual(file_1_details[0][:3], file_2_details[0][:3]) |
|
743 |
||
744 |
def test_fetch_combines_groups(self): |
|
745 |
builder = self.make_branch_builder('source', format='2a') |
|
746 |
builder.start_series() |
|
747 |
builder.build_snapshot('1', None, [ |
|
748 |
('add', ('', 'root-id', 'directory', '')), |
|
749 |
('add', ('file', 'file-id', 'file', 'content\n'))]) |
|
750 |
builder.build_snapshot('2', ['1'], [ |
|
751 |
('modify', ('file-id', 'content-2\n'))]) |
|
752 |
builder.finish_series() |
|
753 |
source = builder.get_branch() |
|
754 |
target = self.make_repository('target', format='2a') |
|
755 |
target.fetch(source.repository) |
|
756 |
target.lock_read() |
|
757 |
self.addCleanup(target.unlock) |
|
758 |
details = target.texts._index.get_build_details( |
|
759 |
[('file-id', '1',), ('file-id', '2',)]) |
|
760 |
file_1_details = details[('file-id', '1')] |
|
761 |
file_2_details = details[('file-id', '2')] |
|
762 |
# The index, and what to read off disk, should be the same for both
|
|
763 |
# versions of the file.
|
|
764 |
self.assertEqual(file_1_details[0][:3], file_2_details[0][:3]) |
|
765 |
||
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
766 |
def test_format_pack_compresses_True(self): |
767 |
repo = self.make_repository('repo', format='2a') |
|
768 |
self.assertTrue(repo._format.pack_compresses) |
|
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
769 |
|
770 |
def test_inventories_use_chk_map_with_parent_base_dict(self): |
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
771 |
tree = self.make_branch_and_memory_tree('repo', format="2a") |
772 |
tree.lock_write() |
|
773 |
tree.add([''], ['TREE_ROOT']) |
|
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
774 |
revid = tree.commit("foo") |
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
775 |
tree.unlock() |
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
776 |
tree.lock_read() |
777 |
self.addCleanup(tree.unlock) |
|
778 |
inv = tree.branch.repository.get_inventory(revid) |
|
3735.2.41
by Robert Collins
Make the parent_id_basename index be updated during CHKInventory.apply_delta. |
779 |
self.assertNotEqual(None, inv.parent_id_basename_to_file_id) |
780 |
inv.parent_id_basename_to_file_id._ensure_root() |
|
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
781 |
inv.id_to_entry._ensure_root() |
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
782 |
self.assertEqual(65536, inv.id_to_entry._root_node.maximum_size) |
783 |
self.assertEqual(65536, |
|
3735.2.41
by Robert Collins
Make the parent_id_basename index be updated during CHKInventory.apply_delta. |
784 |
inv.parent_id_basename_to_file_id._root_node.maximum_size) |
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
785 |
|
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
786 |
def test_autopack_unchanged_chk_nodes(self): |
787 |
# at 20 unchanged commits, chk pages are packed that are split into
|
|
788 |
# two groups such that the new pack being made doesn't have all its
|
|
789 |
# pages in the source packs (though they are in the repository).
|
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
790 |
# Use a memory backed repository, we don't need to hit disk for this
|
791 |
tree = self.make_branch_and_memory_tree('tree', format='2a') |
|
792 |
tree.lock_write() |
|
793 |
self.addCleanup(tree.unlock) |
|
794 |
tree.add([''], ['TREE_ROOT']) |
|
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
795 |
for pos in range(20): |
796 |
tree.commit(str(pos)) |
|
797 |
||
798 |
def test_pack_with_hint(self): |
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
799 |
tree = self.make_branch_and_memory_tree('tree', format='2a') |
800 |
tree.lock_write() |
|
801 |
self.addCleanup(tree.unlock) |
|
802 |
tree.add([''], ['TREE_ROOT']) |
|
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
803 |
# 1 commit to leave untouched
|
804 |
tree.commit('1') |
|
805 |
to_keep = tree.branch.repository._pack_collection.names() |
|
806 |
# 2 to combine
|
|
807 |
tree.commit('2') |
|
808 |
tree.commit('3') |
|
809 |
all = tree.branch.repository._pack_collection.names() |
|
810 |
combine = list(set(all) - set(to_keep)) |
|
811 |
self.assertLength(3, all) |
|
812 |
self.assertLength(2, combine) |
|
813 |
tree.branch.repository.pack(hint=combine) |
|
814 |
final = tree.branch.repository._pack_collection.names() |
|
815 |
self.assertLength(2, final) |
|
816 |
self.assertFalse(combine[0] in final) |
|
817 |
self.assertFalse(combine[1] in final) |
|
818 |
self.assertSubset(to_keep, final) |
|
819 |
||
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
820 |
def test_stream_source_to_gc(self): |
4462.2.1
by Robert Collins
Add new attribute to RepositoryFormat pack_compresses, hinting when pack can be useful. |
821 |
source = self.make_repository('source', format='2a') |
822 |
target = self.make_repository('target', format='2a') |
|
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
823 |
stream = source._get_source(target._format) |
824 |
self.assertIsInstance(stream, groupcompress_repo.GroupCHKStreamSource) |
|
825 |
||
826 |
def test_stream_source_to_non_gc(self): |
|
4462.2.1
by Robert Collins
Add new attribute to RepositoryFormat pack_compresses, hinting when pack can be useful. |
827 |
source = self.make_repository('source', format='2a') |
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
828 |
target = self.make_repository('target', format='rich-root-pack') |
829 |
stream = source._get_source(target._format) |
|
830 |
# We don't want the child GroupCHKStreamSource
|
|
831 |
self.assertIs(type(stream), repository.StreamSource) |
|
832 |
||
4360.4.9
by John Arbash Meinel
Merge bzr.dev, bringing in the gc stacking fixes. |
833 |
    def test_get_stream_for_missing_keys_includes_all_chk_refs(self):
        """get_stream_for_missing_keys must stream every chk page an
        inventory references, not just the pages new in that revision."""
        source_builder = self.make_branch_builder('source',
                            format='2a')
        # We have to build a fairly large tree, so that we are sure the chk
        # pages will have split into multiple pages.
        entries = [('add', ('', 'a-root-id', 'directory', None))]
        for i in 'abcdefghijklmnopqrstuvwxyz123456789':
            for j in 'abcdefghijklmnopqrstuvwxyz123456789':
                fname = i + j
                fid = fname + '-id'
                content = 'content for %s\n' % (fname,)
                entries.append(('add', (fname, fid, 'file', content)))
        source_builder.start_series()
        source_builder.build_snapshot('rev-1', None, entries)
        # Now change a few of them, so we get a few new pages for the second
        # revision
        source_builder.build_snapshot('rev-2', ['rev-1'], [
            ('modify', ('aa-id', 'new content for aa-id\n')),
            ('modify', ('cc-id', 'new content for cc-id\n')),
            ('modify', ('zz-id', 'new content for zz-id\n')),
        ])
        source_builder.finish_series()
        source_branch = source_builder.get_branch()
        source_branch.lock_read()
        self.addCleanup(source_branch.unlock)
        target = self.make_repository('target', format='2a')
        source = source_branch.repository._get_source(target._format)
        self.assertIsInstance(source, groupcompress_repo.GroupCHKStreamSource)

        # On a regular pass, getting the inventories and chk pages for rev-2
        # would only get the newly created chk pages
        search = graph.SearchResult(set(['rev-2']), set(['rev-1']), 1,
                                    set(['rev-2']))
        simple_chk_records = []
        for vf_name, substream in source.get_stream(search):
            if vf_name == 'chk_bytes':
                for record in substream:
                    simple_chk_records.append(record.key)
            else:
                # Other substreams must still be consumed to drive the
                # stream forward, even though their records are irrelevant.
                for _ in substream:
                    continue
        # The pages which actually changed, plus interior nodes.
        # NOTE(review): an earlier comment said "3 pages, the root
        # (InternalNode) + 2 pages which actually changed", but the expected
        # list below contains 4 keys — confirm which description is right.
        self.assertEqual([('sha1:91481f539e802c76542ea5e4c83ad416bf219f73',),
                          ('sha1:4ff91971043668583985aec83f4f0ab10a907d3f',),
                          ('sha1:81e7324507c5ca132eedaf2d8414ee4bb2226187',),
                          ('sha1:b101b7da280596c71a4540e9a1eeba8045985ee0',)],
                         simple_chk_records)
        # Now, when we do a similar call using 'get_stream_for_missing_keys'
        # we should get a much larger set of pages.
        missing = [('inventories', 'rev-2')]
        full_chk_records = []
        for vf_name, substream in source.get_stream_for_missing_keys(missing):
            if vf_name == 'inventories':
                for record in substream:
                    self.assertEqual(('rev-2',), record.key)
            elif vf_name == 'chk_bytes':
                for record in substream:
                    full_chk_records.append(record.key)
            else:
                self.fail('Should not be getting a stream of %s' % (vf_name,))
        # We have 257 records now. This is because we have 1 root page, and
        # 256 leaf pages in a complete listing.
        self.assertEqual(257, len(full_chk_records))
        # Everything the incremental stream sent is part of the full set.
        self.assertSubset(simple_chk_records, full_chk_records)
4465.2.7
by Aaron Bentley
Move test_inconsistency_fatal to test_repository |
898 |
def test_inconsistency_fatal(self): |
899 |
repo = self.make_repository('repo', format='2a') |
|
900 |
self.assertTrue(repo.revisions._index._inconsistency_fatal) |
|
901 |
self.assertFalse(repo.texts._index._inconsistency_fatal) |
|
902 |
self.assertFalse(repo.inventories._index._inconsistency_fatal) |
|
903 |
self.assertFalse(repo.signatures._index._inconsistency_fatal) |
|
904 |
self.assertFalse(repo.chk_bytes._index._inconsistency_fatal) |
|
905 |
||
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
906 |
|
907 |
class TestKnitPackStreamSource(tests.TestCaseWithMemoryTransport):
    """Verify which stream source class is chosen between pack repositories.

    Fetching between two repositories of the identical pack-based format
    should use the optimised KnitPackStreamSource; any format mismatch must
    fall back to the generic StreamSource.
    """

    def _stream_source_for(self, source_format, target_format):
        # Build both repositories locally and ask the source repository which
        # streamer it would use for the target's format.
        source = self.make_repository('source', format=source_format)
        target = self.make_repository('target', format=target_format)
        return source._get_source(target._format)

    def test_source_to_exact_pack_092(self):
        stream_source = self._stream_source_for('pack-0.92', 'pack-0.92')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_rich_root_pack(self):
        stream_source = self._stream_source_for('rich-root-pack',
                                                'rich-root-pack')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19(self):
        stream_source = self._stream_source_for('1.9', '1.9')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19_rich_root(self):
        stream_source = self._stream_source_for('1.9-rich-root',
                                                '1.9-rich-root')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_remote_exact_pack_19(self):
        # A matching format reached over the smart server still gets the
        # optimised streamer.
        trans = self.make_smart_server('target')
        trans.ensure_base()
        source = self.make_repository('source', format='1.9')
        self.make_repository('target', format='1.9')
        target = repository.Repository.open(trans.base)
        stream_source = source._get_source(target._format)
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_stream_source_to_non_exact(self):
        stream = self._stream_source_for('pack-0.92', '1.9')
        self.assertIs(type(stream), repository.StreamSource)

    def test_stream_source_to_non_exact_rich_root(self):
        stream = self._stream_source_for('1.9', '1.9-rich-root')
        self.assertIs(type(stream), repository.StreamSource)

    def test_source_to_remote_non_exact_pack_19(self):
        # A format mismatch over the smart server falls back to the generic
        # streamer as well.
        trans = self.make_smart_server('target')
        trans.ensure_base()
        source = self.make_repository('source', format='1.9')
        self.make_repository('target', format='1.6')
        target = repository.Repository.open(trans.base)
        stream_source = source._get_source(target._format)
        self.assertIs(type(stream_source), repository.StreamSource)

    def test_stream_source_to_knit(self):
        stream = self._stream_source_for('pack-0.92', 'dirstate')
        self.assertIs(type(stream), repository.StreamSource)
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
969 |
|
4343.3.32
by John Arbash Meinel
Change the tests for _find_revision_outside_set to the new _find_parent_ids function. |
970 |
class TestDevelopment6FindParentIdsOfRevisions(TestCaseWithTransport):
    """Tests for _find_parent_ids_of_revisions."""

    def setUp(self):
        super(TestDevelopment6FindParentIdsOfRevisions, self).setUp()
        # Each test builds on a shared branch builder whose repository
        # provides the _find_parent_ids_of_revisions implementation.
        self.builder = self.make_branch_builder(
            'source', format='development6-rich-root')
        self.builder.start_series()
        self.builder.build_snapshot(
            'initial', None,
            [('add', ('', 'tree-root', 'directory', None))])
        self.repo = self.builder.get_branch().repository
        self.addCleanup(self.builder.finish_series)

    def assertParentIds(self, expected_result, rev_set):
        """Check the parent ids found for rev_set, ignoring ordering."""
        found = self.repo._find_parent_ids_of_revisions(rev_set)
        self.assertEqual(sorted(expected_result), sorted(found))

    def test_simple(self):
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1'], [])
        self.assertParentIds(['revid1'], ['revid2'])

    def test_not_first_parent(self):
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1'], [])
        self.builder.build_snapshot('revid3', ['revid2'], [])
        # Parents inside the requested set are not reported.
        self.assertParentIds(['revid1'], ['revid3', 'revid2'])

    def test_not_null(self):
        # 'initial' has no parents beyond the null revision, which is never
        # reported.
        self.assertParentIds([], ['initial'])

    def test_not_null_set(self):
        self.builder.build_snapshot('revid1', None, [])
        # Asking about the null revision itself yields nothing.
        self.assertParentIds([], [_mod_revision.NULL_REVISION])

    def test_ghost(self):
        # A ghost inside the requested set itself is ignored.
        self.builder.build_snapshot('revid1', None, [])
        self.assertParentIds(['initial'], ['ghost', 'revid1'])

    def test_ghost_parent(self):
        # A ghost that is a *parent* of a requested revision is reported.
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1', 'ghost'], [])
        self.assertParentIds(['ghost', 'initial'], ['revid2', 'revid1'])

    def test_righthand_parent(self):
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2a', ['revid1'], [])
        self.builder.build_snapshot('revid2b', ['revid1'], [])
        self.builder.build_snapshot('revid3', ['revid2a', 'revid2b'], [])
        # revid2b is only reachable as a right-hand parent of revid3.
        self.assertParentIds(['revid1', 'revid2b'], ['revid3', 'revid2a'])
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
1029 |
class TestWithBrokenRepo(TestCaseWithTransport):
    """These tests seem to be more appropriate as interface tests?"""

    def make_broken_repository(self):
        """Build and return a repository with deliberately bad parent data.

        The repository contains:
          * rev1a  -- a well-formed revision with 'file1'
          * rev1b  -- an inventory with no matching Revision object
          * rev2   -- references rev1b's file text even though rev1a does not
          * rev1c  -- a ghost: file text exists but no inventory/revision
          * rev3   -- file2 claims rev1c as a text ancestor
        """
        # XXX: This function is borrowed from Aaron's "Reconcile can fix bad
        # parent references" branch which is due to land in bzr.dev soon. Once
        # it does, this duplication should be removed.
        repo = self.make_repository('broken-repo')
        cleanups = []
        try:
            repo.lock_write()
            cleanups.append(repo.unlock)
            repo.start_write_group()
            # Cleanups run in reverse order, so the write group is committed
            # before the lock is released.
            cleanups.append(repo.commit_write_group)
            # make rev1a: A well-formed revision, containing 'file1'
            inv = inventory.Inventory(revision_id='rev1a')
            inv.root.revision = 'rev1a'
            self.add_file(repo, inv, 'file1', 'rev1a', [])
            repo.texts.add_lines((inv.root.file_id, 'rev1a'), [], [])
            repo.add_inventory('rev1a', inv, [])
            revision = _mod_revision.Revision('rev1a',
                committer='jrandom@example.com', timestamp=0,
                inventory_sha1='', timezone=0, message='foo', parent_ids=[])
            repo.add_revision('rev1a',revision, inv)

            # make rev1b, which has no Revision, but has an Inventory, and
            # file1
            inv = inventory.Inventory(revision_id='rev1b')
            inv.root.revision = 'rev1b'
            self.add_file(repo, inv, 'file1', 'rev1b', [])
            repo.add_inventory('rev1b', inv, [])

            # make rev2, with file1 and file2
            # file2 is sane
            # file1 has 'rev1b' as an ancestor, even though this is not
            # mentioned by 'rev1a', making it an unreferenced ancestor
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file1', 'rev2', ['rev1a', 'rev1b'])
            self.add_file(repo, inv, 'file2', 'rev2', [])
            self.add_revision(repo, 'rev2', inv, ['rev1a'])

            # make ghost revision rev1c
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file2', 'rev1c', [])

            # make rev3 with file2
            # file2 refers to 'rev1c', which is a ghost in this repository, so
            # file2 cannot have rev1c as its ancestor.
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file2', 'rev3', ['rev1c'])
            self.add_revision(repo, 'rev3', inv, ['rev1c'])
            return repo
        finally:
            for cleanup in reversed(cleanups):
                cleanup()

    def add_revision(self, repo, revision_id, inv, parent_ids):
        """Add inv (plus a root text) and a matching Revision to repo."""
        inv.revision_id = revision_id
        inv.root.revision = revision_id
        repo.texts.add_lines((inv.root.file_id, revision_id), [], [])
        repo.add_inventory(revision_id, inv, parent_ids)
        revision = _mod_revision.Revision(revision_id,
            committer='jrandom@example.com', timestamp=0, inventory_sha1='',
            timezone=0, message='foo', parent_ids=parent_ids)
        repo.add_revision(revision_id,revision, inv)

    def add_file(self, repo, inv, filename, revision, parents):
        """Add one file entry to inv and its text ('line\\n') to repo.texts.

        The file id is derived as filename + '-id'; parents names the
        text's parent revisions (not revision graph parents).
        """
        file_id = filename + '-id'
        entry = inventory.InventoryFile(file_id, filename, 'TREE_ROOT')
        entry.revision = revision
        entry.text_size = 0
        inv.add(entry)
        text_key = (file_id, revision)
        parent_keys = [(file_id, parent) for parent in parents]
        repo.texts.add_lines(text_key, parent_keys, ['line\n'])

    def test_insert_from_broken_repo(self):
        """Inserting a data stream from a broken repository won't silently
        corrupt the target repository.
        """
        broken_repo = self.make_broken_repository()
        empty_repo = self.make_repository('empty-repo')
        try:
            empty_repo.fetch(broken_repo)
        except (errors.RevisionNotPresent, errors.BzrCheckError):
            # Test successful: compression parent not being copied leads to
            # error.
            return
        # If the fetch did not raise, the copied text must at least be
        # intact and readable.
        empty_repo.lock_read()
        self.addCleanup(empty_repo.unlock)
        text = empty_repo.texts.get_record_stream(
            [('file2-id', 'rev3')], 'topological', True).next()
        self.assertEqual('line\n', text.get_bytes_as('fulltext'))
2592.3.84
by Robert Collins
Start of autopacking logic. |
1124 |
class TestRepositoryPackCollection(TestCaseWithTransport):

    def get_format(self):
        """Return the bzrdir format these tests lock down: pack-0.92."""
        registry = bzrdir.format_registry
        return registry.make_bzrdir('pack-0.92')
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1129 |
def get_packs(self): |
1130 |
format = self.get_format() |
|
1131 |
repo = self.make_repository('.', format=format) |
|
1132 |
return repo._pack_collection |
|
1133 |
||
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1134 |
def make_packs_and_alt_repo(self, write_lock=False): |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1135 |
"""Create a pack repo with 3 packs, and access it via a second repo."""
|
4617.4.1
by Robert Collins
Fix a pack specific test which didn't lock its format down. |
1136 |
tree = self.make_branch_and_tree('.', format=self.get_format()) |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1137 |
tree.lock_write() |
1138 |
self.addCleanup(tree.unlock) |
|
1139 |
rev1 = tree.commit('one') |
|
1140 |
rev2 = tree.commit('two') |
|
1141 |
rev3 = tree.commit('three') |
|
1142 |
r = repository.Repository.open('.') |
|
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1143 |
if write_lock: |
1144 |
r.lock_write() |
|
1145 |
else: |
|
1146 |
r.lock_read() |
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1147 |
self.addCleanup(r.unlock) |
1148 |
packs = r._pack_collection |
|
1149 |
packs.ensure_loaded() |
|
1150 |
return tree, r, packs, [rev1, rev2, rev3] |
|
1151 |
||
4634.127.1
by John Arbash Meinel
Partial fix for bug #507557. |
1152 |
def test__clear_obsolete_packs(self): |
1153 |
packs = self.get_packs() |
|
1154 |
obsolete_pack_trans = packs.transport.clone('obsolete_packs') |
|
1155 |
obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n') |
|
1156 |
obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n') |
|
1157 |
obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n') |
|
1158 |
obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n') |
|
1159 |
obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n') |
|
1160 |
res = packs._clear_obsolete_packs() |
|
1161 |
self.assertEqual(['a-pack', 'another-pack'], sorted(res)) |
|
1162 |
self.assertEqual([], obsolete_pack_trans.list_dir('.')) |
|
1163 |
||
1164 |
def test__clear_obsolete_packs_preserve(self): |
|
1165 |
packs = self.get_packs() |
|
1166 |
obsolete_pack_trans = packs.transport.clone('obsolete_packs') |
|
1167 |
obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n') |
|
1168 |
obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n') |
|
1169 |
obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n') |
|
1170 |
obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n') |
|
1171 |
obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n') |
|
1172 |
res = packs._clear_obsolete_packs(preserve=set(['a-pack'])) |
|
1173 |
self.assertEqual(['a-pack', 'another-pack'], sorted(res)) |
|
1174 |
self.assertEqual(['a-pack.iix', 'a-pack.pack', 'a-pack.rix'], |
|
1175 |
sorted(obsolete_pack_trans.list_dir('.'))) |
|
1176 |
||
2592.3.84
by Robert Collins
Start of autopacking logic. |
1177 |
def test__max_pack_count(self): |
2592.3.219
by Robert Collins
Review feedback. |
1178 |
"""The maximum pack count is a function of the number of revisions."""
|
2592.3.84
by Robert Collins
Start of autopacking logic. |
1179 |
# no revisions - one pack, so that we can have a revision free repo
|
1180 |
# without it blowing up
|
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1181 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1182 |
self.assertEqual(1, packs._max_pack_count(0)) |
1183 |
# after that the sum of the digits, - check the first 1-9
|
|
1184 |
self.assertEqual(1, packs._max_pack_count(1)) |
|
1185 |
self.assertEqual(2, packs._max_pack_count(2)) |
|
1186 |
self.assertEqual(3, packs._max_pack_count(3)) |
|
1187 |
self.assertEqual(4, packs._max_pack_count(4)) |
|
1188 |
self.assertEqual(5, packs._max_pack_count(5)) |
|
1189 |
self.assertEqual(6, packs._max_pack_count(6)) |
|
1190 |
self.assertEqual(7, packs._max_pack_count(7)) |
|
1191 |
self.assertEqual(8, packs._max_pack_count(8)) |
|
1192 |
self.assertEqual(9, packs._max_pack_count(9)) |
|
1193 |
# check the boundary cases with two digits for the next decade
|
|
1194 |
self.assertEqual(1, packs._max_pack_count(10)) |
|
1195 |
self.assertEqual(2, packs._max_pack_count(11)) |
|
1196 |
self.assertEqual(10, packs._max_pack_count(19)) |
|
1197 |
self.assertEqual(2, packs._max_pack_count(20)) |
|
1198 |
self.assertEqual(3, packs._max_pack_count(21)) |
|
1199 |
# check some arbitrary big numbers
|
|
1200 |
self.assertEqual(25, packs._max_pack_count(112894)) |
|
1201 |
||
4928.1.1
by Martin Pool
Give RepositoryPackCollection a repr |
1202 |
    def test_repr(self):
        """The collection's repr names both itself and its repository."""
        packs = self.get_packs()
        # NOTE(review): the parentheses in this pattern are regex groups, not
        # literal characters, so the assertion effectively checks
        # 'RepositoryPackCollection.*Repository.*'.  Escape them (\( \)) if
        # literal parentheses in the repr were meant to be verified.
        self.assertContainsRe(repr(packs),
            'RepositoryPackCollection(.*Repository(.*))')
4634.127.2
by John Arbash Meinel
Change the _obsolete_packs code to handle files that are already gone. |
1207 |
    def test__obsolete_packs(self):
        """_obsolete_packs keeps working when some files were already moved.

        Simulates a concurrent process having already renamed part of a
        pack's files into obsolete_packs/ before _obsolete_packs runs.
        """
        tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
        names = packs.names()
        pack = packs.get_pack_by_name(names[0])
        # Schedule this one for removal
        packs._remove_pack_from_memory(pack)
        # Simulate a concurrent update by renaming the .pack file and one of
        # the indices
        packs.transport.rename('packs/%s.pack' % (names[0],),
                               'obsolete_packs/%s.pack' % (names[0],))
        packs.transport.rename('indices/%s.iix' % (names[0],),
                               'obsolete_packs/%s.iix' % (names[0],))
        # Now trigger the obsoletion, and ensure that all the remaining files
        # are still renamed
        packs._obsolete_packs([pack])
        # Only the untouched packs' .pack files remain in packs/.
        self.assertEqual([n + '.pack' for n in names[1:]],
                         sorted(packs._pack_transport.list_dir('.')))
        # names[0] should not be present in the index anymore
        self.assertEqual(names[1:],
            sorted(set([osutils.splitext(n)[0] for n in
                        packs._index_transport.list_dir('.')])))
2592.3.84
by Robert Collins
Start of autopacking logic. |
1229 |
def test_pack_distribution_zero(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1230 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1231 |
self.assertEqual([0], packs.pack_distribution(0)) |
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1232 |
|
1233 |
def test_ensure_loaded_unlocked(self): |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1234 |
packs = self.get_packs() |
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1235 |
self.assertRaises(errors.ObjectNotLocked, |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1236 |
packs.ensure_loaded) |
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1237 |
|
2592.3.84
by Robert Collins
Start of autopacking logic. |
1238 |
def test_pack_distribution_one_to_nine(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1239 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1240 |
self.assertEqual([1], |
1241 |
packs.pack_distribution(1)) |
|
1242 |
self.assertEqual([1, 1], |
|
1243 |
packs.pack_distribution(2)) |
|
1244 |
self.assertEqual([1, 1, 1], |
|
1245 |
packs.pack_distribution(3)) |
|
1246 |
self.assertEqual([1, 1, 1, 1], |
|
1247 |
packs.pack_distribution(4)) |
|
1248 |
self.assertEqual([1, 1, 1, 1, 1], |
|
1249 |
packs.pack_distribution(5)) |
|
1250 |
self.assertEqual([1, 1, 1, 1, 1, 1], |
|
1251 |
packs.pack_distribution(6)) |
|
1252 |
self.assertEqual([1, 1, 1, 1, 1, 1, 1], |
|
1253 |
packs.pack_distribution(7)) |
|
1254 |
self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1], |
|
1255 |
packs.pack_distribution(8)) |
|
1256 |
self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1, 1], |
|
1257 |
packs.pack_distribution(9)) |
|
1258 |
||
1259 |
def test_pack_distribution_stable_at_boundaries(self): |
|
1260 |
"""When there are multi-rev packs the counts are stable."""
|
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1261 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1262 |
# in 10s:
|
1263 |
self.assertEqual([10], packs.pack_distribution(10)) |
|
1264 |
self.assertEqual([10, 1], packs.pack_distribution(11)) |
|
1265 |
self.assertEqual([10, 10], packs.pack_distribution(20)) |
|
1266 |
self.assertEqual([10, 10, 1], packs.pack_distribution(21)) |
|
1267 |
# 100s
|
|
1268 |
self.assertEqual([100], packs.pack_distribution(100)) |
|
1269 |
self.assertEqual([100, 1], packs.pack_distribution(101)) |
|
1270 |
self.assertEqual([100, 10, 1], packs.pack_distribution(111)) |
|
1271 |
self.assertEqual([100, 100], packs.pack_distribution(200)) |
|
1272 |
self.assertEqual([100, 100, 1], packs.pack_distribution(201)) |
|
1273 |
self.assertEqual([100, 100, 10, 1], packs.pack_distribution(211)) |
|
1274 |
||
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1275 |
def test_plan_pack_operations_2009_revisions_skip_all_packs(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1276 |
packs = self.get_packs() |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1277 |
existing_packs = [(2000, "big"), (9, "medium")] |
1278 |
# rev count - 2009 -> 2x1000 + 9x1
|
|
1279 |
pack_operations = packs.plan_autopack_combinations( |
|
1280 |
existing_packs, [1000, 1000, 1, 1, 1, 1, 1, 1, 1, 1, 1]) |
|
1281 |
self.assertEqual([], pack_operations) |
|
1282 |
||
1283 |
def test_plan_pack_operations_2010_revisions_skip_all_packs(self): |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1284 |
packs = self.get_packs() |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1285 |
existing_packs = [(2000, "big"), (9, "medium"), (1, "single")] |
1286 |
# rev count - 2010 -> 2x1000 + 1x10
|
|
1287 |
pack_operations = packs.plan_autopack_combinations( |
|
1288 |
existing_packs, [1000, 1000, 10]) |
|
1289 |
self.assertEqual([], pack_operations) |
|
1290 |
||
1291 |
def test_plan_pack_operations_2010_combines_smallest_two(self): |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1292 |
packs = self.get_packs() |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1293 |
existing_packs = [(1999, "big"), (9, "medium"), (1, "single2"), |
1294 |
(1, "single1")] |
|
1295 |
# rev count - 2010 -> 2x1000 + 1x10 (3)
|
|
1296 |
pack_operations = packs.plan_autopack_combinations( |
|
1297 |
existing_packs, [1000, 1000, 10]) |
|
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1298 |
self.assertEqual([[2, ["single2", "single1"]]], pack_operations) |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1299 |
|
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1300 |
def test_plan_pack_operations_creates_a_single_op(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1301 |
packs = self.get_packs() |
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1302 |
existing_packs = [(50, 'a'), (40, 'b'), (30, 'c'), (10, 'd'), |
1303 |
(10, 'e'), (6, 'f'), (4, 'g')] |
|
1304 |
# rev count 150 -> 1x100 and 5x10
|
|
1305 |
# The two size 10 packs do not need to be touched. The 50, 40, 30 would
|
|
1306 |
# be combined into a single 120 size pack, and the 6 & 4 would
|
|
1307 |
# becombined into a size 10 pack. However, if we have to rewrite them,
|
|
1308 |
# we save a pack file with no increased I/O by putting them into the
|
|
1309 |
# same file.
|
|
1310 |
distribution = packs.pack_distribution(150) |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1311 |
pack_operations = packs.plan_autopack_combinations(existing_packs, |
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1312 |
distribution) |
1313 |
self.assertEqual([[130, ['a', 'b', 'c', 'f', 'g']]], pack_operations) |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1314 |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1315 |
def test_all_packs_none(self): |
1316 |
format = self.get_format() |
|
1317 |
tree = self.make_branch_and_tree('.', format=format) |
|
1318 |
tree.lock_read() |
|
1319 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1320 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1321 |
packs.ensure_loaded() |
1322 |
self.assertEqual([], packs.all_packs()) |
|
1323 |
||
1324 |
def test_all_packs_one(self): |
|
1325 |
format = self.get_format() |
|
1326 |
tree = self.make_branch_and_tree('.', format=format) |
|
1327 |
tree.commit('start') |
|
1328 |
tree.lock_read() |
|
1329 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1330 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1331 |
packs.ensure_loaded() |
2592.3.176
by Robert Collins
Various pack refactorings. |
1332 |
self.assertEqual([ |
1333 |
packs.get_pack_by_name(packs.names()[0])], |
|
1334 |
packs.all_packs()) |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1335 |
|
1336 |
def test_all_packs_two(self): |
|
1337 |
format = self.get_format() |
|
1338 |
tree = self.make_branch_and_tree('.', format=format) |
|
1339 |
tree.commit('start') |
|
1340 |
tree.commit('continue') |
|
1341 |
tree.lock_read() |
|
1342 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1343 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1344 |
packs.ensure_loaded() |
1345 |
self.assertEqual([ |
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1346 |
packs.get_pack_by_name(packs.names()[0]), |
1347 |
packs.get_pack_by_name(packs.names()[1]), |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1348 |
], packs.all_packs()) |
1349 |
||
2592.3.176
by Robert Collins
Various pack refactorings. |
1350 |
    def test_get_pack_by_name(self):
        """get_pack_by_name builds a fully wired ExistingPack and caches it.

        The returned pack must carry GraphIndex objects for the .rix/.iix/
        .tix/.six index files with the sizes recorded in the collection's
        _names map, and repeated lookups must return the same instance.
        """
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        tree.commit('start')
        tree.lock_read()
        self.addCleanup(tree.unlock)
        packs = tree.branch.repository._pack_collection
        packs.reset()
        packs.ensure_loaded()
        name = packs.names()[0]
        pack_1 = packs.get_pack_by_name(name)
        # the pack should be correctly initialised
        # _names maps pack name -> per-index sizes, in the order
        # (revision, inventory, text, signature).
        sizes = packs._names[name]
        rev_index = GraphIndex(packs._index_transport, name + '.rix', sizes[0])
        inv_index = GraphIndex(packs._index_transport, name + '.iix', sizes[1])
        txt_index = GraphIndex(packs._index_transport, name + '.tix', sizes[2])
        sig_index = GraphIndex(packs._index_transport, name + '.six', sizes[3])
        self.assertEqual(pack_repo.ExistingPack(packs._pack_transport,
            name, rev_index, inv_index, txt_index, sig_index), pack_1)
        # and the same instance should be returned on successive calls.
        self.assertTrue(pack_1 is packs.get_pack_by_name(name))
|
1371 |
||
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1372 |
    def test_reload_pack_names_new_entry(self):
        """reload_pack_names() picks up a pack created by another collection.

        It returns True when the on-disk names changed, and False on a
        subsequent call with nothing new.
        """
        tree, r, packs, revs = self.make_packs_and_alt_repo()
        names = packs.names()
        # Add a new pack file into the repository
        rev4 = tree.commit('four')
        new_names = tree.branch.repository._pack_collection.names()
        new_name = set(new_names).difference(names)
        self.assertEqual(1, len(new_name))
        new_name = new_name.pop()
        # The old collection hasn't noticed yet
        self.assertEqual(names, packs.names())
        self.assertTrue(packs.reload_pack_names())
        self.assertEqual(new_names, packs.names())
        # And the repository can access the new revision
        self.assertEqual({rev4:(revs[-1],)}, r.get_parent_map([rev4]))
        self.assertFalse(packs.reload_pack_names())
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1388 |
|
1389 |
    def test_reload_pack_names_added_and_removed(self):
        """reload_pack_names() copes with packs both added and removed.

        A full repack via the other repository handle replaces all existing
        packs with one new pack; the stale collection must converge on the
        new name list after a single reload, and previously reachable
        revisions must stay accessible.
        """
        tree, r, packs, revs = self.make_packs_and_alt_repo()
        names = packs.names()
        # Now repack the whole thing
        tree.branch.repository.pack()
        new_names = tree.branch.repository._pack_collection.names()
        # The other collection hasn't noticed yet
        self.assertEqual(names, packs.names())
        self.assertTrue(packs.reload_pack_names())
        self.assertEqual(new_names, packs.names())
        self.assertEqual({revs[-1]:(revs[-2],)}, r.get_parent_map([revs[-1]]))
        self.assertFalse(packs.reload_pack_names())
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1401 |
|
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1402 |
def test_reload_pack_names_preserves_pending(self): |
1403 |
# TODO: Update this to also test for pending-deleted names
|
|
1404 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1405 |
# We will add one pack (via start_write_group + insert_record_stream),
|
|
1406 |
# and remove another pack (via _remove_pack_from_memory)
|
|
1407 |
orig_names = packs.names() |
|
1408 |
orig_at_load = packs._packs_at_load |
|
1409 |
to_remove_name = iter(orig_names).next() |
|
1410 |
r.start_write_group() |
|
1411 |
self.addCleanup(r.abort_write_group) |
|
1412 |
r.texts.insert_record_stream([versionedfile.FulltextContentFactory( |
|
1413 |
('text', 'rev'), (), None, 'content\n')]) |
|
1414 |
new_pack = packs._new_pack |
|
1415 |
self.assertTrue(new_pack.data_inserted()) |
|
1416 |
new_pack.finish() |
|
1417 |
packs.allocate(new_pack) |
|
1418 |
packs._new_pack = None |
|
1419 |
removed_pack = packs.get_pack_by_name(to_remove_name) |
|
1420 |
packs._remove_pack_from_memory(removed_pack) |
|
1421 |
names = packs.names() |
|
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1422 |
all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names() |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1423 |
new_names = set([x[0][0] for x in new_nodes]) |
1424 |
self.assertEqual(names, sorted([x[0][0] for x in all_nodes])) |
|
1425 |
self.assertEqual(set(names) - set(orig_names), new_names) |
|
1426 |
self.assertEqual(set([new_pack.name]), new_names) |
|
1427 |
self.assertEqual([to_remove_name], |
|
1428 |
sorted([x[0][0] for x in deleted_nodes])) |
|
1429 |
packs.reload_pack_names() |
|
1430 |
reloaded_names = packs.names() |
|
1431 |
self.assertEqual(orig_at_load, packs._packs_at_load) |
|
1432 |
self.assertEqual(names, reloaded_names) |
|
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1433 |
all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names() |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1434 |
new_names = set([x[0][0] for x in new_nodes]) |
1435 |
self.assertEqual(names, sorted([x[0][0] for x in all_nodes])) |
|
1436 |
self.assertEqual(set(names) - set(orig_names), new_names) |
|
1437 |
self.assertEqual(set([new_pack.name]), new_names) |
|
1438 |
self.assertEqual([to_remove_name], |
|
1439 |
sorted([x[0][0] for x in deleted_nodes])) |
|
1440 |
||
4634.127.5
by John Arbash Meinel
Possible fix for making sure packs triggering autopacking get cleaned up. |
1441 |
def test_autopack_obsoletes_new_pack(self): |
1442 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1443 |
packs._max_pack_count = lambda x: 1 |
|
1444 |
packs.pack_distribution = lambda x: [10] |
|
1445 |
r.start_write_group() |
|
1446 |
r.revisions.insert_record_stream([versionedfile.FulltextContentFactory( |
|
1447 |
('bogus-rev',), (), None, 'bogus-content\n')]) |
|
1448 |
# This should trigger an autopack, which will combine everything into a
|
|
1449 |
# single pack file.
|
|
1450 |
new_names = r.commit_write_group() |
|
1451 |
names = packs.names() |
|
1452 |
self.assertEqual(1, len(names)) |
|
1453 |
self.assertEqual([names[0] + '.pack'], |
|
1454 |
packs._pack_transport.list_dir('.')) |
|
1455 |
||
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1456 |
    def test_autopack_reloads_and_stops(self):
        """autopack() re-reads pack names mid-flight and backs off.

        After autopack has planned its work, a concurrent full pack (done
        here by hooking _execute_pack_operations) collapses everything into
        one pack; autopack() must then return False instead of repacking,
        and both collections must agree on the final name list.
        """
        tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
        # After we have determined what needs to be autopacked, trigger a
        # full-pack via the other repo which will cause us to re-evaluate and
        # decide we don't need to do anything
        orig_execute = packs._execute_pack_operations
        def _munged_execute_pack_ops(*args, **kwargs):
            # Simulate the concurrent repack, then run the real operation.
            tree.branch.repository.pack()
            return orig_execute(*args, **kwargs)
        packs._execute_pack_operations = _munged_execute_pack_ops
        packs._max_pack_count = lambda x: 1
        packs.pack_distribution = lambda x: [10]
        self.assertFalse(packs.autopack())
        self.assertEqual(1, len(packs.names()))
        self.assertEqual(tree.branch.repository._pack_collection.names(),
                         packs.names())
|
1472 |
||
4634.127.1
by John Arbash Meinel
Partial fix for bug #507557. |
1473 |
def test__save_pack_names(self): |
1474 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1475 |
names = packs.names() |
|
1476 |
pack = packs.get_pack_by_name(names[0]) |
|
1477 |
packs._remove_pack_from_memory(pack) |
|
1478 |
packs._save_pack_names(obsolete_packs=[pack]) |
|
1479 |
cur_packs = packs._pack_transport.list_dir('.') |
|
1480 |
self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs)) |
|
1481 |
# obsolete_packs will also have stuff like .rix and .iix present.
|
|
1482 |
obsolete_packs = packs.transport.list_dir('obsolete_packs') |
|
1483 |
obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs]) |
|
1484 |
self.assertEqual([pack.name], sorted(obsolete_names)) |
|
1485 |
||
1486 |
    def test__save_pack_names_already_obsoleted(self):
        """_save_pack_names must not double-handle an already-obsoleted pack.

        Simulates a concurrent autopack having moved the pack to
        obsolete_packs before _save_pack_names runs with
        clear_obsolete_packs=True.
        """
        tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
        names = packs.names()
        pack = packs.get_pack_by_name(names[0])
        packs._remove_pack_from_memory(pack)
        # We are going to simulate a concurrent autopack by manually obsoleting
        # the pack directly.
        packs._obsolete_packs([pack])
        packs._save_pack_names(clear_obsolete_packs=True,
                               obsolete_packs=[pack])
        cur_packs = packs._pack_transport.list_dir('.')
        self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs))
        # Note that while we set clear_obsolete_packs=True, it should not
        # delete a pack file that we have also scheduled for obsoletion.
        obsolete_packs = packs.transport.list_dir('obsolete_packs')
        obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs])
        self.assertEqual([pack.name], sorted(obsolete_names))
|
1503 |
||
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1504 |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1505 |
|
1506 |
class TestPack(TestCaseWithTransport):
    """Tests for the Pack object."""

    def assertCurrentlyEqual(self, left, right):
        """Assert that == and != both report left and right as equal."""
        self.assertTrue(left == right)
        self.assertTrue(right == left)
        self.assertFalse(left != right)
        self.assertFalse(right != left)

    def assertCurrentlyNotEqual(self, left, right):
        """Assert that == and != both report left and right as unequal."""
        self.assertFalse(left == right)
        self.assertFalse(right == left)
        self.assertTrue(left != right)
        self.assertTrue(right != left)

    def test___eq____ne__(self):
        """Equality of ExistingPack tracks every one of its attributes."""
        left = pack_repo.ExistingPack('', '', '', '', '', '')
        right = pack_repo.ExistingPack('', '', '', '', '', '')
        self.assertCurrentlyEqual(left, right)
        # change all attributes and ensure equality changes as we do.
        for attr in ('revision_index', 'inventory_index', 'text_index',
                     'signature_index', 'name', 'transport'):
            setattr(left, attr, 'a')
            self.assertCurrentlyNotEqual(left, right)
            setattr(right, attr, 'a')
            self.assertCurrentlyEqual(left, right)

    def test_file_name(self):
        """file_name() is the pack name with a '.pack' suffix."""
        pack = pack_repo.ExistingPack('', 'a_name', '', '', '', '')
        self.assertEqual('a_name.pack', pack.file_name())
2592.3.192
by Robert Collins
Move new revision index management to NewPack. |
1554 |
|
1555 |
||
1556 |
class TestNewPack(TestCaseWithTransport):
    """Tests for pack_repo.NewPack."""

    def test_new_instance_attributes(self):
        """A freshly constructed NewPack exposes the expected attributes.

        The pack must carry index builders, the collection's transports, a
        running hash, a 20-character random name, a start time, and no
        index sizes yet (they are only known once written).
        """
        upload_transport = self.get_transport('upload')
        pack_transport = self.get_transport('pack')
        index_transport = self.get_transport('index')
        upload_transport.mkdir('.')
        collection = pack_repo.RepositoryPackCollection(
            repo=None,
            transport=self.get_transport('.'),
            index_transport=index_transport,
            upload_transport=upload_transport,
            pack_transport=pack_transport,
            index_builder_class=BTreeBuilder,
            index_class=BTreeGraphIndex,
            use_chk_index=False)
        pack = pack_repo.NewPack(collection)
        self.addCleanup(pack.abort) # Make sure the write stream gets closed
        self.assertIsInstance(pack.revision_index, BTreeBuilder)
        self.assertIsInstance(pack.inventory_index, BTreeBuilder)
        # _hash should be an md5 object (compare by type, since the md5
        # type itself is not directly importable across Python builds).
        self.assertIsInstance(pack._hash, type(osutils.md5()))
        self.assertTrue(pack.upload_transport is upload_transport)
        self.assertTrue(pack.index_transport is index_transport)
        self.assertTrue(pack.pack_transport is pack_transport)
        self.assertEqual(None, pack.index_sizes)
        self.assertEqual(20, len(pack.random_name))
        self.assertIsInstance(pack.random_name, str)
        self.assertIsInstance(pack.start_time, float)
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1585 |
|
1586 |
||
1587 |
class TestPacker(TestCaseWithTransport):
    """Tests for the packs repository Packer class."""

    def test_pack_optimizes_pack_order(self):
        """Packer.pack() reorders its pack list to favor requested revisions.

        Builds four single-revision packs (A..D), then packs only B & C;
        their source packs must be moved to the front of packer.packs.
        """
        builder = self.make_branch_builder('.', format="1.9")
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'root-id', 'directory', None)),
            ('add', ('f', 'f-id', 'file', 'content\n'))])
        builder.build_snapshot('B', ['A'],
            [('modify', ('f-id', 'new-content\n'))])
        builder.build_snapshot('C', ['B'],
            [('modify', ('f-id', 'third-content\n'))])
        builder.build_snapshot('D', ['C'],
            [('modify', ('f-id', 'fourth-content\n'))])
        b = builder.get_branch()
        b.lock_read()
        builder.finish_series()
        self.addCleanup(b.unlock)
        # At this point, we should have 4 pack files available
        # Because of how they were built, they correspond to
        # ['D', 'C', 'B', 'A']
        packs = b.repository._pack_collection.packs
        packer = pack_repo.Packer(b.repository._pack_collection,
                                  packs, 'testing',
                                  revision_ids=['B', 'C'])
        # Now, when we are copying the B & C revisions, their pack files should
        # be moved to the front of the stack
        # The new ordering moves B & C to the front of the .packs attribute,
        # and leaves the others in the original order.
        new_packs = [packs[1], packs[2], packs[0], packs[3]]
        packer.pack()
        self.assertEqual(new_packs, packer.packs)
|
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
1620 |
|
1621 |
||
3777.5.4
by John Arbash Meinel
OptimisingPacker now sets the optimize flags for the indexes being built. |
1622 |
class TestOptimisingPacker(TestCaseWithTransport):
    """Tests for the OptimisingPacker class."""

    def get_pack_collection(self):
        """Create a repository and return its pack collection."""
        return self.make_repository('.')._pack_collection

    def test_open_pack_will_optimise(self):
        """open_pack() flags every index builder for size optimisation."""
        packer = pack_repo.OptimisingPacker(self.get_pack_collection(),
                                            [], '.test')
        pack = packer.open_pack()
        self.addCleanup(pack.abort) # ensure cleanup
        self.assertIsInstance(pack, pack_repo.NewPack)
        for index in (pack.revision_index, pack.inventory_index,
                      pack.text_index, pack.signature_index):
            self.assertTrue(index._optimize_for_size)
|
4462.2.6
by Robert Collins
Cause StreamSink to partially pack repositories after cross format fetches when beneficial. |
1639 |
|
1640 |
||
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
1641 |
class TestCrossFormatPacks(TestCaseWithTransport):
    """Tests that cross-format fetches pack the target repository.

    Both the streaming-sink path (run_stream) and the fetch path
    (run_fetch, the 'IDS' tests) are exercised; packing is expected only
    when source and target formats differ in a way the sink cares about.
    """

    def log_pack(self, hint=None):
        # Stand-in for target.pack(): record the call, delegate to the real
        # pack, and check a hint was supplied when one is expected.
        self.calls.append(('pack', hint))
        self.orig_pack(hint=hint)
        if self.expect_hint:
            self.assertTrue(hint)

    def run_stream(self, src_fmt, target_fmt, expect_pack_called):
        """Stream one revision from src_fmt into target_fmt via the sink.

        Asserts that target.pack was called exactly once if
        expect_pack_called, and never otherwise.
        """
        self.expect_hint = expect_pack_called
        self.calls = []
        source_tree = self.make_branch_and_tree('src', format=src_fmt)
        source_tree.lock_write()
        self.addCleanup(source_tree.unlock)
        tip = source_tree.commit('foo')
        target = self.make_repository('target', format=target_fmt)
        target.lock_write()
        self.addCleanup(target.unlock)
        source = source_tree.branch.repository._get_source(target._format)
        # Intercept pack() so we can count calls (see log_pack).
        self.orig_pack = target.pack
        target.pack = self.log_pack
        search = target.search_missing_revision_ids(
            source_tree.branch.repository, tip)
        stream = source.get_stream(search)
        from_format = source_tree.branch.repository._format
        sink = target._get_sink()
        sink.insert_stream(stream, from_format, [])
        if expect_pack_called:
            self.assertLength(1, self.calls)
        else:
            self.assertLength(0, self.calls)

    def run_fetch(self, src_fmt, target_fmt, expect_pack_called):
        """Fetch from a src_fmt repository into a target_fmt repository.

        Asserts that target.pack was called exactly once if
        expect_pack_called, and never otherwise.
        """
        self.expect_hint = expect_pack_called
        self.calls = []
        source_tree = self.make_branch_and_tree('src', format=src_fmt)
        source_tree.lock_write()
        self.addCleanup(source_tree.unlock)
        tip = source_tree.commit('foo')
        target = self.make_repository('target', format=target_fmt)
        target.lock_write()
        self.addCleanup(target.unlock)
        source = source_tree.branch.repository
        # Intercept pack() so we can count calls (see log_pack).
        self.orig_pack = target.pack
        target.pack = self.log_pack
        target.fetch(source)
        if expect_pack_called:
            self.assertLength(1, self.calls)
        else:
            self.assertLength(0, self.calls)

    def test_sink_format_hint_no(self):
        # When the target format says packing makes no difference, pack is not
        # called.
        self.run_stream('1.9', 'rich-root-pack', False)

    def test_sink_format_hint_yes(self):
        # When the target format says packing makes a difference, pack is
        # called.
        self.run_stream('1.9', '2a', True)

    def test_sink_format_same_no(self):
        # When the formats are the same, pack is not called.
        self.run_stream('2a', '2a', False)

    def test_IDS_format_hint_no(self):
        # When the target format says packing makes no difference, pack is not
        # called.
        self.run_fetch('1.9', 'rich-root-pack', False)

    def test_IDS_format_hint_yes(self):
        # When the target format says packing makes a difference, pack is
        # called.
        self.run_fetch('1.9', '2a', True)

    def test_IDS_format_same_no(self):
        # When the formats are the same, pack is not called.
        self.run_fetch('2a', '2a', False)