4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1 |
# Copyright (C) 2006-2010 Canonical Ltd
|
1685.1.63
by Martin Pool
Small Transport fixups |
2 |
#
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
3 |
# This program is free software; you can redistribute it and/or modify
|
4 |
# it under the terms of the GNU General Public License as published by
|
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
1685.1.63
by Martin Pool
Small Transport fixups |
7 |
#
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
8 |
# This program is distributed in the hope that it will be useful,
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
1685.1.63
by Martin Pool
Small Transport fixups |
12 |
#
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
13 |
# You should have received a copy of the GNU General Public License
|
14 |
# along with this program; if not, write to the Free Software
|
|
4183.7.1
by Sabin Iacob
update FSF mailing address |
15 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
16 |
|
17 |
"""Tests for the Repository facility that are not interface tests.
|
|
18 |
||
3689.1.4
by John Arbash Meinel
Doc strings that reference repository_implementations |
19 |
For interface tests see tests/per_repository/*.py.
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
20 |
|
21 |
For concrete class tests see this file, and for storage formats tests
|
|
22 |
also see this file.
|
|
23 |
"""
|
|
24 |
||
1773.4.1
by Martin Pool
Add pyflakes makefile target; fix many warnings |
25 |
from stat import S_ISDIR |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
26 |
from StringIO import StringIO |
4789.25.4
by John Arbash Meinel
Turn a repository format 7 failure into a KnownFailure. |
27 |
import sys |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
28 |
|
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
29 |
import bzrlib |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
30 |
from bzrlib.errors import (NotBranchError, |
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
31 |
NoSuchFile, |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
32 |
UnknownFormatError, |
33 |
UnsupportedFormatError, |
|
34 |
)
|
|
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
35 |
from bzrlib import ( |
36 |
graph, |
|
37 |
tests, |
|
38 |
)
|
|
3735.4.1
by Andrew Bennetts
Add _find_revision_outside_set. |
39 |
from bzrlib.branchbuilder import BranchBuilder |
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
40 |
from bzrlib.btree_index import BTreeBuilder, BTreeGraphIndex |
2592.3.192
by Robert Collins
Move new revision index management to NewPack. |
41 |
from bzrlib.index import GraphIndex, InMemoryGraphIndex |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
42 |
from bzrlib.repository import RepositoryFormat |
2535.3.41
by Andrew Bennetts
Add tests for InterRemoteToOther.is_compatible. |
43 |
from bzrlib.smart import server |
2670.3.5
by Andrew Bennetts
Remove get_stream_as_bytes from KnitVersionedFile's API, make it a function in knitrepo.py instead. |
44 |
from bzrlib.tests import ( |
45 |
TestCase, |
|
46 |
TestCaseWithTransport, |
|
3446.2.1
by Martin Pool
Failure to delete an obsolete pack file should not be fatal. |
47 |
TestSkipped, |
2670.3.5
by Andrew Bennetts
Remove get_stream_as_bytes from KnitVersionedFile's API, make it a function in knitrepo.py instead. |
48 |
test_knit, |
49 |
)
|
|
3446.2.1
by Martin Pool
Failure to delete an obsolete pack file should not be fatal. |
50 |
from bzrlib.transport import ( |
51 |
fakenfs, |
|
52 |
get_transport, |
|
53 |
)
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
54 |
from bzrlib.transport.memory import MemoryServer |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
55 |
from bzrlib import ( |
2694.5.4
by Jelmer Vernooij
Move bzrlib.util.bencode to bzrlib._bencode_py. |
56 |
bencode, |
2535.3.41
by Andrew Bennetts
Add tests for InterRemoteToOther.is_compatible. |
57 |
bzrdir, |
58 |
errors, |
|
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
59 |
inventory, |
2929.3.5
by Vincent Ladeuil
New files, same warnings, same fixes. |
60 |
osutils, |
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
61 |
progress, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
62 |
repository, |
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
63 |
revision as _mod_revision, |
2535.3.41
by Andrew Bennetts
Add tests for InterRemoteToOther.is_compatible. |
64 |
symbol_versioning, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
65 |
upgrade, |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
66 |
versionedfile, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
67 |
workingtree, |
68 |
)
|
|
3735.42.5
by John Arbash Meinel
Change the tests so we now just use a direct test that _get_source is |
69 |
from bzrlib.repofmt import ( |
70 |
groupcompress_repo, |
|
71 |
knitrepo, |
|
72 |
pack_repo, |
|
73 |
weaverepo, |
|
74 |
)
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
75 |
|
76 |
||
77 |
class TestDefaultFormat(TestCase):

    def test_get_set_default_format(self):
        """Swapping the registered default repository format round-trips."""
        old_default = bzrdir.format_registry.get('default')
        private_default = old_default().repository_format.__class__
        old_format = repository.RepositoryFormat.get_default_format()
        self.assertTrue(isinstance(old_format, private_default))

        def make_sample_bzrdir():
            # A metadir format whose repository format is the sample one.
            sample_dir_format = bzrdir.BzrDirMetaFormat1()
            sample_dir_format.repository_format = SampleRepositoryFormat()
            return sample_dir_format

        bzrdir.format_registry.remove('default')
        bzrdir.format_registry.register('sample', make_sample_bzrdir, '')
        bzrdir.format_registry.set_default('sample')
        # creating a repository should now create an instrumented dir.
        try:
            # the default branch format is used by the meta dir format
            # which is not the default bzrdir format at this point
            made_dir = bzrdir.BzrDirMetaFormat1().initialize('memory:///')
            result = made_dir.create_repository()
            self.assertEqual(result, 'A bzr repository dir')
        finally:
            # Restore the registry exactly as we found it.
            bzrdir.format_registry.remove('default')
            bzrdir.format_registry.remove('sample')
            bzrdir.format_registry.register('default', old_default, '')
        self.assertIsInstance(repository.RepositoryFormat.get_default_format(),
                              old_format.__class__)
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
104 |
|
105 |
||
106 |
class SampleRepositoryFormat(repository.RepositoryFormat):
    """A sample format

    this format is initializable, unsupported to aid in testing the
    open and open(unsupported=True) routines.
    """

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Sample .bzr repository format."

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository in a BzrDir"""
        # Write just the format marker; this sample stores nothing else.
        repo_transport = a_bzrdir.get_repository_transport(self)
        repo_transport.put_bytes('format', self.get_format_string())
        return 'A bzr repository dir'

    def is_supported(self):
        # Deliberately unsupported so open(unsupported=...) paths are
        # exercised by the tests.
        return False

    def open(self, a_bzrdir, _found=False):
        return "opened repository."
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
128 |
|
129 |
||
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
130 |
class TestRepositoryFormat(TestCaseWithTransport):
    """Tests for the Repository format detection used by the bzr meta dir facility."""
    # NOTE(review): the original docstring read "...facility.BzrBranchFormat
    # facility." which looks like a copy/paste remnant; trimmed.

    def test_find_format(self):
        # is the right format object found for a repository?
        # create a branch with a few known format objects.
        self.build_tree(["foo/", "bar/"])

        def check_format(format, url):
            # Initialize a control dir + repository of the given format at
            # url, then verify find_format detects that same format class.
            dir = format._matchingbzrdir.initialize(url)
            format.initialize(dir)
            found_format = repository.RepositoryFormat.find_format(dir)
            # assertIsInstance replaces the deprecated failUnless(isinstance(...));
            # the unused `t = get_transport(url)` local was dropped.
            self.assertIsInstance(found_format, format.__class__)
        check_format(weaverepo.RepositoryFormat7(), "bar")

    def test_find_format_no_repository(self):
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        self.assertRaises(errors.NoRepositoryPresent,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_find_format_unknown_format(self):
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        SampleRepositoryFormat().initialize(dir)
        self.assertRaises(UnknownFormatError,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_register_unregister_format(self):
        format = SampleRepositoryFormat()
        # make a control dir
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        # make a repo
        format.initialize(dir)
        # register a format for it.
        repository.RepositoryFormat.register_format(format)
        # which repository.Open will refuse (not supported)
        self.assertRaises(UnsupportedFormatError, repository.Repository.open,
                          self.get_url())
        # but open(unsupported) will work
        self.assertEqual(format.open(dir), "opened repository.")
        # unregister the format
        repository.RepositoryFormat.unregister_format(format)
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
175 |
class TestFormat6(TestCaseWithTransport):

    def _initialize_format6(self):
        # Shared setup: a fresh format-6 control dir holding a weave
        # (RepositoryFormat6) repository.
        made_control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat6().initialize(made_control)
        return made_control, made_repo

    def test_attribute__fetch_order(self):
        """Weaves need topological data insertion."""
        control, repo = self._initialize_format6()
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Weaves do not reuse deltas."""
        control, repo = self._initialize_format6()
        self.assertEqual(False, repo._format._fetch_uses_deltas)

    def test_attribute__fetch_reconcile(self):
        """Weave repositories need a reconcile after fetch."""
        control, repo = self._initialize_format6()
        self.assertEqual(True, repo._format._fetch_reconcile)

    def test_no_ancestry_weave(self):
        control, repo = self._initialize_format6()
        # We no longer need to create the ancestry.weave file
        # since it is *never* used.
        self.assertRaises(NoSuchFile,
                          control.transport.get,
                          'ancestry.weave')

    def test_supports_external_lookups(self):
        control, repo = self._initialize_format6()
        self.assertFalse(repo._format.supports_external_lookups)
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
209 |
|
210 |
class TestFormat7(TestCaseWithTransport):

    def test_attribute__fetch_order(self):
        """Weaves need topological data insertion."""
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control)
        self.assertEqual('topological', made_repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Weaves do not reuse deltas."""
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control)
        self.assertEqual(False, made_repo._format._fetch_uses_deltas)

    def test_attribute__fetch_reconcile(self):
        """Weave repositories need a reconcile after fetch."""
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control)
        self.assertEqual(True, made_repo._format._fetch_reconcile)

    def test_disk_layout(self):
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control)
        # in case of side effects of locking.
        made_repo.lock_write()
        made_repo.unlock()
        # we expect on disk:
        # format 'Bazaar-NG Repository format 7'
        # lock ''
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        repo_transport = made_control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             repo_transport.get('format').read())
        self.assertTrue(S_ISDIR(repo_transport.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(repo_transport.stat('weaves').st_mode))
        empty_weave = ('# bzr weave file v5\n'
                       'w\n'
                       'W\n')
        self.assertEqualDiff(empty_weave,
                             repo_transport.get('inventory.weave').read())
        # Creating a file with id Foo:Bar results in a non-escaped file name
        # on disk.
        made_control.create_branch()
        tree = made_control.create_workingtree()
        tree.add(['foo'], ['Foo:Bar'], ['file'])
        tree.put_file_bytes_non_atomic('Foo:Bar', 'content\n')
        try:
            tree.commit('first post', rev_id='first')
        except errors.IllegalPath:
            # Colons are illegal in file names on some platforms.
            if sys.platform != 'win32':
                raise
            self.knownFailure('Foo:Bar cannot be used as a file-id on windows'
                              ' in repo format 7')
            return
        self.assertEqualDiff(
            '# bzr weave file v5\n'
            'i\n'
            '1 7fe70820e08a1aac0ef224d9c66ab66831cc4ab1\n'
            'n first\n'
            '\n'
            'w\n'
            '{ 0\n'
            '. content\n'
            '}\n'
            'W\n',
            repo_transport.get('weaves/74/Foo%3ABar.weave').read())

    def test_shared_disk_layout(self):
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control,
                                                             shared=True)
        # we expect on disk:
        # format 'Bazaar-NG Repository format 7'
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        # lock is not present when unlocked
        repo_transport = made_control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             repo_transport.get('format').read())
        self.assertEqualDiff('', repo_transport.get('shared-storage').read())
        self.assertTrue(S_ISDIR(repo_transport.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(repo_transport.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             repo_transport.get('inventory.weave').read())
        self.assertFalse(repo_transport.has('branch-lock'))

    def test_creates_lockdir(self):
        """Make sure it appears to be controlled by a LockDir existence"""
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control,
                                                             shared=True)
        repo_transport = made_control.get_repository_transport(None)
        # TODO: Should check there is a 'lock' toplevel directory,
        # regardless of contents
        self.assertFalse(repo_transport.has('lock/held/info'))
        made_repo.lock_write()
        try:
            self.assertTrue(repo_transport.has('lock/held/info'))
        finally:
            # unlock so we don't get a warning about failing to do so
            made_repo.unlock()

    def test_uses_lockdir(self):
        """repo format 7 actually locks on lockdir"""
        base_url = self.get_url()
        made_control = bzrdir.BzrDirMetaFormat1().initialize(base_url)
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control,
                                                             shared=True)
        repo_transport = made_control.get_repository_transport(None)
        made_repo.lock_write()
        made_repo.unlock()
        del made_repo
        # make sure the same lock is created by opening it
        repo = repository.Repository.open(base_url)
        repo.lock_write()
        self.assertTrue(repo_transport.has('lock/held/info'))
        repo.unlock()
        self.assertFalse(repo_transport.has('lock/held/info'))

    def test_shared_no_tree_disk_layout(self):
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control,
                                                             shared=True)
        made_repo.set_make_working_trees(False)
        # we expect on disk:
        # format 'Bazaar-NG Repository format 7'
        # lock ''
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        repo_transport = made_control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             repo_transport.get('format').read())
        ## self.assertEqualDiff('', repo_transport.get('lock').read())
        self.assertEqualDiff('', repo_transport.get('shared-storage').read())
        self.assertEqualDiff('', repo_transport.get('no-working-trees').read())
        made_repo.set_make_working_trees(True)
        self.assertFalse(repo_transport.has('no-working-trees'))
        self.assertTrue(S_ISDIR(repo_transport.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(repo_transport.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             repo_transport.get('inventory.weave').read())

    def test_supports_external_lookups(self):
        made_control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        made_repo = weaverepo.RepositoryFormat7().initialize(made_control)
        self.assertFalse(made_repo._format.supports_external_lookups)
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
362 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
363 |
class TestFormatKnit1(TestCaseWithTransport): |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
364 |
|
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
365 |
def test_attribute__fetch_order(self): |
366 |
"""Knits need topological data insertion."""
|
|
367 |
repo = self.make_repository('.', |
|
368 |
format=bzrdir.format_registry.get('knit')()) |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
369 |
self.assertEqual('topological', repo._format._fetch_order) |
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
370 |
|
371 |
def test_attribute__fetch_uses_deltas(self): |
|
372 |
"""Knits reuse deltas."""
|
|
373 |
repo = self.make_repository('.', |
|
374 |
format=bzrdir.format_registry.get('knit')()) |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
375 |
self.assertEqual(True, repo._format._fetch_uses_deltas) |
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
376 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
377 |
def test_disk_layout(self): |
378 |
control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url()) |
|
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
379 |
repo = knitrepo.RepositoryFormatKnit1().initialize(control) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
380 |
# in case of side effects of locking.
|
381 |
repo.lock_write() |
|
382 |
repo.unlock() |
|
383 |
# we want:
|
|
384 |
# format 'Bazaar-NG Knit Repository Format 1'
|
|
1553.5.62
by Martin Pool
Add tests that MetaDir repositories use LockDirs |
385 |
# lock: is a directory
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
386 |
# inventory.weave == empty_weave
|
387 |
# empty revision-store directory
|
|
388 |
# empty weaves directory
|
|
389 |
t = control.get_repository_transport(None) |
|
390 |
self.assertEqualDiff('Bazaar-NG Knit Repository Format 1', |
|
391 |
t.get('format').read()) |
|
1553.5.57
by Martin Pool
[merge] sync from bzr.dev |
392 |
# XXX: no locks left when unlocked at the moment
|
393 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
394 |
self.assertTrue(S_ISDIR(t.stat('knits').st_mode)) |
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
395 |
self.check_knits(t) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
396 |
# Check per-file knits.
|
397 |
branch = control.create_branch() |
|
398 |
tree = control.create_workingtree() |
|
399 |
tree.add(['foo'], ['Nasty-IdC:'], ['file']) |
|
400 |
tree.put_file_bytes_non_atomic('Nasty-IdC:', '') |
|
401 |
tree.commit('1st post', rev_id='foo') |
|
402 |
self.assertHasKnit(t, 'knits/e8/%254easty-%2549d%2543%253a', |
|
403 |
'\nfoo fulltext 0 81 :') |
|
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
404 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
405 |
def assertHasKnit(self, t, knit_name, extra_content=''): |
1654.1.3
by Robert Collins
Refactor repository knit tests slightly to remove duplication - add a assertHasKnit method. |
406 |
"""Assert that knit_name exists on t."""
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
407 |
self.assertEqualDiff('# bzr knit index 8\n' + extra_content, |
1654.1.3
by Robert Collins
Refactor repository knit tests slightly to remove duplication - add a assertHasKnit method. |
408 |
t.get(knit_name + '.kndx').read()) |
409 |
||
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
410 |
def check_knits(self, t): |
411 |
"""check knit content for a repository."""
|
|
1654.1.3
by Robert Collins
Refactor repository knit tests slightly to remove duplication - add a assertHasKnit method. |
412 |
self.assertHasKnit(t, 'inventory') |
413 |
self.assertHasKnit(t, 'revisions') |
|
414 |
self.assertHasKnit(t, 'signatures') |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
415 |
|
416 |
def test_shared_disk_layout(self): |
|
417 |
control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url()) |
|
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
418 |
repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
419 |
# we want:
|
420 |
# format 'Bazaar-NG Knit Repository Format 1'
|
|
1553.5.62
by Martin Pool
Add tests that MetaDir repositories use LockDirs |
421 |
# lock: is a directory
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
422 |
# inventory.weave == empty_weave
|
423 |
# empty revision-store directory
|
|
424 |
# empty weaves directory
|
|
425 |
# a 'shared-storage' marker file.
|
|
426 |
t = control.get_repository_transport(None) |
|
427 |
self.assertEqualDiff('Bazaar-NG Knit Repository Format 1', |
|
428 |
t.get('format').read()) |
|
1553.5.57
by Martin Pool
[merge] sync from bzr.dev |
429 |
# XXX: no locks left when unlocked at the moment
|
430 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
431 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
432 |
self.assertTrue(S_ISDIR(t.stat('knits').st_mode)) |
|
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
433 |
self.check_knits(t) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
434 |
|
435 |
def test_shared_no_tree_disk_layout(self): |
|
436 |
control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url()) |
|
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
437 |
repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
438 |
repo.set_make_working_trees(False) |
439 |
# we want:
|
|
440 |
# format 'Bazaar-NG Knit Repository Format 1'
|
|
441 |
# lock ''
|
|
442 |
# inventory.weave == empty_weave
|
|
443 |
# empty revision-store directory
|
|
444 |
# empty weaves directory
|
|
445 |
# a 'shared-storage' marker file.
|
|
446 |
t = control.get_repository_transport(None) |
|
447 |
self.assertEqualDiff('Bazaar-NG Knit Repository Format 1', |
|
448 |
t.get('format').read()) |
|
1553.5.57
by Martin Pool
[merge] sync from bzr.dev |
449 |
# XXX: no locks left when unlocked at the moment
|
450 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
451 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
452 |
self.assertEqualDiff('', t.get('no-working-trees').read()) |
|
453 |
repo.set_make_working_trees(True) |
|
454 |
self.assertFalse(t.has('no-working-trees')) |
|
455 |
self.assertTrue(S_ISDIR(t.stat('knits').st_mode)) |
|
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
456 |
self.check_knits(t) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
457 |
|
2917.2.1
by John Arbash Meinel
Fix bug #152360. The xml5 serializer should be using |
458 |
def test_deserialise_sets_root_revision(self): |
459 |
"""We must have a inventory.root.revision
|
|
460 |
||
461 |
Old versions of the XML5 serializer did not set the revision_id for
|
|
462 |
the whole inventory. So we grab the one from the expected text. Which
|
|
463 |
is valid when the api is not being abused.
|
|
464 |
"""
|
|
465 |
repo = self.make_repository('.', |
|
466 |
format=bzrdir.format_registry.get('knit')()) |
|
467 |
inv_xml = '<inventory format="5">\n</inventory>\n' |
|
4988.3.3
by Jelmer Vernooij
rename Repository.deserialise_inventory to Repository._deserialise_inventory. |
468 |
inv = repo._deserialise_inventory('test-rev-id', inv_xml) |
2917.2.1
by John Arbash Meinel
Fix bug #152360. The xml5 serializer should be using |
469 |
self.assertEqual('test-rev-id', inv.root.revision) |
470 |
||
471 |
def test_deserialise_uses_global_revision_id(self): |
|
472 |
"""If it is set, then we re-use the global revision id"""
|
|
473 |
repo = self.make_repository('.', |
|
474 |
format=bzrdir.format_registry.get('knit')()) |
|
475 |
inv_xml = ('<inventory format="5" revision_id="other-rev-id">\n' |
|
476 |
'</inventory>\n') |
|
477 |
# Arguably, the deserialise_inventory should detect a mismatch, and
|
|
478 |
# raise an error, rather than silently using one revision_id over the
|
|
479 |
# other.
|
|
4988.3.3
by Jelmer Vernooij
rename Repository.deserialise_inventory to Repository._deserialise_inventory. |
480 |
self.assertRaises(AssertionError, repo._deserialise_inventory, |
3169.2.2
by Robert Collins
Add a test to Repository.deserialise_inventory that the resulting ivnentory is the one asked for, and update relevant tests. Also tweak the model 1 to 2 regenerate inventories logic to use the revision trees parent marker which is more accurate in some cases. |
481 |
'test-rev-id', inv_xml) |
4988.3.3
by Jelmer Vernooij
rename Repository.deserialise_inventory to Repository._deserialise_inventory. |
482 |
inv = repo._deserialise_inventory('other-rev-id', inv_xml) |
2917.2.1
by John Arbash Meinel
Fix bug #152360. The xml5 serializer should be using |
483 |
self.assertEqual('other-rev-id', inv.root.revision) |
484 |
||
3221.3.1
by Robert Collins
* Repository formats have a new supported-feature attribute |
485 |
def test_supports_external_lookups(self): |
486 |
repo = self.make_repository('.', |
|
487 |
format=bzrdir.format_registry.get('knit')()) |
|
488 |
self.assertFalse(repo._format.supports_external_lookups) |
|
489 |
||
2535.3.53
by Andrew Bennetts
Remove get_stream_as_bytes from KnitVersionedFile's API, make it a function in knitrepo.py instead. |
490 |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
491 |
class DummyRepository(object):
    """A dummy repository for testing."""

    # Tests attach a RepositoryFormat here when format behaviour is needed.
    _format = None
    _serializer = None

    def supports_rich_root(self):
        # Defer to an attached format; with none attached, report the
        # conservative default.
        if self._format is None:
            return False
        return self._format.rich_root_data

    def get_graph(self):
        raise NotImplementedError

    def get_parent_map(self, revision_ids):
        raise NotImplementedError
|
507 |
||
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
508 |
|
509 |
class InterDummy(repository.InterRepository):
    """An inter-repository optimised code path for DummyRepository.

    This is for use during testing where we use DummyRepository as
    repositories so that none of the default registered inter-repository
    classes will match.
    """

    @staticmethod
    def is_compatible(repo_source, repo_target):
        """InterDummy is compatible with DummyRepository."""
        both_dummies = (isinstance(repo_source, DummyRepository)
                        and isinstance(repo_target, DummyRepository))
        return both_dummies
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
522 |
|
523 |
||
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
524 |
class TestInterRepository(TestCaseWithTransport):

    def test_get_default_inter_repository(self):
        # InterRepository.get(repo_a, repo_b) probes for an inter_repo class
        # where is_compatible(repo_a, repo_b) returns true, and returns a
        # default inter_repo otherwise.
        # This also tests that the default registered optimised
        # interrepository classes do not barf inappropriately when a
        # surprising repository type is handed to them.
        dummy_a = DummyRepository()
        dummy_b = DummyRepository()
        self.assertGetsDefaultInterRepository(dummy_a, dummy_b)

    def assertGetsDefaultInterRepository(self, repo_a, repo_b):
        """Asserts that InterRepository.get(repo_a, repo_b) -> the default.

        The effective default is now InterSameDataRepository because there is
        no actual sane default in the presence of incompatible data models.
        """
        inter_repo = repository.InterRepository.get(repo_a, repo_b)
        self.assertEqual(repository.InterSameDataRepository,
                         inter_repo.__class__)
        self.assertEqual(repo_a, inter_repo.source)
        self.assertEqual(repo_b, inter_repo.target)

    def test_register_inter_repository_class(self):
        # An optimised code path provider - an InterRepository subclass - can
        # be registered and unregistered, and is correctly selected when
        # given a repository pair that its is_compatible static method
        # returns true for.
        dummy_a = DummyRepository()
        dummy_a._format = RepositoryFormat()
        dummy_b = DummyRepository()
        dummy_b._format = RepositoryFormat()
        repo = self.make_repository('.')
        # hack dummies to look like repo somewhat.
        for dummy in (dummy_a, dummy_b):
            dummy._serializer = repo._serializer
            dummy._format.supports_tree_reference = \
                repo._format.supports_tree_reference
            dummy._format.rich_root_data = repo._format.rich_root_data
        repository.InterRepository.register_optimiser(InterDummy)
        try:
            # we should get the default for something InterDummy returns
            # False to
            self.assertFalse(InterDummy.is_compatible(dummy_a, repo))
            self.assertGetsDefaultInterRepository(dummy_a, repo)
            # and we should get an InterDummy for a pair it 'likes'
            self.assertTrue(InterDummy.is_compatible(dummy_a, dummy_b))
            inter_repo = repository.InterRepository.get(dummy_a, dummy_b)
            self.assertEqual(InterDummy, inter_repo.__class__)
            self.assertEqual(dummy_a, inter_repo.source)
            self.assertEqual(dummy_b, inter_repo.target)
        finally:
            repository.InterRepository.unregister_optimiser(InterDummy)
        # now we should get the default InterRepository object again.
        self.assertGetsDefaultInterRepository(dummy_a, dummy_b)
|
1534.1.33
by Robert Collins
Move copy_content_into into InterRepository and InterWeaveRepo, and disable the default codepath test as we have optimised paths for all current combinations. |
583 |
|
2241.1.17
by Martin Pool
Restore old InterWeave tests |
584 |
|
585 |
class TestInterWeaveRepo(TestCaseWithTransport):

    def test_is_compatible_and_registered(self):
        # InterWeaveRepo is compatible when either side
        # is a format 5/6/7 branch
        from bzrlib.repofmt import knitrepo, weaverepo
        formats = [weaverepo.RepositoryFormat5(),
                   weaverepo.RepositoryFormat6(),
                   weaverepo.RepositoryFormat7()]
        incompatible_formats = [weaverepo.RepositoryFormat4(),
                                knitrepo.RepositoryFormatKnit1(),
                                ]
        repo_a = self.make_repository('a')
        repo_b = self.make_repository('b')
        is_compatible = repository.InterWeaveRepo.is_compatible
        # force incompatible left then right
        for bad in incompatible_formats:
            repo_a._format = bad
            repo_b._format = formats[0]
            self.assertFalse(is_compatible(repo_a, repo_b))
            self.assertFalse(is_compatible(repo_b, repo_a))
        # every weave/weave pairing is compatible and selects InterWeaveRepo
        for src in formats:
            repo_a._format = src
            for tgt in formats:
                repo_b._format = tgt
                self.assertTrue(is_compatible(repo_a, repo_b))
                inter = repository.InterRepository.get(repo_a, repo_b)
                self.assertEqual(repository.InterWeaveRepo, inter.__class__)
|
614 |
||
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
615 |
|
616 |
class TestRepositoryConverter(TestCaseWithTransport):

    def test_convert_empty(self):
        # Converting an empty weave (format 7) repository to knit1 should
        # leave an openable repository of the target format behind.
        t = get_transport(self.get_url('.'))
        t.mkdir('repository')
        repo_dir = bzrdir.BzrDirMetaFormat1().initialize('repository')
        repo = weaverepo.RepositoryFormat7().initialize(repo_dir)
        target_format = knitrepo.RepositoryFormatKnit1()
        converter = repository.CopyConverter(target_format)
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            converter.convert(repo, pb)
        finally:
            pb.finished()
        repo = repo_dir.open_repository()
        self.assertTrue(isinstance(target_format, repo._format.__class__))
|
1843.2.5
by Aaron Bentley
Add test of _unescape_xml |
632 |
|
633 |
||
634 |
class TestMisc(TestCase):

    def test_unescape_xml(self):
        """We get some kind of error when malformed entities are passed"""
        # '&bar;' is not a known entity, so lookup must fail with KeyError.
        self.assertRaises(KeyError, repository._unescape_xml, 'foo&bar;')
1910.2.13
by Aaron Bentley
Start work on converter |
639 |
|
640 |
||
2255.2.211
by Robert Collins
Remove knit2 repository format- it has never been supported. |
641 |
class TestRepositoryFormatKnit3(TestCaseWithTransport):

    def _make_knit3_repo(self):
        # Helper: build a repository using the Knit3 format.
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit3()
        return self.make_repository('.', format=format)

    def test_attribute__fetch_order(self):
        """Knits need topological data insertion."""
        repo = self._make_knit3_repo()
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Knits reuse deltas."""
        repo = self._make_knit3_repo()
        self.assertEqual(True, repo._format._fetch_uses_deltas)

    def test_convert(self):
        """Ensure the upgrade adds weaves for roots"""
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit1()
        tree = self.make_branch_and_tree('.', format)
        tree.commit("Dull commit", rev_id="dull")
        revision_tree = tree.branch.repository.revision_tree('dull')
        revision_tree.lock_read()
        try:
            # Knit1 has no root text, so reading it must fail.
            self.assertRaises(errors.NoSuchFile, revision_tree.get_file_lines,
                revision_tree.inventory.root.file_id)
        finally:
            revision_tree.unlock()
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit3()
        upgrade.Convert('.', format)
        tree = workingtree.WorkingTree.open('.')
        revision_tree = tree.branch.repository.revision_tree('dull')
        revision_tree.lock_read()
        try:
            # After upgrade the root text exists.
            revision_tree.get_file_lines(revision_tree.inventory.root.file_id)
        finally:
            revision_tree.unlock()
        tree.commit("Another dull commit", rev_id='dull2')
        revision_tree = tree.branch.repository.revision_tree('dull2')
        revision_tree.lock_read()
        self.addCleanup(revision_tree.unlock)
        self.assertEqual('dull', revision_tree.inventory.root.revision)

    def test_supports_external_lookups(self):
        repo = self._make_knit3_repo()
        self.assertFalse(repo._format.supports_external_lookups)
|
691 |
||
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
692 |
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
693 |
class Test2a(tests.TestCaseWithMemoryTransport): |
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
694 |
|
4634.20.1
by Robert Collins
Fix bug 402652 by recompressing all texts that are streamed - slightly slower at fetch, substantially faster and more compact at read. |
695 |
def test_fetch_combines_groups(self): |
696 |
builder = self.make_branch_builder('source', format='2a') |
|
697 |
builder.start_series() |
|
698 |
builder.build_snapshot('1', None, [ |
|
699 |
('add', ('', 'root-id', 'directory', '')), |
|
700 |
('add', ('file', 'file-id', 'file', 'content\n'))]) |
|
701 |
builder.build_snapshot('2', ['1'], [ |
|
702 |
('modify', ('file-id', 'content-2\n'))]) |
|
703 |
builder.finish_series() |
|
704 |
source = builder.get_branch() |
|
705 |
target = self.make_repository('target', format='2a') |
|
706 |
target.fetch(source.repository) |
|
707 |
target.lock_read() |
|
4665.3.2
by John Arbash Meinel
An alternative implementation that passes both tests. |
708 |
self.addCleanup(target.unlock) |
4634.20.1
by Robert Collins
Fix bug 402652 by recompressing all texts that are streamed - slightly slower at fetch, substantially faster and more compact at read. |
709 |
details = target.texts._index.get_build_details( |
710 |
[('file-id', '1',), ('file-id', '2',)]) |
|
711 |
file_1_details = details[('file-id', '1')] |
|
712 |
file_2_details = details[('file-id', '2')] |
|
713 |
# The index, and what to read off disk, should be the same for both
|
|
714 |
# versions of the file.
|
|
715 |
self.assertEqual(file_1_details[0][:3], file_2_details[0][:3]) |
|
716 |
||
4634.23.1
by Robert Collins
Cherrypick from bzr.dev: Fix bug 402652: recompress badly packed groups during fetch. (John Arbash Meinel, Robert Collins) |
717 |
def test_fetch_combines_groups(self): |
718 |
builder = self.make_branch_builder('source', format='2a') |
|
719 |
builder.start_series() |
|
720 |
builder.build_snapshot('1', None, [ |
|
721 |
('add', ('', 'root-id', 'directory', '')), |
|
722 |
('add', ('file', 'file-id', 'file', 'content\n'))]) |
|
723 |
builder.build_snapshot('2', ['1'], [ |
|
724 |
('modify', ('file-id', 'content-2\n'))]) |
|
725 |
builder.finish_series() |
|
726 |
source = builder.get_branch() |
|
727 |
target = self.make_repository('target', format='2a') |
|
728 |
target.fetch(source.repository) |
|
729 |
target.lock_read() |
|
730 |
self.addCleanup(target.unlock) |
|
731 |
details = target.texts._index.get_build_details( |
|
732 |
[('file-id', '1',), ('file-id', '2',)]) |
|
733 |
file_1_details = details[('file-id', '1')] |
|
734 |
file_2_details = details[('file-id', '2')] |
|
735 |
# The index, and what to read off disk, should be the same for both
|
|
736 |
# versions of the file.
|
|
737 |
self.assertEqual(file_1_details[0][:3], file_2_details[0][:3]) |
|
738 |
||
739 |
def test_fetch_combines_groups(self): |
|
740 |
builder = self.make_branch_builder('source', format='2a') |
|
741 |
builder.start_series() |
|
742 |
builder.build_snapshot('1', None, [ |
|
743 |
('add', ('', 'root-id', 'directory', '')), |
|
744 |
('add', ('file', 'file-id', 'file', 'content\n'))]) |
|
745 |
builder.build_snapshot('2', ['1'], [ |
|
746 |
('modify', ('file-id', 'content-2\n'))]) |
|
747 |
builder.finish_series() |
|
748 |
source = builder.get_branch() |
|
749 |
target = self.make_repository('target', format='2a') |
|
750 |
target.fetch(source.repository) |
|
751 |
target.lock_read() |
|
752 |
self.addCleanup(target.unlock) |
|
753 |
details = target.texts._index.get_build_details( |
|
754 |
[('file-id', '1',), ('file-id', '2',)]) |
|
755 |
file_1_details = details[('file-id', '1')] |
|
756 |
file_2_details = details[('file-id', '2')] |
|
757 |
# The index, and what to read off disk, should be the same for both
|
|
758 |
# versions of the file.
|
|
759 |
self.assertEqual(file_1_details[0][:3], file_2_details[0][:3]) |
|
760 |
||
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
761 |
def test_format_pack_compresses_True(self): |
762 |
repo = self.make_repository('repo', format='2a') |
|
763 |
self.assertTrue(repo._format.pack_compresses) |
|
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
764 |
|
765 |
def test_inventories_use_chk_map_with_parent_base_dict(self): |
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
766 |
tree = self.make_branch_and_memory_tree('repo', format="2a") |
767 |
tree.lock_write() |
|
768 |
tree.add([''], ['TREE_ROOT']) |
|
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
769 |
revid = tree.commit("foo") |
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
770 |
tree.unlock() |
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
771 |
tree.lock_read() |
772 |
self.addCleanup(tree.unlock) |
|
773 |
inv = tree.branch.repository.get_inventory(revid) |
|
3735.2.41
by Robert Collins
Make the parent_id_basename index be updated during CHKInventory.apply_delta. |
774 |
self.assertNotEqual(None, inv.parent_id_basename_to_file_id) |
775 |
inv.parent_id_basename_to_file_id._ensure_root() |
|
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
776 |
inv.id_to_entry._ensure_root() |
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
777 |
self.assertEqual(65536, inv.id_to_entry._root_node.maximum_size) |
778 |
self.assertEqual(65536, |
|
3735.2.41
by Robert Collins
Make the parent_id_basename index be updated during CHKInventory.apply_delta. |
779 |
inv.parent_id_basename_to_file_id._root_node.maximum_size) |
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
780 |
|
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
781 |
def test_autopack_unchanged_chk_nodes(self): |
782 |
# at 20 unchanged commits, chk pages are packed that are split into
|
|
783 |
# two groups such that the new pack being made doesn't have all its
|
|
784 |
# pages in the source packs (though they are in the repository).
|
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
785 |
# Use a memory backed repository, we don't need to hit disk for this
|
786 |
tree = self.make_branch_and_memory_tree('tree', format='2a') |
|
787 |
tree.lock_write() |
|
788 |
self.addCleanup(tree.unlock) |
|
789 |
tree.add([''], ['TREE_ROOT']) |
|
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
790 |
for pos in range(20): |
791 |
tree.commit(str(pos)) |
|
792 |
||
793 |
def test_pack_with_hint(self): |
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
794 |
tree = self.make_branch_and_memory_tree('tree', format='2a') |
795 |
tree.lock_write() |
|
796 |
self.addCleanup(tree.unlock) |
|
797 |
tree.add([''], ['TREE_ROOT']) |
|
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
798 |
# 1 commit to leave untouched
|
799 |
tree.commit('1') |
|
800 |
to_keep = tree.branch.repository._pack_collection.names() |
|
801 |
# 2 to combine
|
|
802 |
tree.commit('2') |
|
803 |
tree.commit('3') |
|
804 |
all = tree.branch.repository._pack_collection.names() |
|
805 |
combine = list(set(all) - set(to_keep)) |
|
806 |
self.assertLength(3, all) |
|
807 |
self.assertLength(2, combine) |
|
808 |
tree.branch.repository.pack(hint=combine) |
|
809 |
final = tree.branch.repository._pack_collection.names() |
|
810 |
self.assertLength(2, final) |
|
811 |
self.assertFalse(combine[0] in final) |
|
812 |
self.assertFalse(combine[1] in final) |
|
813 |
self.assertSubset(to_keep, final) |
|
814 |
||
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
815 |
def test_stream_source_to_gc(self): |
4462.2.1
by Robert Collins
Add new attribute to RepositoryFormat pack_compresses, hinting when pack can be useful. |
816 |
source = self.make_repository('source', format='2a') |
817 |
target = self.make_repository('target', format='2a') |
|
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
818 |
stream = source._get_source(target._format) |
819 |
self.assertIsInstance(stream, groupcompress_repo.GroupCHKStreamSource) |
|
820 |
||
821 |
def test_stream_source_to_non_gc(self):
    """A non-groupcompress target falls back to the generic StreamSource."""
    source = self.make_repository('source', format='2a')
    target = self.make_repository('target', format='rich-root-pack')
    stream = source._get_source(target._format)
    # We don't want the child GroupCHKStreamSource here, so compare the
    # exact type rather than using isinstance.
    self.assertIs(type(stream), repository.StreamSource)
4360.4.9
by John Arbash Meinel
Merge bzr.dev, bringing in the gc stacking fixes. |
828 |
def test_get_stream_for_missing_keys_includes_all_chk_refs(self):
    """get_stream_for_missing_keys must send every referenced chk page.

    A plain get_stream for one revision only transmits the chk pages that
    are new in that revision; when filling in *missing* keys the full set
    of pages referenced by the inventory has to be streamed.
    """
    source_builder = self.make_branch_builder('source',
        format='2a')
    # We have to build a fairly large tree, so that we are sure the chk
    # pages will have split into multiple pages.
    entries = [('add', ('', 'a-root-id', 'directory', None))]
    for i in 'abcdefghijklmnopqrstuvwxyz123456789':
        for j in 'abcdefghijklmnopqrstuvwxyz123456789':
            fname = i + j
            fid = fname + '-id'
            content = 'content for %s\n' % (fname,)
            entries.append(('add', (fname, fid, 'file', content)))
    source_builder.start_series()
    source_builder.build_snapshot('rev-1', None, entries)
    # Now change a few of them, so we get a few new pages for the second
    # revision
    source_builder.build_snapshot('rev-2', ['rev-1'], [
        ('modify', ('aa-id', 'new content for aa-id\n')),
        ('modify', ('cc-id', 'new content for cc-id\n')),
        ('modify', ('zz-id', 'new content for zz-id\n')),
        ])
    source_builder.finish_series()
    source_branch = source_builder.get_branch()
    source_branch.lock_read()
    self.addCleanup(source_branch.unlock)
    target = self.make_repository('target', format='2a')
    source = source_branch.repository._get_source(target._format)
    self.assertIsInstance(source, groupcompress_repo.GroupCHKStreamSource)

    # On a regular pass, getting the inventories and chk pages for rev-2
    # would only get the newly created chk pages
    search = graph.SearchResult(set(['rev-2']), set(['rev-1']), 1,
                                set(['rev-2']))
    simple_chk_records = []
    for vf_name, substream in source.get_stream(search):
        if vf_name == 'chk_bytes':
            for record in substream:
                simple_chk_records.append(record.key)
        else:
            # Drain every other substream without inspecting it.
            for _ in substream:
                continue
    # 3 pages, the root (InternalNode), + 2 pages which actually changed
    self.assertEqual([('sha1:91481f539e802c76542ea5e4c83ad416bf219f73',),
                      ('sha1:4ff91971043668583985aec83f4f0ab10a907d3f',),
                      ('sha1:81e7324507c5ca132eedaf2d8414ee4bb2226187',),
                      ('sha1:b101b7da280596c71a4540e9a1eeba8045985ee0',)],
                     simple_chk_records)
    # Now, when we do a similar call using 'get_stream_for_missing_keys'
    # we should get a much larger set of pages.
    missing = [('inventories', 'rev-2')]
    full_chk_records = []
    for vf_name, substream in source.get_stream_for_missing_keys(missing):
        if vf_name == 'inventories':
            for record in substream:
                self.assertEqual(('rev-2',), record.key)
        elif vf_name == 'chk_bytes':
            for record in substream:
                full_chk_records.append(record.key)
        else:
            self.fail('Should not be getting a stream of %s' % (vf_name,))
    # We have 257 records now. This is because we have 1 root page, and 256
    # leaf pages in a complete listing.
    self.assertEqual(257, len(full_chk_records))
    self.assertSubset(simple_chk_records, full_chk_records)
4465.2.7
by Aaron Bentley
Move test_inconsistency_fatal to test_repository |
893 |
def test_inconsistency_fatal(self):
    """Only the revision index treats inconsistent duplicates as fatal."""
    repo = self.make_repository('repo', format='2a')
    self.assertTrue(repo.revisions._index._inconsistency_fatal)
    # Every other versioned-file index tolerates inconsistent entries.
    self.assertFalse(repo.texts._index._inconsistency_fatal)
    self.assertFalse(repo.inventories._index._inconsistency_fatal)
    self.assertFalse(repo.signatures._index._inconsistency_fatal)
    self.assertFalse(repo.chk_bytes._index._inconsistency_fatal)
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
901 |
|
902 |
class TestKnitPackStreamSource(tests.TestCaseWithMemoryTransport):
    """Which stream source is selected between knit-pack repositories.

    Identical formats may use the optimized KnitPackStreamSource; any
    mismatch (or a non-pack target) must fall back to the generic
    repository.StreamSource.
    """

    def _make_repos(self, source_format, target_format):
        # Helper: build a (source, target) repository pair.
        source = self.make_repository('source', format=source_format)
        target = self.make_repository('target', format=target_format)
        return source, target

    def test_source_to_exact_pack_092(self):
        source, target = self._make_repos('pack-0.92', 'pack-0.92')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_rich_root_pack(self):
        source, target = self._make_repos('rich-root-pack', 'rich-root-pack')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19(self):
        source, target = self._make_repos('1.9', '1.9')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19_rich_root(self):
        source, target = self._make_repos('1.9-rich-root', '1.9-rich-root')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_remote_exact_pack_19(self):
        # An exact-format match still holds when the target is accessed
        # over the smart server.
        trans = self.make_smart_server('target')
        trans.ensure_base()
        source = self.make_repository('source', format='1.9')
        target = self.make_repository('target', format='1.9')
        target = repository.Repository.open(trans.base)
        stream_source = source._get_source(target._format)
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_stream_source_to_non_exact(self):
        source, target = self._make_repos('pack-0.92', '1.9')
        stream = source._get_source(target._format)
        self.assertIs(type(stream), repository.StreamSource)

    def test_stream_source_to_non_exact_rich_root(self):
        source, target = self._make_repos('1.9', '1.9-rich-root')
        stream = source._get_source(target._format)
        self.assertIs(type(stream), repository.StreamSource)

    def test_source_to_remote_non_exact_pack_19(self):
        # Format mismatch over the smart server: generic source.
        trans = self.make_smart_server('target')
        trans.ensure_base()
        source = self.make_repository('source', format='1.9')
        target = self.make_repository('target', format='1.6')
        target = repository.Repository.open(trans.base)
        stream_source = source._get_source(target._format)
        self.assertIs(type(stream_source), repository.StreamSource)

    def test_stream_source_to_knit(self):
        source, target = self._make_repos('pack-0.92', 'dirstate')
        stream = source._get_source(target._format)
        self.assertIs(type(stream), repository.StreamSource)
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
964 |
|
4343.3.32
by John Arbash Meinel
Change the tests for _find_revision_outside_set to the new _find_parent_ids function. |
965 |
class TestDevelopment6FindParentIdsOfRevisions(TestCaseWithTransport):
    """Tests for _find_parent_ids_of_revisions."""

    def setUp(self):
        super(TestDevelopment6FindParentIdsOfRevisions, self).setUp()
        self.builder = self.make_branch_builder('source',
            format='development6-rich-root')
        self.builder.start_series()
        # Every test starts from a single 'initial' root-only revision.
        self.builder.build_snapshot('initial', None,
            [('add', ('', 'tree-root', 'directory', None))])
        self.repo = self.builder.get_branch().repository
        self.addCleanup(self.builder.finish_series)

    def assertParentIds(self, expected_result, rev_set):
        # Order-insensitive comparison of the computed parent ids.
        self.assertEqual(sorted(expected_result),
            sorted(self.repo._find_parent_ids_of_revisions(rev_set)))

    def test_simple(self):
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1'], [])
        self.assertParentIds(['revid1'], ['revid2'])

    def test_not_first_parent(self):
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1'], [])
        self.builder.build_snapshot('revid3', ['revid2'], [])
        self.assertParentIds(['revid1'], ['revid3', 'revid2'])

    def test_not_null(self):
        # 'initial' has no parents at all.
        self.assertParentIds([], ['initial'])

    def test_not_null_set(self):
        self.builder.build_snapshot('revid1', None, [])
        self.assertParentIds([], [_mod_revision.NULL_REVISION])

    def test_ghost(self):
        # A ghost in the rev set itself is ignored.
        self.builder.build_snapshot('revid1', None, [])
        self.assertParentIds(['initial'], ['ghost', 'revid1'])

    def test_ghost_parent(self):
        # A ghost parent of a revision in the set is reported.
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1', 'ghost'], [])
        self.assertParentIds(['ghost', 'initial'], ['revid2', 'revid1'])

    def test_righthand_parent(self):
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2a', ['revid1'], [])
        self.builder.build_snapshot('revid2b', ['revid1'], [])
        self.builder.build_snapshot('revid3', ['revid2a', 'revid2b'], [])
        self.assertParentIds(['revid1', 'revid2b'], ['revid3', 'revid2a'])
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
1024 |
class TestWithBrokenRepo(TestCaseWithTransport):
    """These tests seem to be more appropriate as interface tests?"""

    def make_broken_repository(self):
        # XXX: This function is borrowed from Aaron's "Reconcile can fix bad
        # parent references" branch which is due to land in bzr.dev soon.
        # Once it does, this duplication should be removed.
        repo = self.make_repository('broken-repo')
        cleanups = []
        try:
            repo.lock_write()
            cleanups.append(repo.unlock)
            repo.start_write_group()
            cleanups.append(repo.commit_write_group)
            # make rev1a: A well-formed revision, containing 'file1'
            inv = inventory.Inventory(revision_id='rev1a')
            inv.root.revision = 'rev1a'
            self.add_file(repo, inv, 'file1', 'rev1a', [])
            repo.texts.add_lines((inv.root.file_id, 'rev1a'), [], [])
            repo.add_inventory('rev1a', inv, [])
            revision = _mod_revision.Revision('rev1a',
                committer='jrandom@example.com', timestamp=0,
                inventory_sha1='', timezone=0, message='foo', parent_ids=[])
            repo.add_revision('rev1a', revision, inv)

            # make rev1b, which has no Revision, but has an Inventory, and
            # file1
            inv = inventory.Inventory(revision_id='rev1b')
            inv.root.revision = 'rev1b'
            self.add_file(repo, inv, 'file1', 'rev1b', [])
            repo.add_inventory('rev1b', inv, [])

            # make rev2, with file1 and file2
            # file2 is sane
            # file1 has 'rev1b' as an ancestor, even though this is not
            # mentioned by 'rev1a', making it an unreferenced ancestor
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file1', 'rev2', ['rev1a', 'rev1b'])
            self.add_file(repo, inv, 'file2', 'rev2', [])
            self.add_revision(repo, 'rev2', inv, ['rev1a'])

            # make ghost revision rev1c
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file2', 'rev1c', [])

            # make rev3 with file2
            # file2 refers to 'rev1c', which is a ghost in this repository,
            # so file2 cannot have rev1c as its ancestor.
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file2', 'rev3', ['rev1c'])
            self.add_revision(repo, 'rev3', inv, ['rev1c'])
            return repo
        finally:
            # Run cleanups in reverse order (commit the write group before
            # unlocking).
            for cleanup in reversed(cleanups):
                cleanup()

    def add_revision(self, repo, revision_id, inv, parent_ids):
        """Add a complete inventory + revision pair to repo."""
        inv.revision_id = revision_id
        inv.root.revision = revision_id
        repo.texts.add_lines((inv.root.file_id, revision_id), [], [])
        repo.add_inventory(revision_id, inv, parent_ids)
        revision = _mod_revision.Revision(revision_id,
            committer='jrandom@example.com', timestamp=0, inventory_sha1='',
            timezone=0, message='foo', parent_ids=parent_ids)
        repo.add_revision(revision_id, revision, inv)

    def add_file(self, repo, inv, filename, revision, parents):
        """Add one versioned file entry (and its text) to inv and repo."""
        file_id = filename + '-id'
        entry = inventory.InventoryFile(file_id, filename, 'TREE_ROOT')
        entry.revision = revision
        entry.text_size = 0
        inv.add(entry)
        text_key = (file_id, revision)
        parent_keys = [(file_id, parent) for parent in parents]
        repo.texts.add_lines(text_key, parent_keys, ['line\n'])

    def test_insert_from_broken_repo(self):
        """Inserting a data stream from a broken repository won't silently
        corrupt the target repository.
        """
        broken_repo = self.make_broken_repository()
        empty_repo = self.make_repository('empty-repo')
        try:
            empty_repo.fetch(broken_repo)
        except (errors.RevisionNotPresent, errors.BzrCheckError):
            # Test successful: compression parent not being copied leads to
            # error.
            return
        empty_repo.lock_read()
        self.addCleanup(empty_repo.unlock)
        text = empty_repo.texts.get_record_stream(
            [('file2-id', 'rev3')], 'topological', True).next()
        self.assertEqual('line\n', text.get_bytes_as('fulltext'))
2592.3.84
by Robert Collins
Start of autopacking logic. |
1119 |
class TestRepositoryPackCollection(TestCaseWithTransport): |
1120 |
||
1121 |
def get_format(self):
    """Return the bzrdir format used throughout this test class."""
    return bzrdir.format_registry.make_bzrdir('pack-0.92')
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1124 |
def get_packs(self):
    """Create a fresh repository and return its pack collection."""
    repo = self.make_repository('.', format=self.get_format())
    return repo._pack_collection
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1129 |
def make_packs_and_alt_repo(self, write_lock=False):
    """Create a pack repo with 3 packs, and access it via a second repo."""
    tree = self.make_branch_and_tree('.', format=self.get_format())
    tree.lock_write()
    self.addCleanup(tree.unlock)
    # Three commits -> three distinct pack files.
    rev1 = tree.commit('one')
    rev2 = tree.commit('two')
    rev3 = tree.commit('three')
    # Open the same repository independently, as a concurrent client would.
    r = repository.Repository.open('.')
    if write_lock:
        r.lock_write()
    else:
        r.lock_read()
    self.addCleanup(r.unlock)
    packs = r._pack_collection
    packs.ensure_loaded()
    return tree, r, packs, [rev1, rev2, rev3]
4634.127.1
by John Arbash Meinel
Partial fix for bug #507557. |
1147 |
def test__clear_obsolete_packs(self):
    """_clear_obsolete_packs removes every file under obsolete_packs."""
    packs = self.get_packs()
    obsolete_pack_trans = packs.transport.clone('obsolete_packs')
    obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n')
    obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n')
    obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n')
    obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n')
    obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n')
    res = packs._clear_obsolete_packs()
    # The result names the packs removed (by stem), not every file.
    self.assertEqual(['a-pack', 'another-pack'], sorted(res))
    self.assertEqual([], obsolete_pack_trans.list_dir('.'))
1159 |
def test__clear_obsolete_packs_preserve(self):
    """Packs named in 'preserve' keep all their files on disk."""
    packs = self.get_packs()
    obsolete_pack_trans = packs.transport.clone('obsolete_packs')
    obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n')
    obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n')
    obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n')
    obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n')
    obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n')
    res = packs._clear_obsolete_packs(preserve=set(['a-pack']))
    # The preserved pack is still *reported* as found...
    self.assertEqual(['a-pack', 'another-pack'], sorted(res))
    # ...but its files remain on disk.
    self.assertEqual(['a-pack.iix', 'a-pack.pack', 'a-pack.rix'],
                     sorted(obsolete_pack_trans.list_dir('.')))
2592.3.84
by Robert Collins
Start of autopacking logic. |
1172 |
def test__max_pack_count(self):
    """The maximum pack count is a function of the number of revisions."""
    packs = self.get_packs()
    # no revisions - one pack, so that we can have a revision free repo
    # without it blowing up
    self.assertEqual(1, packs._max_pack_count(0))
    # after that the sum of the digits - for 1-9 that is the count itself
    for revision_count in range(1, 10):
        self.assertEqual(revision_count,
                         packs._max_pack_count(revision_count))
    # check the boundary cases with two digits for the next decade
    self.assertEqual(1, packs._max_pack_count(10))
    self.assertEqual(2, packs._max_pack_count(11))
    self.assertEqual(10, packs._max_pack_count(19))
    self.assertEqual(2, packs._max_pack_count(20))
    self.assertEqual(3, packs._max_pack_count(21))
    # check some arbitrary big numbers
    self.assertEqual(25, packs._max_pack_count(112894))
4928.1.1
by Martin Pool
Give RepositoryPackCollection a repr |
1197 |
def test_repr(self):
    """repr(packs) names the collection class and its repository.

    Note: the parentheses in the pattern are escaped. Unescaped, '(' and
    ')' are regex grouping metacharacters, so the original pattern never
    actually required the literal parentheses that the repr contains.
    """
    packs = self.get_packs()
    self.assertContainsRe(repr(packs),
        r'RepositoryPackCollection\(.*Repository\(.*\)\)')
4634.127.2
by John Arbash Meinel
Change the _obsolete_packs code to handle files that are already gone. |
1202 |
def test__obsolete_packs(self):
    """_obsolete_packs copes with files another process already moved."""
    tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)
    names = packs.names()
    pack = packs.get_pack_by_name(names[0])
    # Schedule this one for removal
    packs._remove_pack_from_memory(pack)
    # Simulate a concurrent update by renaming the .pack file and one of
    # the indices
    packs.transport.rename('packs/%s.pack' % (names[0],),
                           'obsolete_packs/%s.pack' % (names[0],))
    packs.transport.rename('indices/%s.iix' % (names[0],),
                           'obsolete_packs/%s.iix' % (names[0],))
    # Now trigger the obsoletion, and ensure that all the remaining files
    # are still renamed
    packs._obsolete_packs([pack])
    self.assertEqual([n + '.pack' for n in names[1:]],
                     sorted(packs._pack_transport.list_dir('.')))
    # names[0] should not be present in the index anymore
    self.assertEqual(names[1:],
        sorted(set([osutils.splitext(n)[0] for n in
                    packs._index_transport.list_dir('.')])))
2592.3.84
by Robert Collins
Start of autopacking logic. |
1224 |
def test_pack_distribution_zero(self):
    """Zero revisions still yields a single (empty) pack bucket."""
    packs = self.get_packs()
    self.assertEqual([0], packs.pack_distribution(0))
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1227 |
|
1228 |
def test_ensure_loaded_unlocked(self):
    """ensure_loaded on an unlocked repository raises ObjectNotLocked."""
    packs = self.get_packs()
    self.assertRaises(errors.ObjectNotLocked,
                      packs.ensure_loaded)
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1232 |
|
2592.3.84
by Robert Collins
Start of autopacking logic. |
1233 |
def test_pack_distribution_one_to_nine(self):
    """Up to nine revisions distribute as that many single-rev packs."""
    packs = self.get_packs()
    for revision_count in range(1, 10):
        self.assertEqual([1] * revision_count,
                         packs.pack_distribution(revision_count))
1254 |
def test_pack_distribution_stable_at_boundaries(self):
    """When there are multi-rev packs the counts are stable."""
    packs = self.get_packs()
    # in 10s:
    self.assertEqual([10], packs.pack_distribution(10))
    self.assertEqual([10, 1], packs.pack_distribution(11))
    self.assertEqual([10, 10], packs.pack_distribution(20))
    self.assertEqual([10, 10, 1], packs.pack_distribution(21))
    # 100s
    self.assertEqual([100], packs.pack_distribution(100))
    self.assertEqual([100, 1], packs.pack_distribution(101))
    self.assertEqual([100, 10, 1], packs.pack_distribution(111))
    self.assertEqual([100, 100], packs.pack_distribution(200))
    self.assertEqual([100, 100, 1], packs.pack_distribution(201))
    self.assertEqual([100, 100, 10, 1], packs.pack_distribution(211))
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1270 |
def test_plan_pack_operations_2009_revisions_skip_all_packs(self):
    """Packs already matching the distribution need no repacking."""
    packs = self.get_packs()
    existing_packs = [(2000, "big"), (9, "medium")]
    # rev count - 2009 -> 2x1000 + 9x1
    pack_operations = packs.plan_autopack_combinations(
        existing_packs, [1000, 1000, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    self.assertEqual([], pack_operations)
1278 |
def test_plan_pack_operations_2010_revisions_skip_all_packs(self):
    """Adding one tiny pack still within the distribution plans nothing."""
    packs = self.get_packs()
    existing_packs = [(2000, "big"), (9, "medium"), (1, "single")]
    # rev count - 2010 -> 2x1000 + 1x10
    pack_operations = packs.plan_autopack_combinations(
        existing_packs, [1000, 1000, 10])
    self.assertEqual([], pack_operations)
1286 |
def test_plan_pack_operations_2010_combines_smallest_two(self):
    """Exceeding the distribution combines the two smallest packs."""
    packs = self.get_packs()
    existing_packs = [(1999, "big"), (9, "medium"), (1, "single2"),
                      (1, "single1")]
    # rev count - 2010 -> 2x1000 + 1x10 (3)
    pack_operations = packs.plan_autopack_combinations(
        existing_packs, [1000, 1000, 10])
    self.assertEqual([[2, ["single2", "single1"]]], pack_operations)
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1294 |
|
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1295 |
def test_plan_pack_operations_creates_a_single_op(self):
    """Packs that must be rewritten anyway are merged into one operation."""
    packs = self.get_packs()
    existing_packs = [(50, 'a'), (40, 'b'), (30, 'c'), (10, 'd'),
                      (10, 'e'), (6, 'f'), (4, 'g')]
    # rev count 150 -> 1x100 and 5x10
    # The two size 10 packs do not need to be touched. The 50, 40, 30 would
    # be combined into a single 120 size pack, and the 6 & 4 would be
    # combined into a size 10 pack. However, if we have to rewrite them,
    # we save a pack file with no increased I/O by putting them into the
    # same file.
    distribution = packs.pack_distribution(150)
    pack_operations = packs.plan_autopack_combinations(existing_packs,
                                                       distribution)
    self.assertEqual([[130, ['a', 'b', 'c', 'f', 'g']]], pack_operations)
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1309 |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1310 |
def test_all_packs_none(self): |
1311 |
format = self.get_format() |
|
1312 |
tree = self.make_branch_and_tree('.', format=format) |
|
1313 |
tree.lock_read() |
|
1314 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1315 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1316 |
packs.ensure_loaded() |
1317 |
self.assertEqual([], packs.all_packs()) |
|
1318 |
||
1319 |
def test_all_packs_one(self): |
|
1320 |
format = self.get_format() |
|
1321 |
tree = self.make_branch_and_tree('.', format=format) |
|
1322 |
tree.commit('start') |
|
1323 |
tree.lock_read() |
|
1324 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1325 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1326 |
packs.ensure_loaded() |
2592.3.176
by Robert Collins
Various pack refactorings. |
1327 |
self.assertEqual([ |
1328 |
packs.get_pack_by_name(packs.names()[0])], |
|
1329 |
packs.all_packs()) |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1330 |
|
1331 |
def test_all_packs_two(self): |
|
1332 |
format = self.get_format() |
|
1333 |
tree = self.make_branch_and_tree('.', format=format) |
|
1334 |
tree.commit('start') |
|
1335 |
tree.commit('continue') |
|
1336 |
tree.lock_read() |
|
1337 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1338 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1339 |
packs.ensure_loaded() |
1340 |
self.assertEqual([ |
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1341 |
packs.get_pack_by_name(packs.names()[0]), |
1342 |
packs.get_pack_by_name(packs.names()[1]), |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1343 |
], packs.all_packs()) |
1344 |
||
2592.3.176
by Robert Collins
Various pack refactorings. |
1345 |
def test_get_pack_by_name(self): |
1346 |
format = self.get_format() |
|
1347 |
tree = self.make_branch_and_tree('.', format=format) |
|
1348 |
tree.commit('start') |
|
1349 |
tree.lock_read() |
|
1350 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1351 |
packs = tree.branch.repository._pack_collection |
4145.1.6
by Robert Collins
More test fallout, but all caught now. |
1352 |
packs.reset() |
2592.3.176
by Robert Collins
Various pack refactorings. |
1353 |
packs.ensure_loaded() |
1354 |
name = packs.names()[0] |
|
1355 |
pack_1 = packs.get_pack_by_name(name) |
|
1356 |
# the pack should be correctly initialised
|
|
3517.4.5
by Martin Pool
Correct use of packs._names in test_get_pack_by_name |
1357 |
sizes = packs._names[name] |
3221.12.4
by Robert Collins
Implement basic repository supporting external references. |
1358 |
rev_index = GraphIndex(packs._index_transport, name + '.rix', sizes[0]) |
1359 |
inv_index = GraphIndex(packs._index_transport, name + '.iix', sizes[1]) |
|
1360 |
txt_index = GraphIndex(packs._index_transport, name + '.tix', sizes[2]) |
|
1361 |
sig_index = GraphIndex(packs._index_transport, name + '.six', sizes[3]) |
|
2592.3.191
by Robert Collins
Give Pack responsibility for index naming, and two concrete classes - NewPack for new packs and ExistingPack for packs we read from disk. |
1362 |
self.assertEqual(pack_repo.ExistingPack(packs._pack_transport, |
2592.3.219
by Robert Collins
Review feedback. |
1363 |
name, rev_index, inv_index, txt_index, sig_index), pack_1) |
2592.3.176
by Robert Collins
Various pack refactorings. |
1364 |
# and the same instance should be returned on successive calls.
|
1365 |
self.assertTrue(pack_1 is packs.get_pack_by_name(name)) |
|
1366 |
||
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1367 |
def test_reload_pack_names_new_entry(self): |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1368 |
tree, r, packs, revs = self.make_packs_and_alt_repo() |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1369 |
names = packs.names() |
1370 |
# Add a new pack file into the repository
|
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1371 |
rev4 = tree.commit('four') |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1372 |
new_names = tree.branch.repository._pack_collection.names() |
1373 |
new_name = set(new_names).difference(names) |
|
1374 |
self.assertEqual(1, len(new_name)) |
|
1375 |
new_name = new_name.pop() |
|
1376 |
# The old collection hasn't noticed yet
|
|
1377 |
self.assertEqual(names, packs.names()) |
|
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1378 |
self.assertTrue(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1379 |
self.assertEqual(new_names, packs.names()) |
1380 |
# And the repository can access the new revision
|
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1381 |
self.assertEqual({rev4:(revs[-1],)}, r.get_parent_map([rev4])) |
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1382 |
self.assertFalse(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1383 |
|
1384 |
def test_reload_pack_names_added_and_removed(self): |
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1385 |
tree, r, packs, revs = self.make_packs_and_alt_repo() |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1386 |
names = packs.names() |
1387 |
# Now repack the whole thing
|
|
1388 |
tree.branch.repository.pack() |
|
1389 |
new_names = tree.branch.repository._pack_collection.names() |
|
1390 |
# The other collection hasn't noticed yet
|
|
1391 |
self.assertEqual(names, packs.names()) |
|
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1392 |
self.assertTrue(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1393 |
self.assertEqual(new_names, packs.names()) |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1394 |
self.assertEqual({revs[-1]:(revs[-2],)}, r.get_parent_map([revs[-1]])) |
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1395 |
self.assertFalse(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1396 |
|
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1397 |
def test_reload_pack_names_preserves_pending(self): |
1398 |
# TODO: Update this to also test for pending-deleted names
|
|
1399 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1400 |
# We will add one pack (via start_write_group + insert_record_stream),
|
|
1401 |
# and remove another pack (via _remove_pack_from_memory)
|
|
1402 |
orig_names = packs.names() |
|
1403 |
orig_at_load = packs._packs_at_load |
|
1404 |
to_remove_name = iter(orig_names).next() |
|
1405 |
r.start_write_group() |
|
1406 |
self.addCleanup(r.abort_write_group) |
|
1407 |
r.texts.insert_record_stream([versionedfile.FulltextContentFactory( |
|
1408 |
('text', 'rev'), (), None, 'content\n')]) |
|
1409 |
new_pack = packs._new_pack |
|
1410 |
self.assertTrue(new_pack.data_inserted()) |
|
1411 |
new_pack.finish() |
|
1412 |
packs.allocate(new_pack) |
|
1413 |
packs._new_pack = None |
|
1414 |
removed_pack = packs.get_pack_by_name(to_remove_name) |
|
1415 |
packs._remove_pack_from_memory(removed_pack) |
|
1416 |
names = packs.names() |
|
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1417 |
all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names() |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1418 |
new_names = set([x[0][0] for x in new_nodes]) |
1419 |
self.assertEqual(names, sorted([x[0][0] for x in all_nodes])) |
|
1420 |
self.assertEqual(set(names) - set(orig_names), new_names) |
|
1421 |
self.assertEqual(set([new_pack.name]), new_names) |
|
1422 |
self.assertEqual([to_remove_name], |
|
1423 |
sorted([x[0][0] for x in deleted_nodes])) |
|
1424 |
packs.reload_pack_names() |
|
1425 |
reloaded_names = packs.names() |
|
1426 |
self.assertEqual(orig_at_load, packs._packs_at_load) |
|
1427 |
self.assertEqual(names, reloaded_names) |
|
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1428 |
all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names() |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1429 |
new_names = set([x[0][0] for x in new_nodes]) |
1430 |
self.assertEqual(names, sorted([x[0][0] for x in all_nodes])) |
|
1431 |
self.assertEqual(set(names) - set(orig_names), new_names) |
|
1432 |
self.assertEqual(set([new_pack.name]), new_names) |
|
1433 |
self.assertEqual([to_remove_name], |
|
1434 |
sorted([x[0][0] for x in deleted_nodes])) |
|
1435 |
||
4634.127.5
by John Arbash Meinel
Possible fix for making sure packs triggering autopacking get cleaned up. |
1436 |
def test_autopack_obsoletes_new_pack(self): |
1437 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1438 |
packs._max_pack_count = lambda x: 1 |
|
1439 |
packs.pack_distribution = lambda x: [10] |
|
1440 |
r.start_write_group() |
|
1441 |
r.revisions.insert_record_stream([versionedfile.FulltextContentFactory( |
|
1442 |
('bogus-rev',), (), None, 'bogus-content\n')]) |
|
1443 |
# This should trigger an autopack, which will combine everything into a
|
|
1444 |
# single pack file.
|
|
1445 |
new_names = r.commit_write_group() |
|
1446 |
names = packs.names() |
|
1447 |
self.assertEqual(1, len(names)) |
|
1448 |
self.assertEqual([names[0] + '.pack'], |
|
1449 |
packs._pack_transport.list_dir('.')) |
|
1450 |
||
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1451 |
def test_autopack_reloads_and_stops(self): |
1452 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1453 |
# After we have determined what needs to be autopacked, trigger a
|
|
1454 |
# full-pack via the other repo which will cause us to re-evaluate and
|
|
1455 |
# decide we don't need to do anything
|
|
1456 |
orig_execute = packs._execute_pack_operations |
|
1457 |
def _munged_execute_pack_ops(*args, **kwargs): |
|
1458 |
tree.branch.repository.pack() |
|
1459 |
return orig_execute(*args, **kwargs) |
|
1460 |
packs._execute_pack_operations = _munged_execute_pack_ops |
|
1461 |
packs._max_pack_count = lambda x: 1 |
|
1462 |
packs.pack_distribution = lambda x: [10] |
|
1463 |
self.assertFalse(packs.autopack()) |
|
1464 |
self.assertEqual(1, len(packs.names())) |
|
1465 |
self.assertEqual(tree.branch.repository._pack_collection.names(), |
|
1466 |
packs.names()) |
|
1467 |
||
4634.127.1
by John Arbash Meinel
Partial fix for bug #507557. |
1468 |
def test__save_pack_names(self): |
1469 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1470 |
names = packs.names() |
|
1471 |
pack = packs.get_pack_by_name(names[0]) |
|
1472 |
packs._remove_pack_from_memory(pack) |
|
1473 |
packs._save_pack_names(obsolete_packs=[pack]) |
|
1474 |
cur_packs = packs._pack_transport.list_dir('.') |
|
1475 |
self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs)) |
|
1476 |
# obsolete_packs will also have stuff like .rix and .iix present.
|
|
1477 |
obsolete_packs = packs.transport.list_dir('obsolete_packs') |
|
1478 |
obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs]) |
|
1479 |
self.assertEqual([pack.name], sorted(obsolete_names)) |
|
1480 |
||
1481 |
def test__save_pack_names_already_obsoleted(self): |
|
1482 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1483 |
names = packs.names() |
|
1484 |
pack = packs.get_pack_by_name(names[0]) |
|
1485 |
packs._remove_pack_from_memory(pack) |
|
1486 |
# We are going to simulate a concurrent autopack by manually obsoleting
|
|
1487 |
# the pack directly.
|
|
1488 |
packs._obsolete_packs([pack]) |
|
1489 |
packs._save_pack_names(clear_obsolete_packs=True, |
|
1490 |
obsolete_packs=[pack]) |
|
1491 |
cur_packs = packs._pack_transport.list_dir('.') |
|
1492 |
self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs)) |
|
1493 |
# Note that while we set clear_obsolete_packs=True, it should not
|
|
1494 |
# delete a pack file that we have also scheduled for obsoletion.
|
|
1495 |
obsolete_packs = packs.transport.list_dir('obsolete_packs') |
|
1496 |
obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs]) |
|
1497 |
self.assertEqual([pack.name], sorted(obsolete_names)) |
|
1498 |
||
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1499 |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1500 |
|
1501 |
class TestPack(TestCaseWithTransport):
    """Tests for the Pack object."""

    def assertCurrentlyEqual(self, left, right):
        """Assert ==/!= agree symmetrically that left and right are equal."""
        self.assertTrue(left == right)
        self.assertTrue(right == left)
        self.assertFalse(left != right)
        self.assertFalse(right != left)

    def assertCurrentlyNotEqual(self, left, right):
        """Assert ==/!= agree symmetrically that left and right differ."""
        self.assertFalse(left == right)
        self.assertFalse(right == left)
        self.assertTrue(left != right)
        self.assertTrue(right != left)

    def test___eq____ne__(self):
        """Equality covers every attribute of an ExistingPack."""
        left = pack_repo.ExistingPack('', '', '', '', '', '')
        right = pack_repo.ExistingPack('', '', '', '', '', '')
        self.assertCurrentlyEqual(left, right)
        # Change each attribute in turn: mutating one side must break
        # equality, and mirroring the change on the other side must restore
        # it.  (Previously this was six hand-unrolled copies of the same
        # three steps.)
        for attr in ['revision_index', 'inventory_index', 'text_index',
                     'signature_index', 'name', 'transport']:
            setattr(left, attr, 'a')
            self.assertCurrentlyNotEqual(left, right)
            setattr(right, attr, 'a')
            self.assertCurrentlyEqual(left, right)

    def test_file_name(self):
        """file_name() is simply '<name>.pack'."""
        pack = pack_repo.ExistingPack('', 'a_name', '', '', '', '')
        self.assertEqual('a_name.pack', pack.file_name())
2592.3.192
by Robert Collins
Move new revision index management to NewPack. |
1549 |
|
1550 |
||
1551 |
class TestNewPack(TestCaseWithTransport):
    """Tests for pack_repo.NewPack."""

    def test_new_instance_attributes(self):
        """A freshly constructed NewPack exposes the expected attributes.

        Builds a RepositoryPackCollection by hand (repo=None, BTree index
        classes, no CHK index) and checks that the NewPack created from it
        wires up its transports, index builders, content hash, and naming
        state.
        """
        upload_transport = self.get_transport('upload')
        pack_transport = self.get_transport('pack')
        index_transport = self.get_transport('index')
        upload_transport.mkdir('.')
        collection = pack_repo.RepositoryPackCollection(
            repo=None,
            transport=self.get_transport('.'),
            index_transport=index_transport,
            upload_transport=upload_transport,
            pack_transport=pack_transport,
            index_builder_class=BTreeBuilder,
            index_class=BTreeGraphIndex,
            use_chk_index=False)
        pack = pack_repo.NewPack(collection)
        self.addCleanup(pack.abort) # Make sure the write stream gets closed
        # Per-component index builders come from index_builder_class.
        self.assertIsInstance(pack.revision_index, BTreeBuilder)
        self.assertIsInstance(pack.inventory_index, BTreeBuilder)
        # The running content hash is an md5 object.
        self.assertIsInstance(pack._hash, type(osutils.md5()))
        # The three transports are taken from the collection unchanged.
        self.assertTrue(pack.upload_transport is upload_transport)
        self.assertTrue(pack.index_transport is index_transport)
        self.assertTrue(pack.pack_transport is pack_transport)
        # Index sizes are unknown until the pack is finished.
        self.assertEqual(None, pack.index_sizes)
        # The temporary upload name is a 20-character random byte string.
        self.assertEqual(20, len(pack.random_name))
        self.assertIsInstance(pack.random_name, str)
        self.assertIsInstance(pack.start_time, float)
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1580 |
|
1581 |
||
1582 |
class TestPacker(TestCaseWithTransport):
    """Tests for the packs repository Packer class."""

    def test_pack_optimizes_pack_order(self):
        """Packing a revision subset reorders .packs to read those first."""
        builder = self.make_branch_builder('.', format="1.9")
        builder.start_series()
        # Four snapshots produce four pack files which, by construction,
        # correspond to ['D', 'C', 'B', 'A'].
        builder.build_snapshot('A', None, [
            ('add', ('', 'root-id', 'directory', None)),
            ('add', ('f', 'f-id', 'file', 'content\n'))])
        builder.build_snapshot('B', ['A'],
            [('modify', ('f-id', 'new-content\n'))])
        builder.build_snapshot('C', ['B'],
            [('modify', ('f-id', 'third-content\n'))])
        builder.build_snapshot('D', ['C'],
            [('modify', ('f-id', 'fourth-content\n'))])
        branch = builder.get_branch()
        branch.lock_read()
        builder.finish_series()
        self.addCleanup(branch.unlock)
        packs = branch.repository._pack_collection.packs
        packer = pack_repo.Packer(branch.repository._pack_collection,
                                  packs, 'testing',
                                  revision_ids=['B', 'C'])
        # Copying only the B & C revisions should float their packs
        # (indices 1 and 2) to the front of the .packs attribute, keeping
        # the remaining packs in their original order.
        expected_order = [packs[1], packs[2], packs[0], packs[3]]
        packer.pack()
        self.assertEqual(expected_order, packer.packs)
|
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
1615 |
|
1616 |
||
3777.5.4
by John Arbash Meinel
OptimisingPacker now sets the optimize flags for the indexes being built. |
1617 |
class TestOptimisingPacker(TestCaseWithTransport):
    """Tests for the OptimisingPacker class."""

    def get_pack_collection(self):
        """Create a fresh repository and return its pack collection."""
        return self.make_repository('.')._pack_collection

    def test_open_pack_will_optimise(self):
        """open_pack() flags every index builder to optimise for size."""
        packer = pack_repo.OptimisingPacker(self.get_pack_collection(),
                                            [], '.test')
        pack = packer.open_pack()
        self.addCleanup(pack.abort)  # ensure cleanup
        self.assertIsInstance(pack, pack_repo.NewPack)
        for index in [pack.revision_index, pack.inventory_index,
                      pack.text_index, pack.signature_index]:
            self.assertTrue(index._optimize_for_size)
|
4462.2.6
by Robert Collins
Cause StreamSink to partially pack repositories after cross format fetches when beneficial. |
1634 |
|
1635 |
||
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
1636 |
class TestCrossFormatPacks(TestCaseWithTransport):
    """Tests that cross-format transfers pack the target when worthwhile."""

    def log_pack(self, hint=None):
        """Stand-in for the target repository's pack() that records calls.

        Appends ('pack', hint) to self.calls, delegates to the original
        pack, and -- when self.expect_hint is set -- asserts a truthy hint
        was supplied.
        """
        self.calls.append(('pack', hint))
        self.orig_pack(hint=hint)
        if self.expect_hint:
            self.assertTrue(hint)

    def run_stream(self, src_fmt, target_fmt, expect_pack_called):
        """Insert a stream from src_fmt into target_fmt; check pack calls.

        :param expect_pack_called: if True, inserting the stream must
            trigger exactly one pack() call (with a hint); otherwise no
            pack() call may happen.
        """
        self.expect_hint = expect_pack_called
        self.calls = []
        source_tree = self.make_branch_and_tree('src', format=src_fmt)
        source_tree.lock_write()
        self.addCleanup(source_tree.unlock)
        tip = source_tree.commit('foo')
        target = self.make_repository('target', format=target_fmt)
        target.lock_write()
        self.addCleanup(target.unlock)
        source = source_tree.branch.repository._get_source(target._format)
        # Divert target.pack through the logging wrapper above.
        self.orig_pack = target.pack
        target.pack = self.log_pack
        search = target.search_missing_revision_ids(
            source_tree.branch.repository, tip)
        stream = source.get_stream(search)
        from_format = source_tree.branch.repository._format
        sink = target._get_sink()
        sink.insert_stream(stream, from_format, [])
        if expect_pack_called:
            self.assertLength(1, self.calls)
        else:
            self.assertLength(0, self.calls)

    def run_fetch(self, src_fmt, target_fmt, expect_pack_called):
        """Fetch from src_fmt into target_fmt; check pack calls.

        Same contract as run_stream, but drives the transfer through
        Repository.fetch rather than an explicit stream/sink insertion.
        """
        self.expect_hint = expect_pack_called
        self.calls = []
        source_tree = self.make_branch_and_tree('src', format=src_fmt)
        source_tree.lock_write()
        self.addCleanup(source_tree.unlock)
        tip = source_tree.commit('foo')
        target = self.make_repository('target', format=target_fmt)
        target.lock_write()
        self.addCleanup(target.unlock)
        source = source_tree.branch.repository
        # Divert target.pack through the logging wrapper above.
        self.orig_pack = target.pack
        target.pack = self.log_pack
        target.fetch(source)
        if expect_pack_called:
            self.assertLength(1, self.calls)
        else:
            self.assertLength(0, self.calls)

    def test_sink_format_hint_no(self):
        # When the target format says packing makes no difference, pack is not
        # called.
        self.run_stream('1.9', 'rich-root-pack', False)

    def test_sink_format_hint_yes(self):
        # When the target format says packing makes a difference, pack is
        # called.
        self.run_stream('1.9', '2a', True)

    def test_sink_format_same_no(self):
        # When the formats are the same, pack is not called.
        self.run_stream('2a', '2a', False)

    def test_IDS_format_hint_no(self):
        # When the target format says packing makes no difference, pack is not
        # called.
        self.run_fetch('1.9', 'rich-root-pack', False)

    def test_IDS_format_hint_yes(self):
        # When the target format says packing makes a difference, pack is
        # called.
        self.run_fetch('1.9', '2a', True)

    def test_IDS_format_same_no(self):
        # When the formats are the same, pack is not called.
        self.run_fetch('2a', '2a', False)