# Copyright (C) 2006-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Tests for the Repository facility that are not interface tests.

For interface tests see tests/per_repository/*.py.

For concrete class tests see this file, and for storage formats tests
also see this file.
"""
|
24 |
||
1773.4.1
by Martin Pool
Add pyflakes makefile target; fix many warnings |
25 |
from stat import S_ISDIR |
4789.25.4
by John Arbash Meinel
Turn a repository format 7 failure into a KnownFailure. |
26 |
import sys |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
27 |
|
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
28 |
import bzrlib |
5121.2.2
by Jelmer Vernooij
Remove more unused imports in the tests. |
29 |
from bzrlib.errors import (NoSuchFile, |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
30 |
UnknownFormatError, |
31 |
UnsupportedFormatError, |
|
32 |
)
|
|
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
33 |
from bzrlib import ( |
34 |
graph, |
|
35 |
tests, |
|
36 |
)
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
37 |
from bzrlib.btree_index import BTreeBuilder, BTreeGraphIndex |
5121.2.2
by Jelmer Vernooij
Remove more unused imports in the tests. |
38 |
from bzrlib.index import GraphIndex |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
39 |
from bzrlib.repository import RepositoryFormat |
2670.3.5
by Andrew Bennetts
Remove get_stream_as_bytes from KnitVersionedFile's API, make it a function in knitrepo.py instead. |
40 |
from bzrlib.tests import ( |
41 |
TestCase, |
|
42 |
TestCaseWithTransport, |
|
43 |
)
|
|
3446.2.1
by Martin Pool
Failure to delete an obsolete pack file should not be fatal. |
44 |
from bzrlib.transport import ( |
45 |
get_transport, |
|
46 |
)
|
|
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
47 |
from bzrlib import ( |
2535.3.41
by Andrew Bennetts
Add tests for InterRemoteToOther.is_compatible. |
48 |
bzrdir, |
49 |
errors, |
|
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
50 |
inventory, |
2929.3.5
by Vincent Ladeuil
New files, same warnings, same fixes. |
51 |
osutils, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
52 |
repository, |
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
53 |
revision as _mod_revision, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
54 |
upgrade, |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
55 |
versionedfile, |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
56 |
workingtree, |
57 |
)
|
|
3735.42.5
by John Arbash Meinel
Change the tests so we now just use a direct test that _get_source is |
58 |
from bzrlib.repofmt import ( |
59 |
groupcompress_repo, |
|
60 |
knitrepo, |
|
61 |
pack_repo, |
|
62 |
weaverepo, |
|
63 |
)
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
64 |
|
65 |
||
66 |
class TestDefaultFormat(TestCase):
    """Exercise registry-driven selection of the default repository format."""

    def test_get_set_default_format(self):
        # Capture the current default so it can be restored afterwards.
        old_default = bzrdir.format_registry.get('default')
        private_default = old_default().repository_format.__class__
        old_format = repository.RepositoryFormat.get_default_format()
        self.assertTrue(isinstance(old_format, private_default))

        def make_sample_bzrdir():
            # A metadir whose repository format is the sample format.
            my_bzrdir = bzrdir.BzrDirMetaFormat1()
            my_bzrdir.repository_format = SampleRepositoryFormat()
            return my_bzrdir

        bzrdir.format_registry.remove('default')
        bzrdir.format_registry.register('sample', make_sample_bzrdir, '')
        bzrdir.format_registry.set_default('sample')
        # creating a repository should now create an instrumented dir.
        try:
            # the default branch format is used by the meta dir format
            # which is not the default bzrdir format at this point
            control = bzrdir.BzrDirMetaFormat1().initialize('memory:///')
            result = control.create_repository()
            self.assertEqual(result, 'A bzr repository dir')
        finally:
            # Restore the registry regardless of the outcome above.
            bzrdir.format_registry.remove('default')
            bzrdir.format_registry.remove('sample')
            bzrdir.format_registry.register('default', old_default, '')
        self.assertIsInstance(repository.RepositoryFormat.get_default_format(),
                              old_format.__class__)
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
93 |
|
94 |
||
95 |
class SampleRepositoryFormat(repository.RepositoryFormat):
    """A sample repository format.

    This format is initializable but not supported, to aid in testing
    the open and open(unsupported=True) routines.
    """

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Sample .bzr repository format."

    def is_supported(self):
        # Deliberately unsupported so Repository.open refuses it.
        return False

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository in a BzrDir"""
        t = a_bzrdir.get_repository_transport(self)
        t.put_bytes('format', self.get_format_string())
        return 'A bzr repository dir'

    def open(self, a_bzrdir, _found=False):
        # Returns a marker string so tests can detect this path was taken.
        return "opened repository."
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
117 |
|
118 |
||
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
119 |
class TestRepositoryFormat(TestCaseWithTransport):
    """Tests for the Repository format detection used by the bzr meta dir facility."""

    def test_find_format(self):
        # is the right format object found for a repository?
        # create a branch with a few known format objects.
        self.build_tree(["foo/", "bar/"])

        def check_format(format, url):
            # Initialize a repository of the given format at url, then
            # verify find_format detects exactly that format class on disk.
            dir = format._matchingbzrdir.initialize(url)
            format.initialize(dir)
            t = get_transport(url)
            found_format = repository.RepositoryFormat.find_format(dir)
            # assertTrue replaces the deprecated failUnless alias.
            self.assertTrue(isinstance(found_format, format.__class__))

        check_format(weaverepo.RepositoryFormat7(), "bar")

    def test_find_format_no_repository(self):
        # A bzrdir without a repository raises NoRepositoryPresent
        # rather than returning some bogus format object.
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        self.assertRaises(errors.NoRepositoryPresent,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_find_format_unknown_format(self):
        # An on-disk format string that is not registered raises
        # UnknownFormatError.
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        SampleRepositoryFormat().initialize(dir)
        self.assertRaises(UnknownFormatError,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_register_unregister_format(self):
        format = SampleRepositoryFormat()
        # make a control dir
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        # make a repo
        format.initialize(dir)
        # register a format for it.
        repository.RepositoryFormat.register_format(format)
        # which repository.Open will refuse (not supported)
        self.assertRaises(UnsupportedFormatError, repository.Repository.open,
                          self.get_url())
        # but open(unsupported) will work
        self.assertEqual(format.open(dir), "opened repository.")
        # unregister the format
        repository.RepositoryFormat.unregister_format(format)
|
162 |
||
163 |
||
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
164 |
class TestFormat6(TestCaseWithTransport):
    """Tests for the all-in-one weave format 6 repository."""

    def _make_repo(self):
        # Create a BzrDirFormat6 control dir holding a format-6 repository.
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        return control, weaverepo.RepositoryFormat6().initialize(control)

    def test_attribute__fetch_order(self):
        """Weaves need topological data insertion."""
        control, repo = self._make_repo()
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Weaves do not reuse deltas."""
        control, repo = self._make_repo()
        self.assertEqual(False, repo._format._fetch_uses_deltas)

    def test_attribute__fetch_reconcile(self):
        """Weave repositories need a reconcile after fetch."""
        control, repo = self._make_repo()
        self.assertEqual(True, repo._format._fetch_reconcile)

    def test_no_ancestry_weave(self):
        control, repo = self._make_repo()
        # We no longer need to create the ancestry.weave file
        # since it is *never* used.
        self.assertRaises(NoSuchFile,
                          control.transport.get,
                          'ancestry.weave')

    def test_supports_external_lookups(self):
        control, repo = self._make_repo()
        self.assertFalse(repo._format.supports_external_lookups)
|
197 |
||
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
198 |
|
199 |
class TestFormat7(TestCaseWithTransport):
    """Tests for the weave-based format 7 repository (metadir layout)."""

    def _make_repo(self, shared=False):
        # Create a metadir control directory holding a format-7 repository,
        # mirroring the exact initialize() call shapes used by the tests.
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        if shared:
            repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        else:
            repo = weaverepo.RepositoryFormat7().initialize(control)
        return control, repo

    def test_attribute__fetch_order(self):
        """Weaves need topological data insertion."""
        control, repo = self._make_repo()
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Weaves do not reuse deltas."""
        control, repo = self._make_repo()
        self.assertEqual(False, repo._format._fetch_uses_deltas)

    def test_attribute__fetch_reconcile(self):
        """Weave repositories need a reconcile after fetch."""
        control, repo = self._make_repo()
        self.assertEqual(True, repo._format._fetch_reconcile)

    def test_disk_layout(self):
        control, repo = self._make_repo()
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        # we want:
        # format 'Bazaar-NG Repository format 7'
        # lock ''
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())
        # Creating a file with id Foo:Bar results in a non-escaped file name on
        # disk.
        control.create_branch()
        tree = control.create_workingtree()
        tree.add(['foo'], ['Foo:Bar'], ['file'])
        tree.put_file_bytes_non_atomic('Foo:Bar', 'content\n')
        try:
            tree.commit('first post', rev_id='first')
        except errors.IllegalPath:
            if sys.platform != 'win32':
                raise
            self.knownFailure('Foo:Bar cannot be used as a file-id on windows'
                              ' in repo format 7')
            return
        self.assertEqualDiff(
            '# bzr weave file v5\n'
            'i\n'
            '1 7fe70820e08a1aac0ef224d9c66ab66831cc4ab1\n'
            'n first\n'
            '\n'
            'w\n'
            '{ 0\n'
            '. content\n'
            '}\n'
            'W\n',
            t.get('weaves/74/Foo%3ABar.weave').read())

    def test_shared_disk_layout(self):
        control, repo = self._make_repo(shared=True)
        # we want:
        # format 'Bazaar-NG Repository format 7'
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        # lock is not present when unlocked
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())
        self.assertFalse(t.has('branch-lock'))

    def test_creates_lockdir(self):
        """Make sure it appears to be controlled by a LockDir existence"""
        control, repo = self._make_repo(shared=True)
        t = control.get_repository_transport(None)
        # TODO: Should check there is a 'lock' toplevel directory,
        # regardless of contents
        self.assertFalse(t.has('lock/held/info'))
        repo.lock_write()
        try:
            self.assertTrue(t.has('lock/held/info'))
        finally:
            # unlock so we don't get a warning about failing to do so
            repo.unlock()

    def test_uses_lockdir(self):
        """repo format 7 actually locks on lockdir"""
        base_url = self.get_url()
        control = bzrdir.BzrDirMetaFormat1().initialize(base_url)
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        t = control.get_repository_transport(None)
        repo.lock_write()
        repo.unlock()
        del repo
        # make sure the same lock is created by opening it
        repo = repository.Repository.open(base_url)
        repo.lock_write()
        self.assertTrue(t.has('lock/held/info'))
        repo.unlock()
        self.assertFalse(t.has('lock/held/info'))

    def test_shared_no_tree_disk_layout(self):
        control, repo = self._make_repo(shared=True)
        repo.set_make_working_trees(False)
        # we want:
        # format 'Bazaar-NG Repository format 7'
        # lock ''
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        ## self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertEqualDiff('', t.get('no-working-trees').read())
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())

    def test_supports_external_lookups(self):
        control, repo = self._make_repo()
        self.assertFalse(repo._format.supports_external_lookups)
|
350 |
||
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
351 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
352 |
class TestFormatKnit1(TestCaseWithTransport): |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
353 |
|
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
354 |
def test_attribute__fetch_order(self): |
355 |
"""Knits need topological data insertion."""
|
|
356 |
repo = self.make_repository('.', |
|
357 |
format=bzrdir.format_registry.get('knit')()) |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
358 |
self.assertEqual('topological', repo._format._fetch_order) |
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
359 |
|
360 |
def test_attribute__fetch_uses_deltas(self): |
|
361 |
"""Knits reuse deltas."""
|
|
362 |
repo = self.make_repository('.', |
|
363 |
format=bzrdir.format_registry.get('knit')()) |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
364 |
self.assertEqual(True, repo._format._fetch_uses_deltas) |
3565.3.1
by Robert Collins
* The generic fetch code now uses two attributes on Repository objects |
365 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
366 |
def test_disk_layout(self): |
367 |
control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url()) |
|
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
368 |
repo = knitrepo.RepositoryFormatKnit1().initialize(control) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
369 |
# in case of side effects of locking.
|
370 |
repo.lock_write() |
|
371 |
repo.unlock() |
|
372 |
# we want:
|
|
373 |
# format 'Bazaar-NG Knit Repository Format 1'
|
|
1553.5.62
by Martin Pool
Add tests that MetaDir repositories use LockDirs |
374 |
# lock: is a directory
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
375 |
# inventory.weave == empty_weave
|
376 |
# empty revision-store directory
|
|
377 |
# empty weaves directory
|
|
378 |
t = control.get_repository_transport(None) |
|
379 |
self.assertEqualDiff('Bazaar-NG Knit Repository Format 1', |
|
380 |
t.get('format').read()) |
|
1553.5.57
by Martin Pool
[merge] sync from bzr.dev |
381 |
# XXX: no locks left when unlocked at the moment
|
382 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
383 |
self.assertTrue(S_ISDIR(t.stat('knits').st_mode)) |
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
384 |
self.check_knits(t) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
385 |
# Check per-file knits.
|
386 |
branch = control.create_branch() |
|
387 |
tree = control.create_workingtree() |
|
388 |
tree.add(['foo'], ['Nasty-IdC:'], ['file']) |
|
389 |
tree.put_file_bytes_non_atomic('Nasty-IdC:', '') |
|
390 |
tree.commit('1st post', rev_id='foo') |
|
391 |
self.assertHasKnit(t, 'knits/e8/%254easty-%2549d%2543%253a', |
|
392 |
'\nfoo fulltext 0 81 :') |
|
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
393 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
394 |
def assertHasKnit(self, t, knit_name, extra_content=''): |
1654.1.3
by Robert Collins
Refactor repository knit tests slightly to remove duplication - add a assertHasKnit method. |
395 |
"""Assert that knit_name exists on t."""
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
396 |
self.assertEqualDiff('# bzr knit index 8\n' + extra_content, |
1654.1.3
by Robert Collins
Refactor repository knit tests slightly to remove duplication - add a assertHasKnit method. |
397 |
t.get(knit_name + '.kndx').read()) |
398 |
||
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
399 |
def check_knits(self, t): |
400 |
"""check knit content for a repository."""
|
|
1654.1.3
by Robert Collins
Refactor repository knit tests slightly to remove duplication - add a assertHasKnit method. |
401 |
self.assertHasKnit(t, 'inventory') |
402 |
self.assertHasKnit(t, 'revisions') |
|
403 |
self.assertHasKnit(t, 'signatures') |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
404 |
|
405 |
def test_shared_disk_layout(self): |
|
406 |
control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url()) |
|
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
407 |
repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
408 |
# we want:
|
409 |
# format 'Bazaar-NG Knit Repository Format 1'
|
|
1553.5.62
by Martin Pool
Add tests that MetaDir repositories use LockDirs |
410 |
# lock: is a directory
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
411 |
# inventory.weave == empty_weave
|
412 |
# empty revision-store directory
|
|
413 |
# empty weaves directory
|
|
414 |
# a 'shared-storage' marker file.
|
|
415 |
t = control.get_repository_transport(None) |
|
416 |
self.assertEqualDiff('Bazaar-NG Knit Repository Format 1', |
|
417 |
t.get('format').read()) |
|
1553.5.57
by Martin Pool
[merge] sync from bzr.dev |
418 |
# XXX: no locks left when unlocked at the moment
|
419 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
420 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
421 |
self.assertTrue(S_ISDIR(t.stat('knits').st_mode)) |
|
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
422 |
self.check_knits(t) |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
423 |
|
424 |
def test_shared_no_tree_disk_layout(self):
    """A shared, no-trees knit repository writes a 'no-working-trees' marker."""
    # Expected on-disk state:
    #   format: 'Bazaar-NG Knit Repository Format 1'
    #   lock ''
    #   inventory.weave == empty_weave
    #   empty revision-store directory
    #   empty weaves directory
    #   a 'shared-storage' marker file.
    controldir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
    repo = knitrepo.RepositoryFormatKnit1().initialize(controldir,
                                                       shared=True)
    repo.set_make_working_trees(False)
    transport = controldir.get_repository_transport(None)
    self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
                         transport.get('format').read())
    # XXX: no locks left when unlocked at the moment
    # self.assertEqualDiff('', transport.get('lock').read())
    self.assertEqualDiff('', transport.get('shared-storage').read())
    self.assertEqualDiff('', transport.get('no-working-trees').read())
    # Re-enabling working trees must remove the marker file again.
    repo.set_make_working_trees(True)
    self.assertFalse(transport.has('no-working-trees'))
    self.assertTrue(S_ISDIR(transport.stat('knits').st_mode))
    self.check_knits(transport)
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
446 |
|
2917.2.1
by John Arbash Meinel
Fix bug #152360. The xml5 serializer should be using |
447 |
def test_deserialise_sets_root_revision(self):
    """We must have a inventory.root.revision

    Old versions of the XML5 serializer did not set the revision_id for
    the whole inventory, so the deserialiser fills in the root revision
    from the revision id the caller supplies. That is valid as long as
    the api is not being abused.
    """
    repo = self.make_repository('.',
        format=bzrdir.format_registry.get('knit')())
    xml = '<inventory format="5">\n</inventory>\n'
    inventory = repo._deserialise_inventory('test-rev-id', xml)
    self.assertEqual('test-rev-id', inventory.root.revision)
459 |
||
460 |
def test_deserialise_uses_global_revision_id(self):
    """If it is set, then we re-use the global revision id"""
    repo = self.make_repository('.',
        format=bzrdir.format_registry.get('knit')())
    xml = ('<inventory format="5" revision_id="other-rev-id">\n'
           '</inventory>\n')
    # Arguably a mismatch between the caller's id and the serialized one
    # should be reported more clearly rather than one silently winning;
    # today the deserialiser raises AssertionError on the mismatch.
    self.assertRaises(AssertionError, repo._deserialise_inventory,
        'test-rev-id', xml)
    inventory = repo._deserialise_inventory('other-rev-id', xml)
    self.assertEqual('other-rev-id', inventory.root.revision)
473 |
||
3221.3.1
by Robert Collins
* Repository formats have a new supported-feature attribute |
474 |
def test_supports_external_lookups(self):
    """Knit repositories do not support external (stacked) lookups."""
    repo = self.make_repository('.',
        format=bzrdir.format_registry.get('knit')())
    self.assertFalse(repo._format.supports_external_lookups)
|
478 |
||
2535.3.53
by Andrew Bennetts
Remove get_stream_as_bytes from KnitVersionedFile's API, make it a function in knitrepo.py instead. |
479 |
|
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
480 |
class DummyRepository(object):
    """A dummy repository for testing."""

    # Tests poke real format/serializer objects onto instances as needed.
    _format = None
    _serializer = None

    def supports_rich_root(self):
        """Delegate to the attached format; without one, no rich roots."""
        fmt = self._format
        if fmt is None:
            return False
        return fmt.rich_root_data

    def get_graph(self):
        raise NotImplementedError

    def get_parent_map(self, revision_ids):
        raise NotImplementedError
|
496 |
||
2305.2.3
by Andrew Bennetts
Bring across test_repository improvements from the hpss branch to fix the last test failures. |
497 |
|
498 |
class InterDummy(repository.InterRepository):
    """An inter-repository optimised code path for DummyRepository.

    This is for use during testing: both ends being DummyRepository
    instances guarantees that none of the default registered
    inter-repository classes will match.
    """

    @staticmethod
    def is_compatible(repo_source, repo_target):
        """InterDummy is compatible only with a DummyRepository pair."""
        both_dummies = (isinstance(repo_source, DummyRepository)
                        and isinstance(repo_target, DummyRepository))
        return both_dummies
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
511 |
|
512 |
||
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
513 |
class TestInterRepository(TestCaseWithTransport):
    """Selection and registration of InterRepository optimisers."""

    def test_get_default_inter_repository(self):
        # InterRepository.get(repo_a, repo_b) probes for an inter_repo
        # class whose is_compatible(repo_a, repo_b) returns True, and
        # falls back to a default otherwise. This also checks that the
        # default registered optimised interrepository classes do not
        # barf inappropriately when handed a surprising repository type.
        left = DummyRepository()
        right = DummyRepository()
        self.assertGetsDefaultInterRepository(left, right)

    def assertGetsDefaultInterRepository(self, repo_a, repo_b):
        """Asserts that InterRepository.get(repo_a, repo_b) -> the default.

        The effective default is now InterSameDataRepository because there is
        no actual sane default in the presence of incompatible data models.
        """
        chosen = repository.InterRepository.get(repo_a, repo_b)
        self.assertEqual(repository.InterSameDataRepository,
                         chosen.__class__)
        self.assertEqual(repo_a, chosen.source)
        self.assertEqual(repo_b, chosen.target)

    def test_register_inter_repository_class(self):
        # An optimised code path provider - an InterRepository subclass -
        # can be registered and unregistered, and is correctly selected
        # when given a repository pair for which its is_compatible static
        # method returns True.
        left = DummyRepository()
        left._format = RepositoryFormat()
        right = DummyRepository()
        right._format = RepositoryFormat()
        repo = self.make_repository('.')
        # hack dummies to look like repo somewhat.
        left._serializer = repo._serializer
        left._format.supports_tree_reference = repo._format.supports_tree_reference
        left._format.rich_root_data = repo._format.rich_root_data
        right._serializer = repo._serializer
        right._format.supports_tree_reference = repo._format.supports_tree_reference
        right._format.rich_root_data = repo._format.rich_root_data
        repository.InterRepository.register_optimiser(InterDummy)
        try:
            # a pair InterDummy rejects still gets the default
            self.assertFalse(InterDummy.is_compatible(left, repo))
            self.assertGetsDefaultInterRepository(left, repo)
            # and a pair it 'likes' yields an InterDummy
            self.assertTrue(InterDummy.is_compatible(left, right))
            chosen = repository.InterRepository.get(left, right)
            self.assertEqual(InterDummy, chosen.__class__)
            self.assertEqual(left, chosen.source)
            self.assertEqual(right, chosen.target)
        finally:
            repository.InterRepository.unregister_optimiser(InterDummy)
        # after unregistering, selection reverts to the default again.
        self.assertGetsDefaultInterRepository(left, right)
|
1534.1.33
by Robert Collins
Move copy_content_into into InterRepository and InterWeaveRepo, and disable the default codepath test as we have optimised paths for all current combinations. |
572 |
|
2241.1.17
by Martin Pool
Restore old InterWeave tests |
573 |
|
574 |
class TestInterWeaveRepo(TestCaseWithTransport):
    """Compatibility matrix for the InterWeaveRepo optimiser."""

    def test_is_compatible_and_registered(self):
        # InterWeaveRepo is compatible only when either side
        # is a format 5/6/7 branch
        from bzrlib.repofmt import knitrepo, weaverepo
        weave_formats = [weaverepo.RepositoryFormat5(),
                         weaverepo.RepositoryFormat6(),
                         weaverepo.RepositoryFormat7()]
        incompatible_formats = [weaverepo.RepositoryFormat4(),
                                knitrepo.RepositoryFormatKnit1(),
                                ]
        repo_a = self.make_repository('a')
        repo_b = self.make_repository('b')
        is_compatible = repository.InterWeaveRepo.is_compatible
        for bad_format in incompatible_formats:
            # force the incompatible format onto the left, then the right
            repo_a._format = bad_format
            repo_b._format = weave_formats[0]
            self.assertFalse(is_compatible(repo_a, repo_b))
            self.assertFalse(is_compatible(repo_b, repo_a))
        for left_format in weave_formats:
            repo_a._format = left_format
            for right_format in weave_formats:
                repo_b._format = right_format
                self.assertTrue(is_compatible(repo_a, repo_b))
                self.assertEqual(repository.InterWeaveRepo,
                                 repository.InterRepository.get(repo_a,
                                                                repo_b).__class__)
|
603 |
||
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
604 |
|
605 |
class TestRepositoryConverter(TestCaseWithTransport):
    """CopyConverter upgrades a repository in place."""

    def test_convert_empty(self):
        """Converting an empty weave repo yields a knit repo on disk."""
        transport = get_transport(self.get_url('.'))
        transport.mkdir('repository')
        repo_dir = bzrdir.BzrDirMetaFormat1().initialize('repository')
        repo = weaverepo.RepositoryFormat7().initialize(repo_dir)
        target_format = knitrepo.RepositoryFormatKnit1()
        converter = repository.CopyConverter(target_format)
        # progress bars must be finished even if conversion blows up
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            converter.convert(repo, pb)
        finally:
            pb.finished()
        converted = repo_dir.open_repository()
        self.assertTrue(isinstance(target_format, converted._format.__class__))
|
1843.2.5
by Aaron Bentley
Add test of _unescape_xml |
621 |
|
622 |
||
623 |
class TestMisc(TestCase):
    """Assorted small repository-module checks."""

    def test_unescape_xml(self):
        """We get some kind of error when malformed entities are passed"""
        # 'bar' is not a known entity name, so the lookup fails.
        self.assertRaises(KeyError, repository._unescape_xml, 'foo&bar;')
1910.2.13
by Aaron Bentley
Start work on converter |
628 |
|
629 |
||
2255.2.211
by Robert Collins
Remove knit2 repository format- it has never been supported. |
630 |
class TestRepositoryFormatKnit3(TestCaseWithTransport):
    """Behaviour specific to the Knit3 (rich-root) repository format."""

    def _make_knit3_repo(self):
        # helper: a fresh repository in a metadir using Knit3.
        fmt = bzrdir.BzrDirMetaFormat1()
        fmt.repository_format = knitrepo.RepositoryFormatKnit3()
        return self.make_repository('.', format=fmt)

    def test_attribute__fetch_order(self):
        """Knits need topological data insertion."""
        repo = self._make_knit3_repo()
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Knits reuse deltas."""
        repo = self._make_knit3_repo()
        self.assertEqual(True, repo._format._fetch_uses_deltas)

    def test_convert(self):
        """Ensure the upgrade adds weaves for roots"""
        fmt = bzrdir.BzrDirMetaFormat1()
        fmt.repository_format = knitrepo.RepositoryFormatKnit1()
        tree = self.make_branch_and_tree('.', fmt)
        tree.commit("Dull commit", rev_id="dull")
        revision_tree = tree.branch.repository.revision_tree('dull')
        revision_tree.lock_read()
        try:
            # Knit1 stores no root text, so reading it must fail.
            self.assertRaises(errors.NoSuchFile, revision_tree.get_file_lines,
                revision_tree.inventory.root.file_id)
        finally:
            revision_tree.unlock()
        fmt = bzrdir.BzrDirMetaFormat1()
        fmt.repository_format = knitrepo.RepositoryFormatKnit3()
        upgrade.Convert('.', fmt)
        tree = workingtree.WorkingTree.open('.')
        revision_tree = tree.branch.repository.revision_tree('dull')
        revision_tree.lock_read()
        try:
            # After conversion the root text must be present.
            revision_tree.get_file_lines(revision_tree.inventory.root.file_id)
        finally:
            revision_tree.unlock()
        tree.commit("Another dull commit", rev_id='dull2')
        revision_tree = tree.branch.repository.revision_tree('dull2')
        revision_tree.lock_read()
        self.addCleanup(revision_tree.unlock)
        self.assertEqual('dull', revision_tree.inventory.root.revision)

    def test_supports_external_lookups(self):
        repo = self._make_knit3_repo()
        self.assertFalse(repo._format.supports_external_lookups)
|
680 |
||
2535.3.57
by Andrew Bennetts
Perform some sanity checking of data streams rather than blindly inserting them into our repository. |
681 |
|
4667.1.1
by John Arbash Meinel
Drop the Test2a test times from 5+s down to 1.4s |
682 |
class Test2a(tests.TestCaseWithMemoryTransport): |
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
683 |
|
4634.20.1
by Robert Collins
Fix bug 402652 by recompressing all texts that are streamed - slightly slower at fetch, substantially faster and more compact at read. |
684 |
def test_fetch_combines_groups(self):
    """Fetching into a 2a repository recombines the source's groups.

    NOTE(review): this method was previously defined three times verbatim
    (successive cherrypicks); Python keeps only the last definition, so the
    first two copies were dead code. They have been collapsed into this
    single definition with no behaviour change.
    """
    builder = self.make_branch_builder('source', format='2a')
    builder.start_series()
    builder.build_snapshot('1', None, [
        ('add', ('', 'root-id', 'directory', '')),
        ('add', ('file', 'file-id', 'file', 'content\n'))])
    builder.build_snapshot('2', ['1'], [
        ('modify', ('file-id', 'content-2\n'))])
    builder.finish_series()
    source = builder.get_branch()
    target = self.make_repository('target', format='2a')
    target.fetch(source.repository)
    target.lock_read()
    self.addCleanup(target.unlock)
    details = target.texts._index.get_build_details(
        [('file-id', '1',), ('file-id', '2',)])
    file_1_details = details[('file-id', '1')]
    file_2_details = details[('file-id', '2')]
    # The index, and what to read off disk, should be the same for both
    # versions of the file.
    self.assertEqual(file_1_details[0][:3], file_2_details[0][:3])
|
749 |
||
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
750 |
def test_format_pack_compresses_True(self):
    """2a formats advertise that packing recompresses data."""
    repo = self.make_repository('repo', format='2a')
    self.assertTrue(repo._format.pack_compresses)
|
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
753 |
|
754 |
def test_inventories_use_chk_map_with_parent_base_dict(self):
    """2a inventories carry a populated parent_id,basename->file_id map."""
    tree = self.make_branch_and_memory_tree('repo', format="2a")
    tree.lock_write()
    tree.add([''], ['TREE_ROOT'])
    revid = tree.commit("foo")
    tree.unlock()
    tree.lock_read()
    self.addCleanup(tree.unlock)
    inv = tree.branch.repository.get_inventory(revid)
    self.assertNotEqual(None, inv.parent_id_basename_to_file_id)
    # Force both CHK maps to load their root nodes so we can inspect
    # the configured maximum page size.
    inv.parent_id_basename_to_file_id._ensure_root()
    inv.id_to_entry._ensure_root()
    self.assertEqual(65536, inv.id_to_entry._root_node.maximum_size)
    self.assertEqual(65536,
        inv.parent_id_basename_to_file_id._root_node.maximum_size)
3735.2.40
by Robert Collins
Add development4 which has a parent_id to basename index on CHKInventory objects. |
769 |
|
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
770 |
def test_autopack_unchanged_chk_nodes(self):
    # at 20 unchanged commits, chk pages are packed that are split into
    # two groups such that the new pack being made doesn't have all its
    # pages in the source packs (though they are in the repository).
    # Use a memory backed repository, we don't need to hit disk for this
    tree = self.make_branch_and_memory_tree('tree', format='2a')
    tree.lock_write()
    self.addCleanup(tree.unlock)
    tree.add([''], ['TREE_ROOT'])
    # 20 commits triggers the autopack threshold we care about.
    for commit_index in range(20):
        tree.commit(str(commit_index))
|
781 |
||
782 |
def test_pack_with_hint(self):
    """pack(hint=...) combines only the named packs.

    Three packs are created; the repository is asked to pack only the two
    newest. The untouched pack must survive and the hinted pair must be
    combined into a single new pack.
    """
    tree = self.make_branch_and_memory_tree('tree', format='2a')
    tree.lock_write()
    self.addCleanup(tree.unlock)
    tree.add([''], ['TREE_ROOT'])
    # 1 commit to leave untouched
    tree.commit('1')
    to_keep = tree.branch.repository._pack_collection.names()
    # 2 to combine
    tree.commit('2')
    tree.commit('3')
    # renamed from 'all', which shadowed the builtin of the same name
    all_names = tree.branch.repository._pack_collection.names()
    combine = list(set(all_names) - set(to_keep))
    self.assertLength(3, all_names)
    self.assertLength(2, combine)
    tree.branch.repository.pack(hint=combine)
    final = tree.branch.repository._pack_collection.names()
    self.assertLength(2, final)
    self.assertFalse(combine[0] in final)
    self.assertFalse(combine[1] in final)
    self.assertSubset(to_keep, final)
|
803 |
||
4360.4.3
by John Arbash Meinel
Introduce a KnitPackStreamSource which is used when |
804 |
def test_stream_source_to_gc(self):
    """2a -> 2a fetches use the groupcompress CHK stream source."""
    source = self.make_repository('source', format='2a')
    target = self.make_repository('target', format='2a')
    stream_source = source._get_source(target._format)
    self.assertIsInstance(stream_source,
                          groupcompress_repo.GroupCHKStreamSource)
|
809 |
||
810 |
def test_stream_source_to_non_gc(self):
    """2a -> non-gc fetches fall back to the generic stream source."""
    source = self.make_repository('source', format='2a')
    target = self.make_repository('target', format='rich-root-pack')
    stream_source = source._get_source(target._format)
    # We don't want the child GroupCHKStreamSource here.
    self.assertIs(type(stream_source), repository.StreamSource)
|
816 |
||
4360.4.9
by John Arbash Meinel
Merge bzr.dev, bringing in the gc stacking fixes. |
817 |
def test_get_stream_for_missing_keys_includes_all_chk_refs(self): |
818 |
source_builder = self.make_branch_builder('source', |
|
4462.2.1
by Robert Collins
Add new attribute to RepositoryFormat pack_compresses, hinting when pack can be useful. |
819 |
format='2a') |
4360.4.9
by John Arbash Meinel
Merge bzr.dev, bringing in the gc stacking fixes. |
820 |
# We have to build a fairly large tree, so that we are sure the chk
|
821 |
# pages will have split into multiple pages.
|
|
822 |
entries = [('add', ('', 'a-root-id', 'directory', None))] |
|
823 |
for i in 'abcdefghijklmnopqrstuvwxyz123456789': |
|
824 |
for j in 'abcdefghijklmnopqrstuvwxyz123456789': |
|
825 |
fname = i + j |
|
826 |
fid = fname + '-id' |
|
827 |
content = 'content for %s\n' % (fname,) |
|
828 |
entries.append(('add', (fname, fid, 'file', content))) |
|
829 |
source_builder.start_series() |
|
830 |
source_builder.build_snapshot('rev-1', None, entries) |
|
831 |
# Now change a few of them, so we get a few new pages for the second
|
|
832 |
# revision
|
|
833 |
source_builder.build_snapshot('rev-2', ['rev-1'], [ |
|
834 |
('modify', ('aa-id', 'new content for aa-id\n')), |
|
835 |
('modify', ('cc-id', 'new content for cc-id\n')), |
|
836 |
('modify', ('zz-id', 'new content for zz-id\n')), |
|
837 |
])
|
|
838 |
source_builder.finish_series() |
|
839 |
source_branch = source_builder.get_branch() |
|
840 |
source_branch.lock_read() |
|
841 |
self.addCleanup(source_branch.unlock) |
|
4462.2.1
by Robert Collins
Add new attribute to RepositoryFormat pack_compresses, hinting when pack can be useful. |
842 |
target = self.make_repository('target', format='2a') |
4360.4.9
by John Arbash Meinel
Merge bzr.dev, bringing in the gc stacking fixes. |
843 |
source = source_branch.repository._get_source(target._format) |
844 |
self.assertIsInstance(source, groupcompress_repo.GroupCHKStreamSource) |
|
845 |
||
846 |
# On a regular pass, getting the inventories and chk pages for rev-2
|
|
847 |
# would only get the newly created chk pages
|
|
848 |
search = graph.SearchResult(set(['rev-2']), set(['rev-1']), 1, |
|
849 |
set(['rev-2'])) |
|
850 |
simple_chk_records = [] |
|
851 |
for vf_name, substream in source.get_stream(search): |
|
852 |
if vf_name == 'chk_bytes': |
|
853 |
for record in substream: |
|
854 |
simple_chk_records.append(record.key) |
|
855 |
else: |
|
856 |
for _ in substream: |
|
857 |
continue
|
|
858 |
# 3 pages, the root (InternalNode), + 2 pages which actually changed
|
|
859 |
self.assertEqual([('sha1:91481f539e802c76542ea5e4c83ad416bf219f73',), |
|
860 |
('sha1:4ff91971043668583985aec83f4f0ab10a907d3f',), |
|
861 |
('sha1:81e7324507c5ca132eedaf2d8414ee4bb2226187',), |
|
862 |
('sha1:b101b7da280596c71a4540e9a1eeba8045985ee0',)], |
|
863 |
simple_chk_records) |
|
864 |
# Now, when we do a similar call using 'get_stream_for_missing_keys'
|
|
865 |
# we should get a much larger set of pages.
|
|
866 |
missing = [('inventories', 'rev-2')] |
|
867 |
full_chk_records = [] |
|
868 |
for vf_name, substream in source.get_stream_for_missing_keys(missing): |
|
869 |
if vf_name == 'inventories': |
|
870 |
for record in substream: |
|
871 |
self.assertEqual(('rev-2',), record.key) |
|
872 |
elif vf_name == 'chk_bytes': |
|
873 |
for record in substream: |
|
874 |
full_chk_records.append(record.key) |
|
875 |
else: |
|
876 |
self.fail('Should not be getting a stream of %s' % (vf_name,)) |
|
877 |
# We have 257 records now. This is because we have 1 root page, and 256
|
|
878 |
# leaf pages in a complete listing.
|
|
879 |
self.assertEqual(257, len(full_chk_records)) |
|
880 |
self.assertSubset(simple_chk_records, full_chk_records) |
|
881 |
||
4465.2.7
by Aaron Bentley
Move test_inconsistency_fatal to test_repository |
882 |
def test_inconsistency_fatal(self): |
883 |
repo = self.make_repository('repo', format='2a') |
|
884 |
self.assertTrue(repo.revisions._index._inconsistency_fatal) |
|
885 |
self.assertFalse(repo.texts._index._inconsistency_fatal) |
|
886 |
self.assertFalse(repo.inventories._index._inconsistency_fatal) |
|
887 |
self.assertFalse(repo.signatures._index._inconsistency_fatal) |
|
888 |
self.assertFalse(repo.chk_bytes._index._inconsistency_fatal) |
|
889 |
||
class TestKnitPackStreamSource(tests.TestCaseWithMemoryTransport):
    """Check which stream source is chosen between pack repository formats."""

    def _get_stream_source(self, source_format, target_format):
        # Helper: build a source/target pair and return the stream source
        # selected for converting between the two formats.
        source = self.make_repository('source', format=source_format)
        target = self.make_repository('target', format=target_format)
        return source._get_source(target._format)

    def test_source_to_exact_pack_092(self):
        stream_source = self._get_stream_source('pack-0.92', 'pack-0.92')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_rich_root_pack(self):
        stream_source = self._get_stream_source('rich-root-pack',
                                                'rich-root-pack')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19(self):
        stream_source = self._get_stream_source('1.9', '1.9')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19_rich_root(self):
        stream_source = self._get_stream_source('1.9-rich-root',
                                                '1.9-rich-root')
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_source_to_remote_exact_pack_19(self):
        # The same format accessed via the smart server still gets the
        # optimised stream source.
        trans = self.make_smart_server('target')
        trans.ensure_base()
        source = self.make_repository('source', format='1.9')
        target = self.make_repository('target', format='1.9')
        target = repository.Repository.open(trans.base)
        stream_source = source._get_source(target._format)
        self.assertIsInstance(stream_source, pack_repo.KnitPackStreamSource)

    def test_stream_source_to_non_exact(self):
        stream = self._get_stream_source('pack-0.92', '1.9')
        self.assertIs(type(stream), repository.StreamSource)

    def test_stream_source_to_non_exact_rich_root(self):
        stream = self._get_stream_source('1.9', '1.9-rich-root')
        self.assertIs(type(stream), repository.StreamSource)

    def test_source_to_remote_non_exact_pack_19(self):
        # Mismatched formats over the smart server use the generic source.
        trans = self.make_smart_server('target')
        trans.ensure_base()
        source = self.make_repository('source', format='1.9')
        target = self.make_repository('target', format='1.6')
        target = repository.Repository.open(trans.base)
        stream_source = source._get_source(target._format)
        self.assertIs(type(stream_source), repository.StreamSource)

    def test_stream_source_to_knit(self):
        stream = self._get_stream_source('pack-0.92', 'dirstate')
        self.assertIs(type(stream), repository.StreamSource)
class TestDevelopment6FindParentIdsOfRevisions(TestCaseWithTransport):
    """Tests for _find_parent_ids_of_revisions."""

    def setUp(self):
        super(TestDevelopment6FindParentIdsOfRevisions, self).setUp()
        self.builder = self.make_branch_builder('source',
            format='development6-rich-root')
        self.builder.start_series()
        # All tests start from a lone 'initial' revision with just a root dir.
        self.builder.build_snapshot('initial', None,
            [('add', ('', 'tree-root', 'directory', None))])
        self.repo = self.builder.get_branch().repository
        self.addCleanup(self.builder.finish_series)

    def assertParentIds(self, expected_result, rev_set):
        """Assert the parent ids found for rev_set, ignoring order."""
        found = self.repo._find_parent_ids_of_revisions(rev_set)
        self.assertEqual(sorted(expected_result), sorted(found))

    def test_simple(self):
        # The sole parent outside the set is reported.
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1'], [])
        self.assertParentIds(['revid1'], ['revid2'])

    def test_not_first_parent(self):
        # Parents already inside the set are not reported.
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1'], [])
        self.builder.build_snapshot('revid3', ['revid2'], [])
        self.assertParentIds(['revid1'], ['revid3', 'revid2'])

    def test_not_null(self):
        # The null revision is never returned as a parent.
        self.assertParentIds([], ['initial'])

    def test_not_null_set(self):
        # Asking about the null revision itself yields nothing.
        self.builder.build_snapshot('revid1', None, [])
        self.assertParentIds([], [_mod_revision.NULL_REVISION])

    def test_ghost(self):
        # A ghost in the queried set itself is ignored.
        self.builder.build_snapshot('revid1', None, [])
        self.assertParentIds(['initial'], ['ghost', 'revid1'])

    def test_ghost_parent(self):
        # A ghost parent of a set member is reported.
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2', ['revid1', 'ghost'], [])
        self.assertParentIds(['ghost', 'initial'], ['revid2', 'revid1'])

    def test_righthand_parent(self):
        # A merge parent outside the set is reported alongside the mainline
        # parent.
        self.builder.build_snapshot('revid1', None, [])
        self.builder.build_snapshot('revid2a', ['revid1'], [])
        self.builder.build_snapshot('revid2b', ['revid1'], [])
        self.builder.build_snapshot('revid3', ['revid2a', 'revid2b'], [])
        self.assertParentIds(['revid1', 'revid2b'], ['revid3', 'revid2a'])
class TestWithBrokenRepo(TestCaseWithTransport):
    """These tests seem to be more appropriate as interface tests?"""

    def make_broken_repository(self):
        """Build a repository with deliberately inconsistent ancestry data.

        Returns a repository containing an unreferenced file ancestor
        (rev1b), a revision-less inventory, and a text referencing a ghost.
        """
        # XXX: This function is borrowed from Aaron's "Reconcile can fix bad
        # parent references" branch which is due to land in bzr.dev soon. Once
        # it does, this duplication should be removed.
        repo = self.make_repository('broken-repo')
        cleanups = []
        try:
            repo.lock_write()
            cleanups.append(repo.unlock)
            repo.start_write_group()
            cleanups.append(repo.commit_write_group)
            # make rev1a: A well-formed revision, containing 'file1'
            inv = inventory.Inventory(revision_id='rev1a')
            inv.root.revision = 'rev1a'
            self.add_file(repo, inv, 'file1', 'rev1a', [])
            repo.texts.add_lines((inv.root.file_id, 'rev1a'), [], [])
            repo.add_inventory('rev1a', inv, [])
            rev = _mod_revision.Revision('rev1a',
                committer='jrandom@example.com', timestamp=0,
                inventory_sha1='', timezone=0, message='foo', parent_ids=[])
            repo.add_revision('rev1a', rev, inv)

            # make rev1b, which has no Revision, but has an Inventory, and
            # file1
            inv = inventory.Inventory(revision_id='rev1b')
            inv.root.revision = 'rev1b'
            self.add_file(repo, inv, 'file1', 'rev1b', [])
            repo.add_inventory('rev1b', inv, [])

            # make rev2, with file1 and file2
            # file2 is sane
            # file1 has 'rev1b' as an ancestor, even though this is not
            # mentioned by 'rev1a', making it an unreferenced ancestor
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file1', 'rev2', ['rev1a', 'rev1b'])
            self.add_file(repo, inv, 'file2', 'rev2', [])
            self.add_revision(repo, 'rev2', inv, ['rev1a'])

            # make ghost revision rev1c
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file2', 'rev1c', [])

            # make rev3 with file2
            # file2 refers to 'rev1c', which is a ghost in this repository, so
            # file2 cannot have rev1c as its ancestor.
            inv = inventory.Inventory()
            self.add_file(repo, inv, 'file2', 'rev3', ['rev1c'])
            self.add_revision(repo, 'rev3', inv, ['rev1c'])
            return repo
        finally:
            # Undo lock/write-group acquisitions in reverse order.
            for cleanup in reversed(cleanups):
                cleanup()

    def add_revision(self, repo, revision_id, inv, parent_ids):
        """Record inv as revision_id, with a root text and Revision object."""
        inv.revision_id = revision_id
        inv.root.revision = revision_id
        repo.texts.add_lines((inv.root.file_id, revision_id), [], [])
        repo.add_inventory(revision_id, inv, parent_ids)
        rev = _mod_revision.Revision(revision_id,
            committer='jrandom@example.com', timestamp=0, inventory_sha1='',
            timezone=0, message='foo', parent_ids=parent_ids)
        repo.add_revision(revision_id, rev, inv)

    def add_file(self, repo, inv, filename, revision, parents):
        """Add a one-line file entry to inv and its text to the repository."""
        file_id = filename + '-id'
        entry = inventory.InventoryFile(file_id, filename, 'TREE_ROOT')
        entry.revision = revision
        entry.text_size = 0
        inv.add(entry)
        parent_keys = [(file_id, parent) for parent in parents]
        repo.texts.add_lines((file_id, revision), parent_keys, ['line\n'])

    def test_insert_from_broken_repo(self):
        """Inserting a data stream from a broken repository won't silently
        corrupt the target repository.
        """
        broken_repo = self.make_broken_repository()
        empty_repo = self.make_repository('empty-repo')
        try:
            empty_repo.fetch(broken_repo)
        except (errors.RevisionNotPresent, errors.BzrCheckError):
            # Test successful: compression parent not being copied leads to
            # error.
            return
        empty_repo.lock_read()
        self.addCleanup(empty_repo.unlock)
        text = empty_repo.texts.get_record_stream(
            [('file2-id', 'rev3')], 'topological', True).next()
        self.assertEqual('line\n', text.get_bytes_as('fulltext'))
2592.3.84
by Robert Collins
Start of autopacking logic. |
1108 |
class TestRepositoryPackCollection(TestCaseWithTransport): |
1109 |
||
1110 |
def get_format(self): |
|
3010.3.3
by Martin Pool
Merge trunk |
1111 |
return bzrdir.format_registry.make_bzrdir('pack-0.92') |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1112 |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1113 |
def get_packs(self): |
1114 |
format = self.get_format() |
|
1115 |
repo = self.make_repository('.', format=format) |
|
1116 |
return repo._pack_collection |
|
1117 |
||
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1118 |
def make_packs_and_alt_repo(self, write_lock=False): |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1119 |
"""Create a pack repo with 3 packs, and access it via a second repo."""
|
4617.4.1
by Robert Collins
Fix a pack specific test which didn't lock its format down. |
1120 |
tree = self.make_branch_and_tree('.', format=self.get_format()) |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1121 |
tree.lock_write() |
1122 |
self.addCleanup(tree.unlock) |
|
1123 |
rev1 = tree.commit('one') |
|
1124 |
rev2 = tree.commit('two') |
|
1125 |
rev3 = tree.commit('three') |
|
1126 |
r = repository.Repository.open('.') |
|
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1127 |
if write_lock: |
1128 |
r.lock_write() |
|
1129 |
else: |
|
1130 |
r.lock_read() |
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1131 |
self.addCleanup(r.unlock) |
1132 |
packs = r._pack_collection |
|
1133 |
packs.ensure_loaded() |
|
1134 |
return tree, r, packs, [rev1, rev2, rev3] |
|
1135 |
||
4634.127.1
by John Arbash Meinel
Partial fix for bug #507557. |
1136 |
def test__clear_obsolete_packs(self): |
1137 |
packs = self.get_packs() |
|
1138 |
obsolete_pack_trans = packs.transport.clone('obsolete_packs') |
|
1139 |
obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n') |
|
1140 |
obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n') |
|
1141 |
obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n') |
|
1142 |
obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n') |
|
1143 |
obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n') |
|
1144 |
res = packs._clear_obsolete_packs() |
|
1145 |
self.assertEqual(['a-pack', 'another-pack'], sorted(res)) |
|
1146 |
self.assertEqual([], obsolete_pack_trans.list_dir('.')) |
|
1147 |
||
1148 |
def test__clear_obsolete_packs_preserve(self): |
|
1149 |
packs = self.get_packs() |
|
1150 |
obsolete_pack_trans = packs.transport.clone('obsolete_packs') |
|
1151 |
obsolete_pack_trans.put_bytes('a-pack.pack', 'content\n') |
|
1152 |
obsolete_pack_trans.put_bytes('a-pack.rix', 'content\n') |
|
1153 |
obsolete_pack_trans.put_bytes('a-pack.iix', 'content\n') |
|
1154 |
obsolete_pack_trans.put_bytes('another-pack.pack', 'foo\n') |
|
1155 |
obsolete_pack_trans.put_bytes('not-a-pack.rix', 'foo\n') |
|
1156 |
res = packs._clear_obsolete_packs(preserve=set(['a-pack'])) |
|
1157 |
self.assertEqual(['a-pack', 'another-pack'], sorted(res)) |
|
1158 |
self.assertEqual(['a-pack.iix', 'a-pack.pack', 'a-pack.rix'], |
|
1159 |
sorted(obsolete_pack_trans.list_dir('.'))) |
|
1160 |
||
2592.3.84
by Robert Collins
Start of autopacking logic. |
1161 |
def test__max_pack_count(self): |
2592.3.219
by Robert Collins
Review feedback. |
1162 |
"""The maximum pack count is a function of the number of revisions."""
|
2592.3.84
by Robert Collins
Start of autopacking logic. |
1163 |
# no revisions - one pack, so that we can have a revision free repo
|
1164 |
# without it blowing up
|
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1165 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1166 |
self.assertEqual(1, packs._max_pack_count(0)) |
1167 |
# after that the sum of the digits, - check the first 1-9
|
|
1168 |
self.assertEqual(1, packs._max_pack_count(1)) |
|
1169 |
self.assertEqual(2, packs._max_pack_count(2)) |
|
1170 |
self.assertEqual(3, packs._max_pack_count(3)) |
|
1171 |
self.assertEqual(4, packs._max_pack_count(4)) |
|
1172 |
self.assertEqual(5, packs._max_pack_count(5)) |
|
1173 |
self.assertEqual(6, packs._max_pack_count(6)) |
|
1174 |
self.assertEqual(7, packs._max_pack_count(7)) |
|
1175 |
self.assertEqual(8, packs._max_pack_count(8)) |
|
1176 |
self.assertEqual(9, packs._max_pack_count(9)) |
|
1177 |
# check the boundary cases with two digits for the next decade
|
|
1178 |
self.assertEqual(1, packs._max_pack_count(10)) |
|
1179 |
self.assertEqual(2, packs._max_pack_count(11)) |
|
1180 |
self.assertEqual(10, packs._max_pack_count(19)) |
|
1181 |
self.assertEqual(2, packs._max_pack_count(20)) |
|
1182 |
self.assertEqual(3, packs._max_pack_count(21)) |
|
1183 |
# check some arbitrary big numbers
|
|
1184 |
self.assertEqual(25, packs._max_pack_count(112894)) |
|
1185 |
||
4928.1.1
by Martin Pool
Give RepositoryPackCollection a repr |
1186 |
def test_repr(self): |
1187 |
packs = self.get_packs() |
|
1188 |
self.assertContainsRe(repr(packs), |
|
1189 |
'RepositoryPackCollection(.*Repository(.*))') |
|
1190 |
||
4634.127.2
by John Arbash Meinel
Change the _obsolete_packs code to handle files that are already gone. |
1191 |
def test__obsolete_packs(self): |
1192 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1193 |
names = packs.names() |
|
1194 |
pack = packs.get_pack_by_name(names[0]) |
|
1195 |
# Schedule this one for removal
|
|
1196 |
packs._remove_pack_from_memory(pack) |
|
1197 |
# Simulate a concurrent update by renaming the .pack file and one of
|
|
1198 |
# the indices
|
|
1199 |
packs.transport.rename('packs/%s.pack' % (names[0],), |
|
1200 |
'obsolete_packs/%s.pack' % (names[0],)) |
|
1201 |
packs.transport.rename('indices/%s.iix' % (names[0],), |
|
1202 |
'obsolete_packs/%s.iix' % (names[0],)) |
|
1203 |
# Now trigger the obsoletion, and ensure that all the remaining files
|
|
1204 |
# are still renamed
|
|
1205 |
packs._obsolete_packs([pack]) |
|
1206 |
self.assertEqual([n + '.pack' for n in names[1:]], |
|
1207 |
sorted(packs._pack_transport.list_dir('.'))) |
|
1208 |
# names[0] should not be present in the index anymore
|
|
1209 |
self.assertEqual(names[1:], |
|
1210 |
sorted(set([osutils.splitext(n)[0] for n in |
|
1211 |
packs._index_transport.list_dir('.')]))) |
|
1212 |
||
2592.3.84
by Robert Collins
Start of autopacking logic. |
1213 |
def test_pack_distribution_zero(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1214 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1215 |
self.assertEqual([0], packs.pack_distribution(0)) |
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1216 |
|
1217 |
def test_ensure_loaded_unlocked(self): |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1218 |
packs = self.get_packs() |
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1219 |
self.assertRaises(errors.ObjectNotLocked, |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1220 |
packs.ensure_loaded) |
3052.1.6
by John Arbash Meinel
Change the lock check to raise ObjectNotLocked. |
1221 |
|
2592.3.84
by Robert Collins
Start of autopacking logic. |
1222 |
def test_pack_distribution_one_to_nine(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1223 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1224 |
self.assertEqual([1], |
1225 |
packs.pack_distribution(1)) |
|
1226 |
self.assertEqual([1, 1], |
|
1227 |
packs.pack_distribution(2)) |
|
1228 |
self.assertEqual([1, 1, 1], |
|
1229 |
packs.pack_distribution(3)) |
|
1230 |
self.assertEqual([1, 1, 1, 1], |
|
1231 |
packs.pack_distribution(4)) |
|
1232 |
self.assertEqual([1, 1, 1, 1, 1], |
|
1233 |
packs.pack_distribution(5)) |
|
1234 |
self.assertEqual([1, 1, 1, 1, 1, 1], |
|
1235 |
packs.pack_distribution(6)) |
|
1236 |
self.assertEqual([1, 1, 1, 1, 1, 1, 1], |
|
1237 |
packs.pack_distribution(7)) |
|
1238 |
self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1], |
|
1239 |
packs.pack_distribution(8)) |
|
1240 |
self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1, 1], |
|
1241 |
packs.pack_distribution(9)) |
|
1242 |
||
1243 |
def test_pack_distribution_stable_at_boundaries(self): |
|
1244 |
"""When there are multi-rev packs the counts are stable."""
|
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1245 |
packs = self.get_packs() |
2592.3.84
by Robert Collins
Start of autopacking logic. |
1246 |
# in 10s:
|
1247 |
self.assertEqual([10], packs.pack_distribution(10)) |
|
1248 |
self.assertEqual([10, 1], packs.pack_distribution(11)) |
|
1249 |
self.assertEqual([10, 10], packs.pack_distribution(20)) |
|
1250 |
self.assertEqual([10, 10, 1], packs.pack_distribution(21)) |
|
1251 |
# 100s
|
|
1252 |
self.assertEqual([100], packs.pack_distribution(100)) |
|
1253 |
self.assertEqual([100, 1], packs.pack_distribution(101)) |
|
1254 |
self.assertEqual([100, 10, 1], packs.pack_distribution(111)) |
|
1255 |
self.assertEqual([100, 100], packs.pack_distribution(200)) |
|
1256 |
self.assertEqual([100, 100, 1], packs.pack_distribution(201)) |
|
1257 |
self.assertEqual([100, 100, 10, 1], packs.pack_distribution(211)) |
|
1258 |
||
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1259 |
def test_plan_pack_operations_2009_revisions_skip_all_packs(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1260 |
packs = self.get_packs() |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1261 |
existing_packs = [(2000, "big"), (9, "medium")] |
1262 |
# rev count - 2009 -> 2x1000 + 9x1
|
|
1263 |
pack_operations = packs.plan_autopack_combinations( |
|
1264 |
existing_packs, [1000, 1000, 1, 1, 1, 1, 1, 1, 1, 1, 1]) |
|
1265 |
self.assertEqual([], pack_operations) |
|
1266 |
||
1267 |
def test_plan_pack_operations_2010_revisions_skip_all_packs(self): |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1268 |
packs = self.get_packs() |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1269 |
existing_packs = [(2000, "big"), (9, "medium"), (1, "single")] |
1270 |
# rev count - 2010 -> 2x1000 + 1x10
|
|
1271 |
pack_operations = packs.plan_autopack_combinations( |
|
1272 |
existing_packs, [1000, 1000, 10]) |
|
1273 |
self.assertEqual([], pack_operations) |
|
1274 |
||
1275 |
def test_plan_pack_operations_2010_combines_smallest_two(self): |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1276 |
packs = self.get_packs() |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1277 |
existing_packs = [(1999, "big"), (9, "medium"), (1, "single2"), |
1278 |
(1, "single1")] |
|
1279 |
# rev count - 2010 -> 2x1000 + 1x10 (3)
|
|
1280 |
pack_operations = packs.plan_autopack_combinations( |
|
1281 |
existing_packs, [1000, 1000, 10]) |
|
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1282 |
self.assertEqual([[2, ["single2", "single1"]]], pack_operations) |
2592.3.85
by Robert Collins
Finish autopack corner cases. |
1283 |
|
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1284 |
def test_plan_pack_operations_creates_a_single_op(self): |
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1285 |
packs = self.get_packs() |
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1286 |
existing_packs = [(50, 'a'), (40, 'b'), (30, 'c'), (10, 'd'), |
1287 |
(10, 'e'), (6, 'f'), (4, 'g')] |
|
1288 |
# rev count 150 -> 1x100 and 5x10
|
|
1289 |
# The two size 10 packs do not need to be touched. The 50, 40, 30 would
|
|
1290 |
# be combined into a single 120 size pack, and the 6 & 4 would
|
|
1291 |
# becombined into a size 10 pack. However, if we have to rewrite them,
|
|
1292 |
# we save a pack file with no increased I/O by putting them into the
|
|
1293 |
# same file.
|
|
1294 |
distribution = packs.pack_distribution(150) |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1295 |
pack_operations = packs.plan_autopack_combinations(existing_packs, |
3711.4.2
by John Arbash Meinel
Change the logic to solve it in a different way. |
1296 |
distribution) |
1297 |
self.assertEqual([[130, ['a', 'b', 'c', 'f', 'g']]], pack_operations) |
|
3711.4.1
by John Arbash Meinel
Fix bug #242510, when determining the autopack sequence, |
1298 |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1299 |
def test_all_packs_none(self): |
1300 |
format = self.get_format() |
|
1301 |
tree = self.make_branch_and_tree('.', format=format) |
|
1302 |
tree.lock_read() |
|
1303 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1304 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1305 |
packs.ensure_loaded() |
1306 |
self.assertEqual([], packs.all_packs()) |
|
1307 |
||
1308 |
def test_all_packs_one(self): |
|
1309 |
format = self.get_format() |
|
1310 |
tree = self.make_branch_and_tree('.', format=format) |
|
1311 |
tree.commit('start') |
|
1312 |
tree.lock_read() |
|
1313 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1314 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1315 |
packs.ensure_loaded() |
2592.3.176
by Robert Collins
Various pack refactorings. |
1316 |
self.assertEqual([ |
1317 |
packs.get_pack_by_name(packs.names()[0])], |
|
1318 |
packs.all_packs()) |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1319 |
|
1320 |
def test_all_packs_two(self): |
|
1321 |
format = self.get_format() |
|
1322 |
tree = self.make_branch_and_tree('.', format=format) |
|
1323 |
tree.commit('start') |
|
1324 |
tree.commit('continue') |
|
1325 |
tree.lock_read() |
|
1326 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1327 |
packs = tree.branch.repository._pack_collection |
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1328 |
packs.ensure_loaded() |
1329 |
self.assertEqual([ |
|
2592.3.176
by Robert Collins
Various pack refactorings. |
1330 |
packs.get_pack_by_name(packs.names()[0]), |
1331 |
packs.get_pack_by_name(packs.names()[1]), |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1332 |
], packs.all_packs()) |
1333 |
||
2592.3.176
by Robert Collins
Various pack refactorings. |
1334 |
def test_get_pack_by_name(self): |
1335 |
format = self.get_format() |
|
1336 |
tree = self.make_branch_and_tree('.', format=format) |
|
1337 |
tree.commit('start') |
|
1338 |
tree.lock_read() |
|
1339 |
self.addCleanup(tree.unlock) |
|
2592.3.232
by Martin Pool
Disambiguate two member variables called _packs into _packs_by_name and _pack_collection |
1340 |
packs = tree.branch.repository._pack_collection |
4145.1.6
by Robert Collins
More test fallout, but all caught now. |
1341 |
packs.reset() |
2592.3.176
by Robert Collins
Various pack refactorings. |
1342 |
packs.ensure_loaded() |
1343 |
name = packs.names()[0] |
|
1344 |
pack_1 = packs.get_pack_by_name(name) |
|
1345 |
# the pack should be correctly initialised
|
|
3517.4.5
by Martin Pool
Correct use of packs._names in test_get_pack_by_name |
1346 |
sizes = packs._names[name] |
3221.12.4
by Robert Collins
Implement basic repository supporting external references. |
1347 |
rev_index = GraphIndex(packs._index_transport, name + '.rix', sizes[0]) |
1348 |
inv_index = GraphIndex(packs._index_transport, name + '.iix', sizes[1]) |
|
1349 |
txt_index = GraphIndex(packs._index_transport, name + '.tix', sizes[2]) |
|
1350 |
sig_index = GraphIndex(packs._index_transport, name + '.six', sizes[3]) |
|
2592.3.191
by Robert Collins
Give Pack responsibility for index naming, and two concrete classes - NewPack for new packs and ExistingPack for packs we read from disk. |
1351 |
self.assertEqual(pack_repo.ExistingPack(packs._pack_transport, |
2592.3.219
by Robert Collins
Review feedback. |
1352 |
name, rev_index, inv_index, txt_index, sig_index), pack_1) |
2592.3.176
by Robert Collins
Various pack refactorings. |
1353 |
# and the same instance should be returned on successive calls.
|
1354 |
self.assertTrue(pack_1 is packs.get_pack_by_name(name)) |
|
1355 |
||
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1356 |
def test_reload_pack_names_new_entry(self): |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1357 |
tree, r, packs, revs = self.make_packs_and_alt_repo() |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1358 |
names = packs.names() |
1359 |
# Add a new pack file into the repository
|
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1360 |
rev4 = tree.commit('four') |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1361 |
new_names = tree.branch.repository._pack_collection.names() |
1362 |
new_name = set(new_names).difference(names) |
|
1363 |
self.assertEqual(1, len(new_name)) |
|
1364 |
new_name = new_name.pop() |
|
1365 |
# The old collection hasn't noticed yet
|
|
1366 |
self.assertEqual(names, packs.names()) |
|
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1367 |
self.assertTrue(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1368 |
self.assertEqual(new_names, packs.names()) |
1369 |
# And the repository can access the new revision
|
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1370 |
self.assertEqual({rev4:(revs[-1],)}, r.get_parent_map([rev4])) |
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1371 |
self.assertFalse(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1372 |
|
1373 |
def test_reload_pack_names_added_and_removed(self): |
|
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1374 |
tree, r, packs, revs = self.make_packs_and_alt_repo() |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1375 |
names = packs.names() |
1376 |
# Now repack the whole thing
|
|
1377 |
tree.branch.repository.pack() |
|
1378 |
new_names = tree.branch.repository._pack_collection.names() |
|
1379 |
# The other collection hasn't noticed yet
|
|
1380 |
self.assertEqual(names, packs.names()) |
|
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1381 |
self.assertTrue(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1382 |
self.assertEqual(new_names, packs.names()) |
3789.2.19
by John Arbash Meinel
Refactor to make the tests a bit simpler |
1383 |
self.assertEqual({revs[-1]:(revs[-2],)}, r.get_parent_map([revs[-1]])) |
3789.1.8
by John Arbash Meinel
Change the api of reload_pack_names(). |
1384 |
self.assertFalse(packs.reload_pack_names()) |
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
1385 |
|
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1386 |
def test_reload_pack_names_preserves_pending(self): |
1387 |
# TODO: Update this to also test for pending-deleted names
|
|
1388 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1389 |
# We will add one pack (via start_write_group + insert_record_stream),
|
|
1390 |
# and remove another pack (via _remove_pack_from_memory)
|
|
1391 |
orig_names = packs.names() |
|
1392 |
orig_at_load = packs._packs_at_load |
|
1393 |
to_remove_name = iter(orig_names).next() |
|
1394 |
r.start_write_group() |
|
1395 |
self.addCleanup(r.abort_write_group) |
|
1396 |
r.texts.insert_record_stream([versionedfile.FulltextContentFactory( |
|
1397 |
('text', 'rev'), (), None, 'content\n')]) |
|
1398 |
new_pack = packs._new_pack |
|
1399 |
self.assertTrue(new_pack.data_inserted()) |
|
1400 |
new_pack.finish() |
|
1401 |
packs.allocate(new_pack) |
|
1402 |
packs._new_pack = None |
|
1403 |
removed_pack = packs.get_pack_by_name(to_remove_name) |
|
1404 |
packs._remove_pack_from_memory(removed_pack) |
|
1405 |
names = packs.names() |
|
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1406 |
all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names() |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1407 |
new_names = set([x[0][0] for x in new_nodes]) |
1408 |
self.assertEqual(names, sorted([x[0][0] for x in all_nodes])) |
|
1409 |
self.assertEqual(set(names) - set(orig_names), new_names) |
|
1410 |
self.assertEqual(set([new_pack.name]), new_names) |
|
1411 |
self.assertEqual([to_remove_name], |
|
1412 |
sorted([x[0][0] for x in deleted_nodes])) |
|
1413 |
packs.reload_pack_names() |
|
1414 |
reloaded_names = packs.names() |
|
1415 |
self.assertEqual(orig_at_load, packs._packs_at_load) |
|
1416 |
self.assertEqual(names, reloaded_names) |
|
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1417 |
all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names() |
4634.126.1
by John Arbash Meinel
(jam) Fix bug #507566, concurrent autopacking correctness. |
1418 |
new_names = set([x[0][0] for x in new_nodes]) |
1419 |
self.assertEqual(names, sorted([x[0][0] for x in all_nodes])) |
|
1420 |
self.assertEqual(set(names) - set(orig_names), new_names) |
|
1421 |
self.assertEqual(set([new_pack.name]), new_names) |
|
1422 |
self.assertEqual([to_remove_name], |
|
1423 |
sorted([x[0][0] for x in deleted_nodes])) |
|
1424 |
||
4634.127.5
by John Arbash Meinel
Possible fix for making sure packs triggering autopacking get cleaned up. |
1425 |
def test_autopack_obsoletes_new_pack(self): |
1426 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1427 |
packs._max_pack_count = lambda x: 1 |
|
1428 |
packs.pack_distribution = lambda x: [10] |
|
1429 |
r.start_write_group() |
|
1430 |
r.revisions.insert_record_stream([versionedfile.FulltextContentFactory( |
|
1431 |
('bogus-rev',), (), None, 'bogus-content\n')]) |
|
1432 |
# This should trigger an autopack, which will combine everything into a
|
|
1433 |
# single pack file.
|
|
1434 |
new_names = r.commit_write_group() |
|
1435 |
names = packs.names() |
|
1436 |
self.assertEqual(1, len(names)) |
|
1437 |
self.assertEqual([names[0] + '.pack'], |
|
1438 |
packs._pack_transport.list_dir('.')) |
|
1439 |
||
3789.2.20
by John Arbash Meinel
The autopack code can now trigger itself to retry when _copy_revision_texts fails. |
1440 |
def test_autopack_reloads_and_stops(self): |
1441 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1442 |
# After we have determined what needs to be autopacked, trigger a
|
|
1443 |
# full-pack via the other repo which will cause us to re-evaluate and
|
|
1444 |
# decide we don't need to do anything
|
|
1445 |
orig_execute = packs._execute_pack_operations |
|
1446 |
def _munged_execute_pack_ops(*args, **kwargs): |
|
1447 |
tree.branch.repository.pack() |
|
1448 |
return orig_execute(*args, **kwargs) |
|
1449 |
packs._execute_pack_operations = _munged_execute_pack_ops |
|
1450 |
packs._max_pack_count = lambda x: 1 |
|
1451 |
packs.pack_distribution = lambda x: [10] |
|
1452 |
self.assertFalse(packs.autopack()) |
|
1453 |
self.assertEqual(1, len(packs.names())) |
|
1454 |
self.assertEqual(tree.branch.repository._pack_collection.names(), |
|
1455 |
packs.names()) |
|
1456 |
||
4634.127.1
by John Arbash Meinel
Partial fix for bug #507557. |
1457 |
def test__save_pack_names(self): |
1458 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1459 |
names = packs.names() |
|
1460 |
pack = packs.get_pack_by_name(names[0]) |
|
1461 |
packs._remove_pack_from_memory(pack) |
|
1462 |
packs._save_pack_names(obsolete_packs=[pack]) |
|
1463 |
cur_packs = packs._pack_transport.list_dir('.') |
|
1464 |
self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs)) |
|
1465 |
# obsolete_packs will also have stuff like .rix and .iix present.
|
|
1466 |
obsolete_packs = packs.transport.list_dir('obsolete_packs') |
|
1467 |
obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs]) |
|
1468 |
self.assertEqual([pack.name], sorted(obsolete_names)) |
|
1469 |
||
1470 |
def test__save_pack_names_already_obsoleted(self): |
|
1471 |
tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True) |
|
1472 |
names = packs.names() |
|
1473 |
pack = packs.get_pack_by_name(names[0]) |
|
1474 |
packs._remove_pack_from_memory(pack) |
|
1475 |
# We are going to simulate a concurrent autopack by manually obsoleting
|
|
1476 |
# the pack directly.
|
|
1477 |
packs._obsolete_packs([pack]) |
|
1478 |
packs._save_pack_names(clear_obsolete_packs=True, |
|
1479 |
obsolete_packs=[pack]) |
|
1480 |
cur_packs = packs._pack_transport.list_dir('.') |
|
1481 |
self.assertEqual([n + '.pack' for n in names[1:]], sorted(cur_packs)) |
|
1482 |
# Note that while we set clear_obsolete_packs=True, it should not
|
|
1483 |
# delete a pack file that we have also scheduled for obsoletion.
|
|
1484 |
obsolete_packs = packs.transport.list_dir('obsolete_packs') |
|
1485 |
obsolete_names = set([osutils.splitext(n)[0] for n in obsolete_packs]) |
|
1486 |
self.assertEqual([pack.name], sorted(obsolete_names)) |
|
1487 |
||
4634.127.3
by John Arbash Meinel
Add code so we don't try to obsolete files someone else has 'claimed'. |
1488 |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1489 |
|
1490 |
class TestPack(TestCaseWithTransport): |
|
1491 |
"""Tests for the Pack object."""
|
|
1492 |
||
1493 |
def assertCurrentlyEqual(self, left, right): |
|
1494 |
self.assertTrue(left == right) |
|
1495 |
self.assertTrue(right == left) |
|
1496 |
self.assertFalse(left != right) |
|
1497 |
self.assertFalse(right != left) |
|
1498 |
||
1499 |
def assertCurrentlyNotEqual(self, left, right): |
|
1500 |
self.assertFalse(left == right) |
|
1501 |
self.assertFalse(right == left) |
|
1502 |
self.assertTrue(left != right) |
|
1503 |
self.assertTrue(right != left) |
|
1504 |
||
1505 |
def test___eq____ne__(self): |
|
2592.3.191
by Robert Collins
Give Pack responsibility for index naming, and two concrete classes - NewPack for new packs and ExistingPack for packs we read from disk. |
1506 |
left = pack_repo.ExistingPack('', '', '', '', '', '') |
1507 |
right = pack_repo.ExistingPack('', '', '', '', '', '') |
|
2592.3.173
by Robert Collins
Basic implementation of all_packs. |
1508 |
self.assertCurrentlyEqual(left, right) |
1509 |
# change all attributes and ensure equality changes as we do.
|
|
1510 |
left.revision_index = 'a' |
|
1511 |
self.assertCurrentlyNotEqual(left, right) |
|
1512 |
right.revision_index = 'a' |
|
1513 |
self.assertCurrentlyEqual(left, right) |
|
1514 |
left.inventory_index = 'a' |
|
1515 |
self.assertCurrentlyNotEqual(left, right) |
|
1516 |
right.inventory_index = 'a' |
|
1517 |
self.assertCurrentlyEqual(left, right) |
|
1518 |
left.text_index = 'a' |
|
1519 |
self.assertCurrentlyNotEqual(left, right) |
|
1520 |
right.text_index = 'a' |
|
1521 |
self.assertCurrentlyEqual(left, right) |
|
1522 |
left.signature_index = 'a' |
|
1523 |
self.assertCurrentlyNotEqual(left, right) |
|
1524 |
right.signature_index = 'a' |
|
1525 |
self.assertCurrentlyEqual(left, right) |
|
1526 |
left.name = 'a' |
|
1527 |
self.assertCurrentlyNotEqual(left, right) |
|
1528 |
right.name = 'a' |
|
1529 |
self.assertCurrentlyEqual(left, right) |
|
1530 |
left.transport = 'a' |
|
1531 |
self.assertCurrentlyNotEqual(left, right) |
|
1532 |
right.transport = 'a' |
|
1533 |
self.assertCurrentlyEqual(left, right) |
|
2592.3.179
by Robert Collins
Generate the revision_index_map for packing during the core operation, from the pack objects. |
1534 |
|
1535 |
def test_file_name(self): |
|
2592.3.191
by Robert Collins
Give Pack responsibility for index naming, and two concrete classes - NewPack for new packs and ExistingPack for packs we read from disk. |
1536 |
pack = pack_repo.ExistingPack('', 'a_name', '', '', '', '') |
2592.3.179
by Robert Collins
Generate the revision_index_map for packing during the core operation, from the pack objects. |
1537 |
self.assertEqual('a_name.pack', pack.file_name()) |
2592.3.192
by Robert Collins
Move new revision index management to NewPack. |
1538 |
|
1539 |
||
1540 |
class TestNewPack(TestCaseWithTransport): |
|
1541 |
"""Tests for pack_repo.NewPack."""
|
|
1542 |
||
2592.3.193
by Robert Collins
Move hash tracking of new packs into NewPack. |
1543 |
def test_new_instance_attributes(self): |
2592.3.194
by Robert Collins
Output the revision index from NewPack.finish |
1544 |
upload_transport = self.get_transport('upload') |
1545 |
pack_transport = self.get_transport('pack') |
|
1546 |
index_transport = self.get_transport('index') |
|
1547 |
upload_transport.mkdir('.') |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
1548 |
collection = pack_repo.RepositoryPackCollection( |
1549 |
repo=None, |
|
3830.3.1
by Martin Pool
NewPack should be constructed from the PackCollection, rather than attributes of it |
1550 |
transport=self.get_transport('.'), |
1551 |
index_transport=index_transport, |
|
1552 |
upload_transport=upload_transport, |
|
1553 |
pack_transport=pack_transport, |
|
1554 |
index_builder_class=BTreeBuilder, |
|
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil) |
1555 |
index_class=BTreeGraphIndex, |
1556 |
use_chk_index=False) |
|
3830.3.1
by Martin Pool
NewPack should be constructed from the PackCollection, rather than attributes of it |
1557 |
pack = pack_repo.NewPack(collection) |
4857.2.1
by John Arbash Meinel
2 test_repository tests weren't adding cleanups when opening files. |
1558 |
self.addCleanup(pack.abort) # Make sure the write stream gets closed |
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
1559 |
self.assertIsInstance(pack.revision_index, BTreeBuilder) |
1560 |
self.assertIsInstance(pack.inventory_index, BTreeBuilder) |
|
2929.3.5
by Vincent Ladeuil
New files, same warnings, same fixes. |
1561 |
self.assertIsInstance(pack._hash, type(osutils.md5())) |
2592.3.194
by Robert Collins
Output the revision index from NewPack.finish |
1562 |
self.assertTrue(pack.upload_transport is upload_transport) |
1563 |
self.assertTrue(pack.index_transport is index_transport) |
|
1564 |
self.assertTrue(pack.pack_transport is pack_transport) |
|
1565 |
self.assertEqual(None, pack.index_sizes) |
|
1566 |
self.assertEqual(20, len(pack.random_name)) |
|
1567 |
self.assertIsInstance(pack.random_name, str) |
|
1568 |
self.assertIsInstance(pack.start_time, float) |
|
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
1569 |
|
1570 |
||
1571 |
class TestPacker(TestCaseWithTransport): |
|
1572 |
"""Tests for the packs repository Packer class."""
|
|
2951.1.10
by Robert Collins
Peer review feedback with Ian. |
1573 |
|
3824.2.4
by John Arbash Meinel
Add a test that ensures the pack ordering changes as part of calling .pack() |
1574 |
def test_pack_optimizes_pack_order(self): |
4617.8.1
by Robert Collins
Lock down another test assuming the default was a PackRepository. |
1575 |
builder = self.make_branch_builder('.', format="1.9") |
3824.2.4
by John Arbash Meinel
Add a test that ensures the pack ordering changes as part of calling .pack() |
1576 |
builder.start_series() |
1577 |
builder.build_snapshot('A', None, [ |
|
1578 |
('add', ('', 'root-id', 'directory', None)), |
|
1579 |
('add', ('f', 'f-id', 'file', 'content\n'))]) |
|
1580 |
builder.build_snapshot('B', ['A'], |
|
1581 |
[('modify', ('f-id', 'new-content\n'))]) |
|
1582 |
builder.build_snapshot('C', ['B'], |
|
1583 |
[('modify', ('f-id', 'third-content\n'))]) |
|
1584 |
builder.build_snapshot('D', ['C'], |
|
1585 |
[('modify', ('f-id', 'fourth-content\n'))]) |
|
1586 |
b = builder.get_branch() |
|
1587 |
b.lock_read() |
|
1588 |
builder.finish_series() |
|
1589 |
self.addCleanup(b.unlock) |
|
1590 |
# At this point, we should have 4 pack files available
|
|
1591 |
# Because of how they were built, they correspond to
|
|
1592 |
# ['D', 'C', 'B', 'A']
|
|
1593 |
packs = b.repository._pack_collection.packs |
|
1594 |
packer = pack_repo.Packer(b.repository._pack_collection, |
|
1595 |
packs, 'testing', |
|
1596 |
revision_ids=['B', 'C']) |
|
1597 |
# Now, when we are copying the B & C revisions, their pack files should
|
|
1598 |
# be moved to the front of the stack
|
|
3824.2.5
by Andrew Bennetts
Minor tweaks to comments etc. |
1599 |
# The new ordering moves B & C to the front of the .packs attribute,
|
1600 |
# and leaves the others in the original order.
|
|
3824.2.4
by John Arbash Meinel
Add a test that ensures the pack ordering changes as part of calling .pack() |
1601 |
new_packs = [packs[1], packs[2], packs[0], packs[3]] |
1602 |
new_pack = packer.pack() |
|
1603 |
self.assertEqual(new_packs, packer.packs) |
|
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
1604 |
|
1605 |
||
3777.5.4
by John Arbash Meinel
OptimisingPacker now sets the optimize flags for the indexes being built. |
1606 |
class TestOptimisingPacker(TestCaseWithTransport): |
1607 |
"""Tests for the OptimisingPacker class."""
|
|
1608 |
||
1609 |
def get_pack_collection(self): |
|
1610 |
repo = self.make_repository('.') |
|
1611 |
return repo._pack_collection |
|
1612 |
||
1613 |
def test_open_pack_will_optimise(self): |
|
1614 |
packer = pack_repo.OptimisingPacker(self.get_pack_collection(), |
|
1615 |
[], '.test') |
|
1616 |
new_pack = packer.open_pack() |
|
4857.2.1
by John Arbash Meinel
2 test_repository tests weren't adding cleanups when opening files. |
1617 |
self.addCleanup(new_pack.abort) # ensure cleanup |
3777.5.4
by John Arbash Meinel
OptimisingPacker now sets the optimize flags for the indexes being built. |
1618 |
self.assertIsInstance(new_pack, pack_repo.NewPack) |
1619 |
self.assertTrue(new_pack.revision_index._optimize_for_size) |
|
1620 |
self.assertTrue(new_pack.inventory_index._optimize_for_size) |
|
1621 |
self.assertTrue(new_pack.text_index._optimize_for_size) |
|
1622 |
self.assertTrue(new_pack.signature_index._optimize_for_size) |
|
4462.2.6
by Robert Collins
Cause StreamSink to partially pack repositories after cross format fetches when beneficial. |
1623 |
|
1624 |
||
4431.3.7
by Jonathan Lange
Cherrypick bzr.dev 4470, resolving conflicts. |
1625 |
class TestCrossFormatPacks(TestCaseWithTransport): |
1626 |
||
1627 |
def log_pack(self, hint=None): |
|
1628 |
self.calls.append(('pack', hint)) |
|
1629 |
self.orig_pack(hint=hint) |
|
1630 |
if self.expect_hint: |
|
1631 |
self.assertTrue(hint) |
|
1632 |
||
1633 |
def run_stream(self, src_fmt, target_fmt, expect_pack_called): |
|
1634 |
self.expect_hint = expect_pack_called |
|
1635 |
self.calls = [] |
|
1636 |
source_tree = self.make_branch_and_tree('src', format=src_fmt) |
|
1637 |
source_tree.lock_write() |
|
1638 |
self.addCleanup(source_tree.unlock) |
|
1639 |
tip = source_tree.commit('foo') |
|
1640 |
target = self.make_repository('target', format=target_fmt) |
|
1641 |
target.lock_write() |
|
1642 |
self.addCleanup(target.unlock) |
|
1643 |
source = source_tree.branch.repository._get_source(target._format) |
|
1644 |
self.orig_pack = target.pack |
|
1645 |
target.pack = self.log_pack |
|
1646 |
search = target.search_missing_revision_ids( |
|
1647 |
source_tree.branch.repository, tip) |
|
1648 |
stream = source.get_stream(search) |
|
1649 |
from_format = source_tree.branch.repository._format |
|
1650 |
sink = target._get_sink() |
|
1651 |
sink.insert_stream(stream, from_format, []) |
|
1652 |
if expect_pack_called: |
|
1653 |
self.assertLength(1, self.calls) |
|
1654 |
else: |
|
1655 |
self.assertLength(0, self.calls) |
|
1656 |
||
1657 |
def run_fetch(self, src_fmt, target_fmt, expect_pack_called): |
|
1658 |
self.expect_hint = expect_pack_called |
|
1659 |
self.calls = [] |
|
1660 |
source_tree = self.make_branch_and_tree('src', format=src_fmt) |
|
1661 |
source_tree.lock_write() |
|
1662 |
self.addCleanup(source_tree.unlock) |
|
1663 |
tip = source_tree.commit('foo') |
|
1664 |
target = self.make_repository('target', format=target_fmt) |
|
1665 |
target.lock_write() |
|
1666 |
self.addCleanup(target.unlock) |
|
1667 |
source = source_tree.branch.repository |
|
1668 |
self.orig_pack = target.pack |
|
1669 |
target.pack = self.log_pack |
|
1670 |
target.fetch(source) |
|
1671 |
if expect_pack_called: |
|
1672 |
self.assertLength(1, self.calls) |
|
1673 |
else: |
|
1674 |
self.assertLength(0, self.calls) |
|
1675 |
||
1676 |
def test_sink_format_hint_no(self): |
|
1677 |
# When the target format says packing makes no difference, pack is not
|
|
1678 |
# called.
|
|
1679 |
self.run_stream('1.9', 'rich-root-pack', False) |
|
1680 |
||
1681 |
def test_sink_format_hint_yes(self): |
|
1682 |
# When the target format says packing makes a difference, pack is
|
|
1683 |
# called.
|
|
1684 |
self.run_stream('1.9', '2a', True) |
|
1685 |
||
1686 |
def test_sink_format_same_no(self): |
|
1687 |
# When the formats are the same, pack is not called.
|
|
1688 |
self.run_stream('2a', '2a', False) |
|
1689 |
||
1690 |
def test_IDS_format_hint_no(self): |
|
1691 |
# When the target format says packing makes no difference, pack is not
|
|
1692 |
# called.
|
|
1693 |
self.run_fetch('1.9', 'rich-root-pack', False) |
|
1694 |
||
1695 |
def test_IDS_format_hint_yes(self): |
|
1696 |
# When the target format says packing makes a difference, pack is
|
|
1697 |
# called.
|
|
1698 |
self.run_fetch('1.9', '2a', True) |
|
1699 |
||
1700 |
def test_IDS_format_same_no(self): |
|
1701 |
# When the formats are the same, pack is not called.
|
|
1702 |
self.run_fetch('2a', '2a', False) |