# Copyright (C) 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Tests for the Repository facility that are not interface tests.

For interface tests see tests/repository_implementations/*.py.

For concrete class tests see this file, and for storage formats tests
also see this file.
"""

import md5
from stat import S_ISDIR
from StringIO import StringIO

import bzrlib
from bzrlib.errors import (NotBranchError,
                           NoSuchFile,
                           UnknownFormatError,
                           UnsupportedFormatError,
                           )
from bzrlib import graph
from bzrlib.index import GraphIndex, InMemoryGraphIndex
from bzrlib.repository import RepositoryFormat
from bzrlib.smart import server
from bzrlib.tests import (
    TestCase,
    TestCaseWithTransport,
    test_knit,
    )
from bzrlib.transport import (
    fakenfs,
    get_transport,
    )
from bzrlib.transport.memory import MemoryServer
from bzrlib.util import bencode
from bzrlib import (
    bzrdir,
    errors,
    inventory,
    progress,
    repository,
    revision as _mod_revision,
    upgrade,
    workingtree,
    )
from bzrlib.repofmt import knitrepo, weaverepo, pack_repo


class TestDefaultFormat(TestCase):

    def test_get_set_default_format(self):
        old_default = bzrdir.format_registry.get('default')
        private_default = old_default().repository_format.__class__
        old_format = repository.RepositoryFormat.get_default_format()
        self.assertTrue(isinstance(old_format, private_default))
        def make_sample_bzrdir():
            my_bzrdir = bzrdir.BzrDirMetaFormat1()
            my_bzrdir.repository_format = SampleRepositoryFormat()
            return my_bzrdir
        bzrdir.format_registry.remove('default')
        bzrdir.format_registry.register('sample', make_sample_bzrdir, '')
        bzrdir.format_registry.set_default('sample')
        # creating a repository should now create an instrumented dir.
        # the default branch format is used by the meta dir format
        # which is not the default bzrdir format at this point
        dir = bzrdir.BzrDirMetaFormat1().initialize('memory:///')
        result = dir.create_repository()
        self.assertEqual(result, 'A bzr repository dir')
        bzrdir.format_registry.remove('default')
        bzrdir.format_registry.remove('sample')
        bzrdir.format_registry.register('default', old_default, '')
        self.assertIsInstance(repository.RepositoryFormat.get_default_format(),


class SampleRepositoryFormat(repository.RepositoryFormat):
    """A sample format.

    This format is initializable, unsupported to aid in testing the
    open and open(unsupported=True) routines.
    """

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Sample .bzr repository format."

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository in a BzrDir."""
        t = a_bzrdir.get_repository_transport(self)
        t.put_bytes('format', self.get_format_string())
        return 'A bzr repository dir'

    def is_supported(self):
        return False

    def open(self, a_bzrdir, _found=False):
        return "opened repository."


class TestRepositoryFormat(TestCaseWithTransport):
    """Tests for the Repository format detection used by the bzr meta dir facility."""

    def test_find_format(self):
        # is the right format object found for a repository?
        # create a branch with a few known format objects.
        # this is not quite the same as
        self.build_tree(["foo/", "bar/"])
        def check_format(format, url):
            dir = format._matchingbzrdir.initialize(url)
            format.initialize(dir)
            t = get_transport(url)
            found_format = repository.RepositoryFormat.find_format(dir)
            self.failUnless(isinstance(found_format, format.__class__))
        check_format(weaverepo.RepositoryFormat7(), "bar")

    def test_find_format_no_repository(self):
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        self.assertRaises(errors.NoRepositoryPresent,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_find_format_unknown_format(self):
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        SampleRepositoryFormat().initialize(dir)
        self.assertRaises(UnknownFormatError,
                          repository.RepositoryFormat.find_format,
                          dir)

    def test_register_unregister_format(self):
        format = SampleRepositoryFormat()
        dir = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        format.initialize(dir)
        # register a format for it.
        repository.RepositoryFormat.register_format(format)
        # which Repository.open will refuse (not supported)
        self.assertRaises(UnsupportedFormatError, repository.Repository.open, self.get_url())
        # but open(unsupported) will work
        self.assertEqual(format.open(dir), "opened repository.")
        # unregister the format
        repository.RepositoryFormat.unregister_format(format)


class TestFormat6(TestCaseWithTransport):

    def test_no_ancestry_weave(self):
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        # We no longer need to create the ancestry.weave file
        # since it is *never* used.
        self.assertRaises(NoSuchFile,
                          control.transport.get,
                          'ancestry.weave')

    def test_exposed_versioned_files_are_marked_dirty(self):
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        inv = repo.get_inventory_weave()
        self.assertRaises(errors.OutSideTransaction,
                          inv.add_lines, 'foo', [], [])

    def test_supports_external_lookups(self):
        control = bzrdir.BzrDirFormat6().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat6().initialize(control)
        self.assertFalse(repo._format.supports_external_lookups)


class TestFormat7(TestCaseWithTransport):

    def test_disk_layout(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        # in case of side effects of locking.
        # format 'Bazaar-NG Repository format 7'
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())

    def test_shared_disk_layout(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        # format 'Bazaar-NG Repository format 7'
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        # lock is not present when unlocked
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())
        self.assertFalse(t.has('branch-lock'))

    def test_creates_lockdir(self):
        """Make sure it appears to be controlled by a LockDir's existence."""
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        t = control.get_repository_transport(None)
        # TODO: Should check there is a 'lock' toplevel directory,
        # regardless of contents
        self.assertFalse(t.has('lock/held/info'))
        repo.lock_write()
        self.assertTrue(t.has('lock/held/info'))
        # unlock so we don't get a warning about failing to do so
        repo.unlock()

    def test_uses_lockdir(self):
        """repo format 7 actually locks on lockdir"""
        base_url = self.get_url()
        control = bzrdir.BzrDirMetaFormat1().initialize(base_url)
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        t = control.get_repository_transport(None)
        # make sure the same lock is created by opening it
        repo = repository.Repository.open(base_url)
        repo.lock_write()
        self.assertTrue(t.has('lock/held/info'))
        repo.unlock()
        self.assertFalse(t.has('lock/held/info'))

    def test_shared_no_tree_disk_layout(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control, shared=True)
        repo.set_make_working_trees(False)
        # format 'Bazaar-NG Repository format 7'
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Repository format 7',
                             t.get('format').read())
        ## self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertEqualDiff('', t.get('no-working-trees').read())
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.assertTrue(S_ISDIR(t.stat('revision-store').st_mode))
        self.assertTrue(S_ISDIR(t.stat('weaves').st_mode))
        self.assertEqualDiff('# bzr weave file v5\n'
                             'w\n'
                             'W\n',
                             t.get('inventory.weave').read())

    def test_exposed_versioned_files_are_marked_dirty(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        inv = repo.get_inventory_weave()
        self.assertRaises(errors.OutSideTransaction,
                          inv.add_lines, 'foo', [], [])

    def test_supports_external_lookups(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = weaverepo.RepositoryFormat7().initialize(control)
        self.assertFalse(repo._format.supports_external_lookups)


class TestFormatKnit1(TestCaseWithTransport):

    def test_disk_layout(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = knitrepo.RepositoryFormatKnit1().initialize(control)
        # in case of side effects of locking.
        # format 'Bazaar-NG Knit Repository Format 1'
        # lock: is a directory
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
                             t.get('format').read())
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))

    def assertHasKnit(self, t, knit_name):
        """Assert that knit_name exists on t."""
        self.assertEqualDiff('# bzr knit index 8\n',
                             t.get(knit_name + '.kndx').read())
        self.assertTrue(t.has(knit_name + '.knit'))

    def check_knits(self, t):
        """Check knit content for a repository."""
        self.assertHasKnit(t, 'inventory')
        self.assertHasKnit(t, 'revisions')
        self.assertHasKnit(t, 'signatures')

    def test_shared_disk_layout(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True)
        # format 'Bazaar-NG Knit Repository Format 1'
        # lock: is a directory
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
                             t.get('format').read())
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))

    def test_shared_no_tree_disk_layout(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = knitrepo.RepositoryFormatKnit1().initialize(control, shared=True)
        repo.set_make_working_trees(False)
        # format 'Bazaar-NG Knit Repository Format 1'
        # inventory.weave == empty_weave
        # empty revision-store directory
        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        self.assertEqualDiff('Bazaar-NG Knit Repository Format 1',
                             t.get('format').read())
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.assertEqualDiff('', t.get('no-working-trees').read())
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))

    def test_exposed_versioned_files_are_marked_dirty(self):
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit1()
        repo = self.make_repository('.', format=format)
        inv = repo.get_inventory_weave()
        self.assertRaises(errors.OutSideTransaction,
                          inv.add_lines, 'foo', [], [])

    def test_deserialise_sets_root_revision(self):
        """We must have an inventory.root.revision.

        Old versions of the XML5 serializer did not set the revision_id for
        the whole inventory, so we grab the one from the expected text, which
        is valid when the API is not being abused.
        """
        repo = self.make_repository('.',
                format=bzrdir.format_registry.get('knit')())
        inv_xml = '<inventory format="5">\n</inventory>\n'
        inv = repo.deserialise_inventory('test-rev-id', inv_xml)
        self.assertEqual('test-rev-id', inv.root.revision)

    def test_deserialise_uses_global_revision_id(self):
        """If it is set, then we re-use the global revision id."""
        repo = self.make_repository('.',
                format=bzrdir.format_registry.get('knit')())
        inv_xml = ('<inventory format="5" revision_id="other-rev-id">\n'
                   '</inventory>\n')
        # Arguably, the deserialise_inventory should detect a mismatch, and
        # raise an error, rather than silently using one revision_id over the
        # other.
        self.assertRaises(AssertionError, repo.deserialise_inventory,
                          'test-rev-id', inv_xml)
        inv = repo.deserialise_inventory('other-rev-id', inv_xml)
        self.assertEqual('other-rev-id', inv.root.revision)

    def test_supports_external_lookups(self):
        repo = self.make_repository('.',
                format=bzrdir.format_registry.get('knit')())
        self.assertFalse(repo._format.supports_external_lookups)


class KnitRepositoryStreamTests(test_knit.KnitTests):
    """Tests for knitrepo._get_stream_as_bytes."""

    def test_get_stream_as_bytes(self):
        k1 = self.make_test_knit()
        k1.add_lines('text-a', [], test_knit.split_lines(test_knit.TEXT_1))

        # Serialise it, check the output.
        bytes = knitrepo._get_stream_as_bytes(k1, ['text-a'])
        data = bencode.bdecode(bytes)
        format, record = data
        self.assertEqual('knit-plain', format)
        self.assertEqual(['text-a', ['fulltext'], []], record[:3])
        self.assertRecordContentEqual(k1, 'text-a', record[3])
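        # i.e. the bencoded stream decodes to the format name ('knit-plain')
        # followed by one [version, options, parents, content] entry per
        # requested version; record[3] is the raw knit content checked above.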

    def test_get_stream_as_bytes_all(self):
        """Get a serialised data stream for all the records in a knit.

        Much like test_get_stream_all, except for get_stream_as_bytes.
        """
        k1 = self.make_test_knit()
        # Insert the same data as BasicKnitTests.test_knit_join, as they seem
        # to cover a range of cases (no parents, one parent, multiple parents).
        test_data = [
            ('text-a', [], test_knit.TEXT_1),
            ('text-b', ['text-a'], test_knit.TEXT_1),
            ('text-c', [], test_knit.TEXT_1),
            ('text-d', ['text-c'], test_knit.TEXT_1),
            ('text-m', ['text-b', 'text-d'], test_knit.TEXT_1),
            ]
        # This test is actually a bit strict as the order in which they're
        # returned is not defined. This matches the current (deterministic)
        # behaviour.
        expected_data_list = [
            # version, options, parents
            ('text-a', ['fulltext'], []),
            ('text-b', ['line-delta'], ['text-a']),
            ('text-m', ['line-delta'], ['text-b', 'text-d']),
            ('text-c', ['fulltext'], []),
            ('text-d', ['line-delta'], ['text-c']),
            ]
        for version_id, parents, lines in test_data:
            k1.add_lines(version_id, parents, test_knit.split_lines(lines))

        bytes = knitrepo._get_stream_as_bytes(
            k1, ['text-a', 'text-b', 'text-m', 'text-c', 'text-d', ])

        data = bencode.bdecode(bytes)
        format = data.pop(0)
        self.assertEqual('knit-plain', format)

        for expected, actual in zip(expected_data_list, data):
            expected_version = expected[0]
            expected_options = expected[1]
            expected_parents = expected[2]
            version, options, parents, bytes = actual
            self.assertEqual(expected_version, version)
            self.assertEqual(expected_options, options)
            self.assertEqual(expected_parents, parents)
            self.assertRecordContentEqual(k1, version, bytes)


class DummyRepository(object):
    """A dummy repository for testing."""

    def supports_rich_root(self):
        return False


class InterDummy(repository.InterRepository):
    """An inter-repository optimised code path for DummyRepository.

    This is for use during testing where we use DummyRepository as repositories
    so that none of the default registered inter-repository classes will
    match.
    """

    @staticmethod
    def is_compatible(repo_source, repo_target):
        """InterDummy is compatible with DummyRepository."""
        return (isinstance(repo_source, DummyRepository) and
            isinstance(repo_target, DummyRepository))


class TestInterRepository(TestCaseWithTransport):

    def test_get_default_inter_repository(self):
        # test that the InterRepository.get(repo_a, repo_b) probes
        # for an inter_repo class where is_compatible(repo_a, repo_b) returns
        # true and returns a default inter_repo otherwise.
        # This also tests that the default registered optimised interrepository
        # classes do not barf inappropriately when a surprising repository type
        # is handed to them.
        dummy_a = DummyRepository()
        dummy_b = DummyRepository()
        self.assertGetsDefaultInterRepository(dummy_a, dummy_b)

    def assertGetsDefaultInterRepository(self, repo_a, repo_b):
        """Asserts that InterRepository.get(repo_a, repo_b) -> the default.

        The effective default is now InterSameDataRepository because there is
        no actual sane default in the presence of incompatible data models.
        """
        inter_repo = repository.InterRepository.get(repo_a, repo_b)
        self.assertEqual(repository.InterSameDataRepository,
                         inter_repo.__class__)
        self.assertEqual(repo_a, inter_repo.source)
        self.assertEqual(repo_b, inter_repo.target)

    def test_register_inter_repository_class(self):
        # test that an optimised code path provider - an
        # InterRepository subclass can be registered and unregistered
        # and that it is correctly selected when given a repository
        # pair that it returns true on for the is_compatible static method
        # check.
        dummy_a = DummyRepository()
        dummy_b = DummyRepository()
        repo = self.make_repository('.')
        # hack dummies to look like repo somewhat.
        dummy_a._serializer = repo._serializer
        dummy_b._serializer = repo._serializer
        repository.InterRepository.register_optimiser(InterDummy)
        # we should get the default for something InterDummy returns False to
        self.assertFalse(InterDummy.is_compatible(dummy_a, repo))
        self.assertGetsDefaultInterRepository(dummy_a, repo)
        # and we should get an InterDummy for a pair it 'likes'
        self.assertTrue(InterDummy.is_compatible(dummy_a, dummy_b))
        inter_repo = repository.InterRepository.get(dummy_a, dummy_b)
        self.assertEqual(InterDummy, inter_repo.__class__)
        self.assertEqual(dummy_a, inter_repo.source)
        self.assertEqual(dummy_b, inter_repo.target)
        repository.InterRepository.unregister_optimiser(InterDummy)
        # now we should get the default InterRepository object again.
        self.assertGetsDefaultInterRepository(dummy_a, dummy_b)


class TestInterWeaveRepo(TestCaseWithTransport):

    def test_is_compatible_and_registered(self):
        # InterWeaveRepo is compatible when either side
        # is a format 5/6/7 branch
        from bzrlib.repofmt import knitrepo, weaverepo
        formats = [weaverepo.RepositoryFormat5(),
                   weaverepo.RepositoryFormat6(),
                   weaverepo.RepositoryFormat7()]
        incompatible_formats = [weaverepo.RepositoryFormat4(),
                                knitrepo.RepositoryFormatKnit1(),
                                ]
        repo_a = self.make_repository('a')
        repo_b = self.make_repository('b')
        is_compatible = repository.InterWeaveRepo.is_compatible
        for source in incompatible_formats:
            # force incompatible left then right
            repo_a._format = source
            repo_b._format = formats[0]
            self.assertFalse(is_compatible(repo_a, repo_b))
            self.assertFalse(is_compatible(repo_b, repo_a))
        for source in formats:
            repo_a._format = source
            for target in formats:
                repo_b._format = target
                self.assertTrue(is_compatible(repo_a, repo_b))
        self.assertEqual(repository.InterWeaveRepo,
                         repository.InterRepository.get(repo_a,
                                                        repo_b).__class__)


class TestInterRemoteToOther(TestCaseWithTransport):

    def make_remote_repository(self, path, backing_format=None):
        """Make a RemoteRepository object backed by a real repository that will
        be created at the given path."""
        self.make_repository(path, format=backing_format)
        smart_server = server.SmartTCPServer_for_testing()
        smart_server.setUp()
        remote_transport = get_transport(smart_server.get_url()).clone(path)
        self.addCleanup(smart_server.tearDown)
        remote_bzrdir = bzrdir.BzrDir.open_from_transport(remote_transport)
        remote_repo = remote_bzrdir.open_repository()
        return remote_repo

    def test_is_compatible_same_format(self):
        """InterRemoteToOther is compatible with a remote repository and a
        second repository that have the same format."""
        local_repo = self.make_repository('local')
        remote_repo = self.make_remote_repository('remote')
        is_compatible = repository.InterRemoteToOther.is_compatible
        self.assertTrue(
            is_compatible(remote_repo, local_repo),
            "InterRemoteToOther(%r, %r) is false" % (remote_repo, local_repo))

    def test_is_incompatible_different_format(self):
        local_repo = self.make_repository('local', 'dirstate')
        remote_repo = self.make_remote_repository('a', 'dirstate-with-subtree')
        is_compatible = repository.InterRemoteToOther.is_compatible
        self.assertFalse(
            is_compatible(remote_repo, local_repo),
            "InterRemoteToOther(%r, %r) is true" % (local_repo, remote_repo))

    def test_is_incompatible_different_format_both_remote(self):
        remote_repo_a = self.make_remote_repository(
            'a', 'dirstate-with-subtree')
        remote_repo_b = self.make_remote_repository('b', 'dirstate')
        is_compatible = repository.InterRemoteToOther.is_compatible
        self.assertFalse(
            is_compatible(remote_repo_a, remote_repo_b),
            "InterRemoteToOther(%r, %r) is true"
            % (remote_repo_a, remote_repo_b))


class TestRepositoryConverter(TestCaseWithTransport):

    def test_convert_empty(self):
        t = get_transport(self.get_url('.'))
        t.mkdir('repository')
        repo_dir = bzrdir.BzrDirMetaFormat1().initialize('repository')
        repo = weaverepo.RepositoryFormat7().initialize(repo_dir)
        target_format = knitrepo.RepositoryFormatKnit1()
        converter = repository.CopyConverter(target_format)
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        converter.convert(repo, pb)
        repo = repo_dir.open_repository()
        self.assertTrue(isinstance(target_format, repo._format.__class__))


class TestMisc(TestCase):

    def test_unescape_xml(self):
        """We get some kind of error when malformed entities are passed"""
        self.assertRaises(KeyError, repository._unescape_xml, 'foo&bar;')
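        # '&bar;' is not a known XML entity, so the lookup inside
        # _unescape_xml fails with a KeyError instead of silently producing
        # bad output.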


class TestRepositoryFormatKnit3(TestCaseWithTransport):

    def test_convert(self):
        """Ensure the upgrade adds weaves for roots"""
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit1()
        tree = self.make_branch_and_tree('.', format)
        tree.commit("Dull commit", rev_id="dull")
        revision_tree = tree.branch.repository.revision_tree('dull')
        self.assertRaises(errors.NoSuchFile, revision_tree.get_file_lines,
            revision_tree.inventory.root.file_id)
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit3()
        upgrade.Convert('.', format)
        tree = workingtree.WorkingTree.open('.')
        revision_tree = tree.branch.repository.revision_tree('dull')
        revision_tree.get_file_lines(revision_tree.inventory.root.file_id)
        tree.commit("Another dull commit", rev_id='dull2')
        revision_tree = tree.branch.repository.revision_tree('dull2')
        self.assertEqual('dull', revision_tree.inventory.root.revision)

    def test_exposed_versioned_files_are_marked_dirty(self):
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit3()
        repo = self.make_repository('.', format=format)
        inv = repo.get_inventory_weave()
        self.assertRaises(errors.OutSideTransaction,
                          inv.add_lines, 'foo', [], [])

    def test_supports_external_lookups(self):
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit3()
        repo = self.make_repository('.', format=format)
        self.assertFalse(repo._format.supports_external_lookups)


class TestWithBrokenRepo(TestCaseWithTransport):
    """These tests seem to be more appropriate as interface tests?"""

    def make_broken_repository(self):
        # XXX: This function is borrowed from Aaron's "Reconcile can fix bad
        # parent references" branch which is due to land in bzr.dev soon. Once
        # it does, this duplication should be removed.
        repo = self.make_repository('broken-repo')
        cleanups = []
        repo.lock_write()
        cleanups.append(repo.unlock)
        repo.start_write_group()
        cleanups.append(repo.commit_write_group)
        # make rev1a: A well-formed revision, containing 'file1'
        inv = inventory.Inventory(revision_id='rev1a')
        inv.root.revision = 'rev1a'
        self.add_file(repo, inv, 'file1', 'rev1a', [])
        repo.add_inventory('rev1a', inv, [])
        revision = _mod_revision.Revision('rev1a',
            committer='jrandom@example.com', timestamp=0,
            inventory_sha1='', timezone=0, message='foo', parent_ids=[])
        repo.add_revision('rev1a', revision, inv)

        # make rev1b, which has no Revision, but has an Inventory, and
        # file1
        inv = inventory.Inventory(revision_id='rev1b')
        inv.root.revision = 'rev1b'
        self.add_file(repo, inv, 'file1', 'rev1b', [])
        repo.add_inventory('rev1b', inv, [])

        # make rev2, with file1 and file2
        # file1 has 'rev1b' as an ancestor, even though this is not
        # mentioned by 'rev1a', making it an unreferenced ancestor
        inv = inventory.Inventory()
        self.add_file(repo, inv, 'file1', 'rev2', ['rev1a', 'rev1b'])
        self.add_file(repo, inv, 'file2', 'rev2', [])
        self.add_revision(repo, 'rev2', inv, ['rev1a'])

        # make ghost revision rev1c
        inv = inventory.Inventory()
        self.add_file(repo, inv, 'file2', 'rev1c', [])

        # make rev3 with file2
        # file2 refers to 'rev1c', which is a ghost in this repository, so
        # file2 cannot have rev1c as its ancestor.
        inv = inventory.Inventory()
        self.add_file(repo, inv, 'file2', 'rev3', ['rev1c'])
        self.add_revision(repo, 'rev3', inv, ['rev1c'])

        for cleanup in reversed(cleanups):
            cleanup()
        return repo

    def add_revision(self, repo, revision_id, inv, parent_ids):
        inv.revision_id = revision_id
        inv.root.revision = revision_id
        repo.add_inventory(revision_id, inv, parent_ids)
        revision = _mod_revision.Revision(revision_id,
            committer='jrandom@example.com', timestamp=0, inventory_sha1='',
            timezone=0, message='foo', parent_ids=parent_ids)
        repo.add_revision(revision_id, revision, inv)

    def add_file(self, repo, inv, filename, revision, parents):
        file_id = filename + '-id'
        entry = inventory.InventoryFile(file_id, filename, 'TREE_ROOT')
        entry.revision = revision
        inv.add(entry)
        vf = repo.weave_store.get_weave_or_empty(file_id,
                                                 repo.get_transaction())
        vf.add_lines(revision, parents, ['line\n'])

    def test_insert_from_broken_repo(self):
        """Inserting a data stream from a broken repository won't silently
        corrupt the target repository.
        """
        broken_repo = self.make_broken_repository()
        empty_repo = self.make_repository('empty-repo')
        search = graph.SearchResult(set(['rev1a', 'rev2', 'rev3']),
            set(), 3, ['rev1a', 'rev2', 'rev3'])
        broken_repo.lock_read()
        self.addCleanup(broken_repo.unlock)
        stream = broken_repo.get_data_stream_for_search(search)
        empty_repo.lock_write()
        self.addCleanup(empty_repo.unlock)
        empty_repo.start_write_group()
        self.assertRaises(
            errors.KnitCorrupt, empty_repo.insert_data_stream, stream)
        empty_repo.abort_write_group()


class TestKnitPackNoSubtrees(TestCaseWithTransport):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def test_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        # in case of side effects of locking.
        t = repo.bzrdir.get_repository_transport(None)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.check_databases(t)

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar pack repository format 1 (needs bzr 0.92)\n",
            t.get('format').read())

    def assertHasKndx(self, t, knit_name):
        """Assert that knit_name exists on t."""
        self.assertEqualDiff('# bzr knit index 8\n',
                             t.get(knit_name + '.kndx').read())

    def assertHasNoKndx(self, t, knit_name):
        """Assert that knit_name has no index on t."""
        self.assertFalse(t.has(knit_name + '.kndx'))

    def assertHasNoKnit(self, t, knit_name):
        """Assert that knit_name has no knit on t."""
        self.assertFalse(t.has(knit_name + '.knit'))

    def check_databases(self, t):
        """Check knit content for a repository."""
        # check conversion worked
        self.assertHasNoKndx(t, 'inventory')
        self.assertHasNoKnit(t, 'inventory')
        self.assertHasNoKndx(t, 'revisions')
        self.assertHasNoKnit(t, 'revisions')
        self.assertHasNoKndx(t, 'signatures')
        self.assertHasNoKnit(t, 'signatures')
        self.assertFalse(t.has('knits'))
        # revision-indexes file-container directory
        self.assertEqual([],
            list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
        self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
        self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
        self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
        self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))

    def test_shared_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        t = repo.bzrdir.get_repository_transport(None)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.check_databases(t)

    def test_shared_no_tree_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        repo.set_make_working_trees(False)
        t = repo.bzrdir.get_repository_transport(None)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        # We should have a marker for the no-working-trees flag.
        self.assertEqualDiff('', t.get('no-working-trees').read())
        # The marker should go when we toggle the setting.
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.check_databases(t)

    def test_adding_revision_creates_pack_indices(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
        tree.commit('foobarbaz')
        index = GraphIndex(trans, 'pack-names', None)
        index_nodes = list(index.iter_all_entries())
        self.assertEqual(1, len(index_nodes))
        node = index_nodes[0]
        name = node[1][0]
        # the pack sizes should be listed in the index
        pack_value = node[2]
        sizes = [int(digits) for digits in pack_value.split(' ')]
        for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
            stat = trans.stat('indices/%s%s' % (name, suffix))
            self.assertEqual(size, stat.st_size)

    def test_pulling_nothing_leads_to_no_new_names(self):
        format = self.get_format()
        tree1 = self.make_branch_and_tree('1', format=format)
        tree2 = self.make_branch_and_tree('2', format=format)
        tree1.branch.repository.fetch(tree2.branch.repository)
        trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))

    def test_commit_across_pack_shape_boundary_autopacks(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = fakenfs.FakeNFSServer()
        server.setUp()
        self.addCleanup(server.tearDown)
        transport = get_transport(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(transport)
        repo = bzrdir.create_repository()
        repo_transport = bzrdir.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))
        # these files are in use by another client and typically can't be deleted
        repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
        repo._pack_collection._clear_obsolete_packs()
        self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))

    def test_pack_after_two_commits_packs_everything(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start')
        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_layout(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if key == ('1',):
                pos_1 = int(val[1:].split()[0])
            else:
                pos_2 = int(val[1:].split()[0])
        self.assertTrue(pos_2 < pos_1)
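        # pos_1/pos_2 parse the byte offset of each revision record out of the
        # index value (skipping a one-character prefix), so the assertion
        # checks that rev '2', the tip, is stored before rev '1' on disk.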

    def test_pack_repositories_support_multiple_write_locks(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        self.addCleanup(r1.unlock)
        r2.lock_write()
        self.addCleanup(r2.unlock)

    def _add_text(self, repo, fileid):
        """Add a text to the repository within a write group."""
        vf = repo.weave_store.get_weave(fileid, repo.get_transaction())
        vf.add_lines('samplerev+' + fileid, [], [])

    def test_concurrent_writers_merge_new_packs(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1.start_write_group()
                try:
                    r2.start_write_group()
                    try:
                        self._add_text(r1, 'fileidr1')
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1.abort_write_group()
                    raise
                # both r1 and r2 have open write groups with data in them
                # created while the other's write group was open.
                # Commit both which requires a merge to the pack-names.
                try:
                    r1.commit_write_group()
                except:
                    r1.abort_write_group()
                    r2.abort_write_group()
                    raise
                r2.commit_write_group()
                # tell r1 to reload from disk
                r1._pack_collection.reset()
                # Now both repositories should know about both names
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(2, len(r1._pack_collection.names()))
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_writer_second_preserves_dropping_a_pack(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        # add a pack to drop
        r1.start_write_group()
        self._add_text(r1, 'fileidr1')
        r1.abort_write_group()
        r1.commit_write_group()
        r1._pack_collection.ensure_loaded()
        name_to_drop = r1._pack_collection.all_packs()[0].name
        # access enough data to load the names list
        list(r1.all_revision_ids())
        # access enough data to load the names list
        list(r2.all_revision_ids())
        r1._pack_collection.ensure_loaded()
        r2.start_write_group()
        # in r1, drop the pack
        r1._pack_collection._remove_pack_from_memory(
            r1._pack_collection.get_pack_by_name(name_to_drop))
        self._add_text(r2, 'fileidr2')
        r2.abort_write_group()
        r1._pack_collection.reset()
        # r1 has a changed names list, and r2 an open write group with
        # changes.
        # save r1, and then commit the r2 write group, which requires a
        # merge to the pack-names, which should not reinstate
        # name_to_drop
        r1._pack_collection._save_pack_names()
        r1._pack_collection.reset()
        r2.abort_write_group()
        r2.commit_write_group()
        r2.abort_write_group()
        # Now both repositories should know about just one name.
        r1._pack_collection.ensure_loaded()
        r2._pack_collection.ensure_loaded()
        self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
        self.assertEqual(1, len(r1._pack_collection.names()))
        self.assertFalse(name_to_drop in r1._pack_collection.names())

    def test_lock_write_does_not_physically_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

    def prepare_for_break_lock(self):
        # Setup the global ui factory state so that a break-lock method call
        # will find usable input in the input stream.
        old_factory = bzrlib.ui.ui_factory
        def restoreFactory():
            bzrlib.ui.ui_factory = old_factory
        self.addCleanup(restoreFactory)
        bzrlib.ui.ui_factory = bzrlib.ui.SilentUIFactory()
        bzrlib.ui.ui_factory.stdin = StringIO("y\n")

    def test_break_lock_breaks_physical_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        repo2 = repository.Repository.open('.')
        self.assertTrue(repo.get_physical_lock_status())
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertFalse(repo.get_physical_lock_status())

    def test_broken_physical_locks_error_on__unlock_names_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        self.assertTrue(repo.get_physical_lock_status())
        repo2 = repository.Repository.open('.')
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)

    def test_fetch_without_find_ghosts_ignores_ghosts(self):
        # we want two repositories at this point:
        # one with a revision that is a ghost in the other
        # repository.
        # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
        # 'references' is present in both repositories, and 'tip' is present
        # just in has_ghost.
        # has_ghost       missing_ghost
        #------------------------------
        # 'ghost'             -
        # 'references'    'references'
        # 'tip'               -
        # In this test we fetch 'tip' which should not fetch 'ghost'
        has_ghost = self.make_repository('has_ghost', format=self.get_format())
        missing_ghost = self.make_repository('missing_ghost',
            format=self.get_format())

        def add_commit(repo, revision_id, parent_ids):
            repo.lock_write()
            repo.start_write_group()
            inv = inventory.Inventory(revision_id=revision_id)
            inv.root.revision = revision_id
            root_id = inv.root.file_id
            sha1 = repo.add_inventory(revision_id, inv, [])
            vf = repo.weave_store.get_weave_or_empty(root_id,
                repo.get_transaction())
            vf.add_lines(revision_id, [], [])
            rev = bzrlib.revision.Revision(timestamp=0,
                                           committer="Foo Bar <foo@example.com>",
                                           inventory_sha1=sha1,
                                           revision_id=revision_id)
            rev.parent_ids = parent_ids
            repo.add_revision(revision_id, rev)
            repo.commit_write_group()
            repo.unlock()
        add_commit(has_ghost, 'ghost', [])
        add_commit(has_ghost, 'references', ['ghost'])
        add_commit(missing_ghost, 'references', ['ghost'])
        add_commit(has_ghost, 'tip', ['references'])
        missing_ghost.fetch(has_ghost, 'tip')
        # missing ghost now has tip and not ghost.
        rev = missing_ghost.get_revision('tip')
        inv = missing_ghost.get_inventory('tip')
        self.assertRaises(errors.NoSuchRevision,
                          missing_ghost.get_revision, 'ghost')
        self.assertRaises(errors.RevisionNotPresent,
                          missing_ghost.get_inventory, 'ghost')

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertFalse(repo._format.supports_external_lookups)


class TestKnitPackSubtrees(TestKnitPackNoSubtrees):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'pack-0.92-subtree')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n",
            t.get('format').read())


class TestDevelopment0(TestKnitPackNoSubtrees):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'development')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar development format 0 (needs bzr.dev from before 1.3)\n",
            t.get('format').read())


class TestDevelopment0Subtree(TestKnitPackNoSubtrees):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(
            'development-subtree')

    def check_format(self, t):
        self.assertEqualDiff(
            "Bazaar development format 0 with subtree support "
            "(needs bzr.dev from before 1.3)\n",
            t.get('format').read())


class TestRepositoryPackCollection(TestCaseWithTransport):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def test__max_pack_count(self):
        """The maximum pack count is a function of the number of revisions."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        packs = repo._pack_collection
        # no revisions - one pack, so that we can have a revision free repo
        # without it blowing up
        self.assertEqual(1, packs._max_pack_count(0))
        # after that the sum of the digits - check the first 1-9
        self.assertEqual(1, packs._max_pack_count(1))
        self.assertEqual(2, packs._max_pack_count(2))
        self.assertEqual(3, packs._max_pack_count(3))
        self.assertEqual(4, packs._max_pack_count(4))
        self.assertEqual(5, packs._max_pack_count(5))
        self.assertEqual(6, packs._max_pack_count(6))
        self.assertEqual(7, packs._max_pack_count(7))
        self.assertEqual(8, packs._max_pack_count(8))
        self.assertEqual(9, packs._max_pack_count(9))
        # check the boundary cases with two digits for the next decade
        self.assertEqual(1, packs._max_pack_count(10))
        self.assertEqual(2, packs._max_pack_count(11))
        self.assertEqual(10, packs._max_pack_count(19))
        self.assertEqual(2, packs._max_pack_count(20))
        self.assertEqual(3, packs._max_pack_count(21))
        # check some arbitrary big numbers
        self.assertEqual(25, packs._max_pack_count(112894))
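        # i.e. the count is the sum of the decimal digits of the revision
        # count: 112894 -> 1 + 1 + 2 + 8 + 9 + 4 = 25, 19 -> 1 + 9 = 10.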

    def test_pack_distribution_zero(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        packs = repo._pack_collection
        self.assertEqual([0], packs.pack_distribution(0))

    def test_ensure_loaded_unlocked(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertRaises(errors.ObjectNotLocked,
                          repo._pack_collection.ensure_loaded)

    def test_pack_distribution_one_to_nine(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        packs = repo._pack_collection
        self.assertEqual([1],
            packs.pack_distribution(1))
        self.assertEqual([1, 1],
            packs.pack_distribution(2))
        self.assertEqual([1, 1, 1],
            packs.pack_distribution(3))
        self.assertEqual([1, 1, 1, 1],
            packs.pack_distribution(4))
        self.assertEqual([1, 1, 1, 1, 1],
            packs.pack_distribution(5))
        self.assertEqual([1, 1, 1, 1, 1, 1],
            packs.pack_distribution(6))
        self.assertEqual([1, 1, 1, 1, 1, 1, 1],
            packs.pack_distribution(7))
        self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1],
            packs.pack_distribution(8))
        self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1, 1],
            packs.pack_distribution(9))

    def test_pack_distribution_stable_at_boundaries(self):
        """When there are multi-rev packs the counts are stable."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        packs = repo._pack_collection
        self.assertEqual([10], packs.pack_distribution(10))
        self.assertEqual([10, 1], packs.pack_distribution(11))
        self.assertEqual([10, 10], packs.pack_distribution(20))
        self.assertEqual([10, 10, 1], packs.pack_distribution(21))
        self.assertEqual([100], packs.pack_distribution(100))
        self.assertEqual([100, 1], packs.pack_distribution(101))
        self.assertEqual([100, 10, 1], packs.pack_distribution(111))
        self.assertEqual([100, 100], packs.pack_distribution(200))
        self.assertEqual([100, 100, 1], packs.pack_distribution(201))
        self.assertEqual([100, 100, 10, 1], packs.pack_distribution(211))
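        # i.e. the distribution mirrors the decimal digits of the revision
        # count: 211 revisions -> two 100-revision packs, one 10, one 1.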

    def test_plan_pack_operations_2009_revisions_skip_all_packs(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        packs = repo._pack_collection
        existing_packs = [(2000, "big"), (9, "medium")]
        # rev count - 2009 -> 2x1000 + 9x1
        pack_operations = packs.plan_autopack_combinations(
            existing_packs, [1000, 1000, 1, 1, 1, 1, 1, 1, 1, 1, 1])
        self.assertEqual([], pack_operations)

    def test_plan_pack_operations_2010_revisions_skip_all_packs(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        packs = repo._pack_collection
        existing_packs = [(2000, "big"), (9, "medium"), (1, "single")]
        # rev count - 2010 -> 2x1000 + 1x10
        pack_operations = packs.plan_autopack_combinations(
            existing_packs, [1000, 1000, 10])
        self.assertEqual([], pack_operations)

    def test_plan_pack_operations_2010_combines_smallest_two(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        packs = repo._pack_collection
        existing_packs = [(1999, "big"), (9, "medium"), (1, "single2"),
            (1, "single1")]
        # rev count - 2010 -> 2x1000 + 1x10 (3)
        pack_operations = packs.plan_autopack_combinations(
            existing_packs, [1000, 1000, 10])
        self.assertEqual([[2, ["single2", "single1"]], [0, []]], pack_operations)
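        # the plan only touches the two single-revision packs (2 revisions in
        # total), merging them so the pack counts fit the [1000, 1000, 10]
        # distribution; the larger packs are left alone.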

    def test_all_packs_none(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        tree.lock_read()
        self.addCleanup(tree.unlock)
        packs = tree.branch.repository._pack_collection
        packs.ensure_loaded()
        self.assertEqual([], packs.all_packs())

    def test_all_packs_one(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        tree.commit('start')
        tree.lock_read()
        self.addCleanup(tree.unlock)
        packs = tree.branch.repository._pack_collection
        packs.ensure_loaded()
        self.assertEqual([
            packs.get_pack_by_name(packs.names()[0])],
            packs.all_packs())

    def test_all_packs_two(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        tree.commit('start')
        tree.commit('continue')
        tree.lock_read()
        self.addCleanup(tree.unlock)
        packs = tree.branch.repository._pack_collection
        packs.ensure_loaded()
        self.assertEqual([
            packs.get_pack_by_name(packs.names()[0]),
            packs.get_pack_by_name(packs.names()[1]),
            ], packs.all_packs())

    def test_get_pack_by_name(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        tree.commit('start')
        tree.lock_read()
        self.addCleanup(tree.unlock)
        packs = tree.branch.repository._pack_collection
        packs.ensure_loaded()
        name = packs.names()[0]
        pack_1 = packs.get_pack_by_name(name)
        # the pack should be correctly initialised
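        # packs._names[name] holds the sizes of the pack's four indices, in
        # the order revisions (.rix), inventories (.iix), texts (.tix),
        # signatures (.six); each is handed to GraphIndex below.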
        rev_index = GraphIndex(packs._index_transport, name + '.rix',
            packs._names[name][0])
        inv_index = GraphIndex(packs._index_transport, name + '.iix',
            packs._names[name][1])
        txt_index = GraphIndex(packs._index_transport, name + '.tix',
            packs._names[name][2])
        sig_index = GraphIndex(packs._index_transport, name + '.six',
            packs._names[name][3])
        self.assertEqual(pack_repo.ExistingPack(packs._pack_transport,
            name, rev_index, inv_index, txt_index, sig_index), pack_1)
        # and the same instance should be returned on successive calls.
        self.assertTrue(pack_1 is packs.get_pack_by_name(name))


class TestPack(TestCaseWithTransport):
    """Tests for the Pack object."""

    def assertCurrentlyEqual(self, left, right):
        self.assertTrue(left == right)
        self.assertTrue(right == left)
        self.assertFalse(left != right)
        self.assertFalse(right != left)

    def assertCurrentlyNotEqual(self, left, right):
        self.assertFalse(left == right)
        self.assertFalse(right == left)
        self.assertTrue(left != right)
        self.assertTrue(right != left)

    def test___eq____ne__(self):
        left = pack_repo.ExistingPack('', '', '', '', '', '')
        right = pack_repo.ExistingPack('', '', '', '', '', '')
        self.assertCurrentlyEqual(left, right)
        # change all attributes and ensure equality changes as we do.
        left.revision_index = 'a'
        self.assertCurrentlyNotEqual(left, right)
        right.revision_index = 'a'
        self.assertCurrentlyEqual(left, right)
        left.inventory_index = 'a'
        self.assertCurrentlyNotEqual(left, right)
        right.inventory_index = 'a'
        self.assertCurrentlyEqual(left, right)
        left.text_index = 'a'
        self.assertCurrentlyNotEqual(left, right)
        right.text_index = 'a'
        self.assertCurrentlyEqual(left, right)
        left.signature_index = 'a'
        self.assertCurrentlyNotEqual(left, right)
        right.signature_index = 'a'
        self.assertCurrentlyEqual(left, right)
        left.name = 'a'
        self.assertCurrentlyNotEqual(left, right)
        right.name = 'a'
        self.assertCurrentlyEqual(left, right)
        left.transport = 'a'
        self.assertCurrentlyNotEqual(left, right)
        right.transport = 'a'
        self.assertCurrentlyEqual(left, right)

    def test_file_name(self):
        pack = pack_repo.ExistingPack('', 'a_name', '', '', '', '')
        self.assertEqual('a_name.pack', pack.file_name())


class TestNewPack(TestCaseWithTransport):
    """Tests for pack_repo.NewPack."""

    def test_new_instance_attributes(self):
        upload_transport = self.get_transport('upload')
        pack_transport = self.get_transport('pack')
        index_transport = self.get_transport('index')
        upload_transport.mkdir('.')
        pack = pack_repo.NewPack(upload_transport, index_transport,
            pack_transport)
        self.assertIsInstance(pack.revision_index, InMemoryGraphIndex)
        self.assertIsInstance(pack.inventory_index, InMemoryGraphIndex)
        self.assertIsInstance(pack._hash, type(md5.new()))
        self.assertTrue(pack.upload_transport is upload_transport)
        self.assertTrue(pack.index_transport is index_transport)
        self.assertTrue(pack.pack_transport is pack_transport)
        self.assertEqual(None, pack.index_sizes)
        self.assertEqual(20, len(pack.random_name))
        self.assertIsInstance(pack.random_name, str)
        self.assertIsInstance(pack.start_time, float)


class TestPacker(TestCaseWithTransport):
    """Tests for the packs repository Packer class."""

    # To date, this class has been factored out and nothing new added to it;
    # thus there are not yet any tests.


class TestInterDifferingSerializer(TestCaseWithTransport):

    def test_progress_bar(self):
        tree = self.make_branch_and_tree('tree')
        tree.commit('rev1', rev_id='rev-1')
        tree.commit('rev2', rev_id='rev-2')
        tree.commit('rev3', rev_id='rev-3')
        repo = self.make_repository('repo')
        inter_repo = repository.InterDifferingSerializer(
            tree.branch.repository, repo)
        pb = progress.InstrumentedProgress(to_file=StringIO())
        pb.never_throttle = True
        inter_repo.fetch('rev-1', pb)
        self.assertEqual('Transferring revisions', pb.last_msg)
        self.assertEqual(1, pb.last_cnt)
        self.assertEqual(1, pb.last_total)
        inter_repo.fetch('rev-3', pb)
        self.assertEqual(2, pb.last_cnt)
        self.assertEqual(2, pb.last_total)