~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/per_pack_repository.py

  • Committer: Martin von Gagern
  • Date: 2010-04-20 08:47:38 UTC
  • mfrom: (5167 +trunk)
  • mto: This revision was merged to the branch mainline in revision 5195.
  • Revision ID: martin.vgagern@gmx.net-20100420084738-ygymnqmdllzrhpfn
merge trunk

@@ -1,4 +1,4 @@
-# Copyright (C) 2008 Canonical Ltd
+# Copyright (C) 2008, 2009, 2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -12,7 +12,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
 """Tests for pack repositories.
 
@@ -28,6 +28,7 @@
     bzrdir,
     errors,
     inventory,
+    osutils,
     progress,
     repository,
     revision as _mod_revision,
@@ -37,6 +38,14 @@
     upgrade,
     workingtree,
     )
+from bzrlib.repofmt import (
+    pack_repo,
+    groupcompress_repo,
+    )
+from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a
+from bzrlib.smart import (
+    client,
+    )
 from bzrlib.tests import (
     TestCase,
     TestCaseWithTransport,
@@ -44,9 +53,11 @@
     TestSkipped,
     )
 from bzrlib.transport import (
-    fakenfs,
     get_transport,
+    memory,
     )
+from bzrlib.tests import test_server
+from bzrlib.tests.per_repository import TestCaseWithRepository
 
 
 class TestPackRepository(TestCaseWithTransport):
@@ -66,13 +77,20 @@
         """Packs do not need ordered data retrieval."""
         format = self.get_format()
         repo = self.make_repository('.', format=format)
-        self.assertEqual('unordered', repo._fetch_order)
+        self.assertEqual('unordered', repo._format._fetch_order)
 
     def test_attribute__fetch_uses_deltas(self):
         """Packs reuse deltas."""
         format = self.get_format()
         repo = self.make_repository('.', format=format)
-        self.assertEqual(True, repo._fetch_uses_deltas)
+        if isinstance(format.repository_format, RepositoryFormat2a):
+            # TODO: This is currently a workaround. CHK format repositories
+            #       ignore the 'deltas' flag, but during conversions, we can't
+            #       do unordered delta fetches. Remove this clause once we
+            #       improve the inter-format fetching.
+            self.assertEqual(False, repo._format._fetch_uses_deltas)
+        else:
+            self.assertEqual(True, repo._format._fetch_uses_deltas)
 
     def test_disk_layout(self):
         format = self.get_format()
@@ -201,11 +219,14 @@
         tree = tree.bzrdir.open_workingtree()
         check_result = tree.branch.repository.check(
             [tree.branch.last_revision()])
-        # We should have 50 (10x5) files in the obsolete_packs directory.
+        nb_files = 5 # .pack, .rix, .iix, .tix, .six
+        if tree.branch.repository._format.supports_chks:
+            nb_files += 1 # .cix
+        # We should have 10 x nb_files files in the obsolete_packs directory.
         obsolete_files = list(trans.list_dir('obsolete_packs'))
         self.assertFalse('foo' in obsolete_files)
         self.assertFalse('bar' in obsolete_files)
-        self.assertEqual(50, len(obsolete_files))
+        self.assertEqual(10 * nb_files, len(obsolete_files))
         # XXX: Todo check packs obsoleted correctly - old packs and indices
         # in the obsolete_packs directory.
         large_pack_name = list(index.iter_all_entries())[0][1][0]
@@ -216,12 +237,47 @@
         pack_names = [node[1][0] for node in index.iter_all_entries()]
         self.assertTrue(large_pack_name in pack_names)
 
+    def test_commit_write_group_returns_new_pack_names(self):
+        # This test doesn't need real disk.
+        self.vfs_transport_factory = memory.MemoryServer
+        format = self.get_format()
+        repo = self.make_repository('foo', format=format)
+        repo.lock_write()
+        try:
+            # All current pack repository styles autopack at 10 revisions; and
+            # autopack as well as regular commit write group needs to return
+            # the new pack name. Looping is a little ugly, but we don't have a
+            # clean way to test both the autopack logic and the normal code
+            # path without doing this loop.
+            for pos in range(10):
+                revid = str(pos)
+                repo.start_write_group()
+                try:
+                    inv = inventory.Inventory(revision_id=revid)
+                    inv.root.revision = revid
+                    repo.texts.add_lines((inv.root.file_id, revid), [], [])
+                    rev = _mod_revision.Revision(timestamp=0, timezone=None,
+                        committer="Foo Bar <foo@example.com>", message="Message",
+                        revision_id=revid)
+                    rev.parent_ids = ()
+                    repo.add_revision(revid, rev, inv=inv)
+                except:
+                    repo.abort_write_group()
+                    raise
+                else:
+                    old_names = repo._pack_collection._names.keys()
+                    result = repo.commit_write_group()
+                    cur_names = repo._pack_collection._names.keys()
+                    new_names = list(set(cur_names) - set(old_names))
+                    self.assertEqual(new_names, result)
+        finally:
+            repo.unlock()
+
     def test_fail_obsolete_deletion(self):
         # failing to delete obsolete packs is not fatal
         format = self.get_format()
-        server = fakenfs.FakeNFSServer()
-        server.setUp()
-        self.addCleanup(server.tearDown)
+        server = test_server.FakeNFSServer()
+        self.start_server(server)
         transport = get_transport(server.get_url())
         bzrdir = self.get_format().initialize_on_transport(transport)
         repo = bzrdir.create_repository()
@@ -232,6 +288,23 @@
         repo._pack_collection._clear_obsolete_packs()
         self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))
 
+    def test_pack_collection_sets_sibling_indices(self):
+        """The CombinedGraphIndex objects in the pack collection are all
+        siblings of each other, so that search-order reorderings will be copied
+        to each other.
+        """
+        repo = self.make_repository('repo')
+        pack_coll = repo._pack_collection
+        indices = set([pack_coll.revision_index, pack_coll.inventory_index,
+                pack_coll.text_index, pack_coll.signature_index])
+        if pack_coll.chk_index is not None:
+            indices.add(pack_coll.chk_index)
+        combined_indices = set(idx.combined_index for idx in indices)
+        for combined_index in combined_indices:
+            self.assertEqual(
+                combined_indices.difference([combined_index]),
+                combined_index._sibling_indices)
+
     def test_pack_after_two_commits_packs_everything(self):
         format = self.get_format()
         tree = self.make_branch_and_tree('.', format=format)
@@ -244,7 +317,44 @@
         self.assertEqual(1, len(list(index.iter_all_entries())))
         self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
 
+    def test_pack_preserves_all_inventories(self):
+        # This is related to bug:
+        #   https://bugs.launchpad.net/bzr/+bug/412198
+        # Stacked repositories need to keep the inventory for parents, even
+        # after a pack operation. However, it is harder to test that, then just
+        # test that all inventory texts are preserved.
+        format = self.get_format()
+        builder = self.make_branch_builder('source', format=format)
+        builder.start_series()
+        builder.build_snapshot('A-id', None, [
+            ('add', ('', 'root-id', 'directory', None))])
+        builder.build_snapshot('B-id', None, [
+            ('add', ('file', 'file-id', 'file', 'B content\n'))])
+        builder.build_snapshot('C-id', None, [
+            ('modify', ('file-id', 'C content\n'))])
+        builder.finish_series()
+        b = builder.get_branch()
+        b.lock_read()
+        self.addCleanup(b.unlock)
+        repo = self.make_repository('repo', shared=True, format=format)
+        repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.fetch(b.repository, revision_id='B-id')
+        inv = b.repository.iter_inventories(['C-id']).next()
+        repo.start_write_group()
+        repo.add_inventory('C-id', inv, ['B-id'])
+        repo.commit_write_group()
+        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
+                         sorted(repo.inventories.keys()))
+        repo.pack()
+        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
+                         sorted(repo.inventories.keys()))
+        # Content should be preserved as well
+        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())
+
     def test_pack_layout(self):
+        # Test that the ordering of revisions in pack repositories is
+        # tip->ancestor
         format = self.get_format()
         tree = self.make_branch_and_tree('.', format=format)
         trans = tree.branch.repository.bzrdir.get_repository_transport(None)
@@ -255,14 +365,21 @@
         self.addCleanup(tree.unlock)
         pack = tree.branch.repository._pack_collection.get_pack_by_name(
             tree.branch.repository._pack_collection.names()[0])
-        # revision access tends to be tip->ancestor, so ordering that way on 
+        # revision access tends to be tip->ancestor, so ordering that way on
         # disk is a good idea.
         for _1, key, val, refs in pack.revision_index.iter_all_entries():
+            if type(format.repository_format) is RepositoryFormat2a:
+                # group_start, group_len, internal_start, internal_len
+                pos = map(int, val.split())
+            else:
+                # eol_flag, start, len
+                pos = int(val[1:].split()[0])
             if key == ('1',):
-                pos_1 = int(val[1:].split()[0])
+                pos_1 = pos
             else:
-                pos_2 = int(val[1:].split()[0])
-        self.assertTrue(pos_2 < pos_1)
+                pos_2 = pos
+        self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s'
+                                       % (pos_1, pos_2))
 
     def test_pack_repositories_support_multiple_write_locks(self):
         format = self.get_format()
@@ -276,7 +393,8 @@
 
     def _add_text(self, repo, fileid):
         """Add a text to the repository within a write group."""
-        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])
+        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [],
+            ['smaplerev+'+fileid])
 
     def test_concurrent_writers_merge_new_packs(self):
         format = self.get_format()
@@ -395,6 +513,90 @@
         finally:
             r1.unlock()
 
+    def test_concurrent_pack_triggers_reload(self):
+        # create 2 packs, which we will then collapse
+        tree = self.make_branch_and_tree('tree')
+        tree.lock_write()
+        try:
+            rev1 = tree.commit('one')
+            rev2 = tree.commit('two')
+            r2 = repository.Repository.open('tree')
+            r2.lock_read()
+            try:
+                # Now r2 has read the pack-names file, but will need to reload
+                # it after r1 has repacked
+                tree.branch.repository.pack()
+                self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2]))
+            finally:
+                r2.unlock()
+        finally:
+            tree.unlock()
+
+    def test_concurrent_pack_during_get_record_reloads(self):
+        tree = self.make_branch_and_tree('tree')
+        tree.lock_write()
+        try:
+            rev1 = tree.commit('one')
+            rev2 = tree.commit('two')
+            keys = [(rev1,), (rev2,)]
+            r2 = repository.Repository.open('tree')
+            r2.lock_read()
+            try:
+                # At this point, we will start grabbing a record stream, and
+                # trigger a repack mid-way
+                packed = False
+                result = {}
+                record_stream = r2.revisions.get_record_stream(keys,
+                                    'unordered', False)
+                for record in record_stream:
+                    result[record.key] = record
+                    if not packed:
+                        tree.branch.repository.pack()
+                        packed = True
+                # The first record will be found in the original location, but
+                # after the pack, we have to reload to find the next record
+                self.assertEqual(sorted(keys), sorted(result.keys()))
+            finally:
+                r2.unlock()
+        finally:
+            tree.unlock()
+
+    def test_concurrent_pack_during_autopack(self):
+        tree = self.make_branch_and_tree('tree')
+        tree.lock_write()
+        try:
+            for i in xrange(9):
+                tree.commit('rev %d' % (i,))
+            r2 = repository.Repository.open('tree')
+            r2.lock_write()
+            try:
+                # Monkey patch so that pack occurs while the other repo is
+                # autopacking. This is slightly bad, but all current pack
+                # repository implementations have a _pack_collection, and we
+                # test that it gets triggered. So if a future format changes
+                # things, the test will fail rather than succeed accidentally.
+                autopack_count = [0]
+                r1 = tree.branch.repository
+                orig = r1._pack_collection.pack_distribution
+                def trigger_during_auto(*args, **kwargs):
+                    ret = orig(*args, **kwargs)
+                    if not autopack_count[0]:
+                        r2.pack()
+                    autopack_count[0] += 1
+                    return ret
+                r1._pack_collection.pack_distribution = trigger_during_auto
+                tree.commit('autopack-rev')
+                # This triggers 2 autopacks. The first one causes r2.pack() to
+                # fire, but r2 doesn't see the new pack file yet. The
+                # autopack restarts and sees there are 2 files and there
+                # should be only 1 for 10 commits. So it goes ahead and
+                # finishes autopacking.
+                self.assertEqual([2], autopack_count)
+            finally:
+                r2.unlock()
+        finally:
+            tree.unlock()
+
     def test_lock_write_does_not_physically_lock(self):
         repo = self.make_repository('.', format=self.get_format())
         repo.lock_write()
@@ -404,12 +606,7 @@
     def prepare_for_break_lock(self):
         # Setup the global ui factory state so that a break-lock method call
         # will find usable input in the input stream.
-        old_factory = ui.ui_factory
-        def restoreFactory():
-            ui.ui_factory = old_factory
-        self.addCleanup(restoreFactory)
-        ui.ui_factory = ui.SilentUIFactory()
-        ui.ui_factory.stdin = StringIO("y\n")
+        ui.ui_factory = ui.CannedInputUIFactory([True])
 
     def test_break_lock_breaks_physical_lock(self):
         repo = self.make_repository('.', format=self.get_format())
@@ -479,11 +676,197 @@
         self.assertRaises(errors.NoSuchRevision,
             missing_ghost.get_inventory, 'ghost')
 
+    def make_write_ready_repo(self):
+        format = self.get_format()
+        if isinstance(format.repository_format, RepositoryFormat2a):
+            raise TestNotApplicable("No missing compression parents")
+        repo = self.make_repository('.', format=format)
+        repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        self.addCleanup(repo.abort_write_group)
+        return repo
+
+    def test_missing_inventories_compression_parent_prevents_commit(self):
+        repo = self.make_write_ready_repo()
+        key = ('junk',)
+        repo.inventories._index._missing_compression_parents.add(key)
+        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
+    def test_missing_revisions_compression_parent_prevents_commit(self):
+        repo = self.make_write_ready_repo()
+        key = ('junk',)
+        repo.revisions._index._missing_compression_parents.add(key)
+        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
+    def test_missing_signatures_compression_parent_prevents_commit(self):
+        repo = self.make_write_ready_repo()
+        key = ('junk',)
+        repo.signatures._index._missing_compression_parents.add(key)
+        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
+    def test_missing_text_compression_parent_prevents_commit(self):
+        repo = self.make_write_ready_repo()
+        key = ('some', 'junk')
+        repo.texts._index._missing_compression_parents.add(key)
+        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+
     def test_supports_external_lookups(self):
         repo = self.make_repository('.', format=self.get_format())
         self.assertEqual(self.format_supports_external_lookups,
             repo._format.supports_external_lookups)
 
+    def test_abort_write_group_does_not_raise_when_suppressed(self):
+        """Similar to per_repository.test_write_group's test of the same name.
+
+        Also requires that the exception is logged.
+        """
+        self.vfs_transport_factory = memory.MemoryServer
+        repo = self.make_repository('repo', format=self.get_format())
+        token = repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        # Damage the repository on the filesystem
+        self.get_transport('').rename('repo', 'foo')
+        # abort_write_group will not raise an error
+        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
+        # But it does log an error
+        log = self.get_log()
+        self.assertContainsRe(log, 'abort_write_group failed')
+        self.assertContainsRe(log, r'INFO  bzr: ERROR \(ignored\):')
+        if token is not None:
+            repo.leave_lock_in_place()
+
+    def test_abort_write_group_does_raise_when_not_suppressed(self):
+        self.vfs_transport_factory = memory.MemoryServer
+        repo = self.make_repository('repo', format=self.get_format())
+        token = repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        # Damage the repository on the filesystem
+        self.get_transport('').rename('repo', 'foo')
+        # abort_write_group will not raise an error
+        self.assertRaises(Exception, repo.abort_write_group)
+        if token is not None:
+            repo.leave_lock_in_place()
+
+    def test_suspend_write_group(self):
+        self.vfs_transport_factory = memory.MemoryServer
+        repo = self.make_repository('repo', format=self.get_format())
+        token = repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
+        wg_tokens = repo.suspend_write_group()
+        expected_pack_name = wg_tokens[0] + '.pack'
+        expected_names = [wg_tokens[0] + ext for ext in
+                            ('.rix', '.iix', '.tix', '.six')]
+        if repo.chk_bytes is not None:
+            expected_names.append(wg_tokens[0] + '.cix')
+        expected_names.append(expected_pack_name)
+        upload_transport = repo._pack_collection._upload_transport
+        limbo_files = upload_transport.list_dir('')
+        self.assertEqual(sorted(expected_names), sorted(limbo_files))
+        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
+        self.assertEqual(wg_tokens[0], md5.hexdigest())
+
+    def test_resume_chk_bytes(self):
+        self.vfs_transport_factory = memory.MemoryServer
+        repo = self.make_repository('repo', format=self.get_format())
+        if repo.chk_bytes is None:
+            raise TestNotApplicable('no chk_bytes for this repository')
+        token = repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        text = 'a bit of text\n'
+        key = ('sha1:' + osutils.sha_string(text),)
+        repo.chk_bytes.add_lines(key, (), [text])
+        wg_tokens = repo.suspend_write_group()
+        same_repo = repo.bzrdir.open_repository()
+        same_repo.lock_write()
+        self.addCleanup(same_repo.unlock)
+        same_repo.resume_write_group(wg_tokens)
+        self.assertEqual([key], list(same_repo.chk_bytes.keys()))
+        self.assertEqual(
+            text, same_repo.chk_bytes.get_record_stream([key],
+                'unordered', True).next().get_bytes_as('fulltext'))
+        same_repo.abort_write_group()
+        self.assertEqual([], list(same_repo.chk_bytes.keys()))
+
+    def test_resume_write_group_then_abort(self):
+        # Create a repo, start a write group, insert some data, suspend.
+        self.vfs_transport_factory = memory.MemoryServer
+        repo = self.make_repository('repo', format=self.get_format())
+        token = repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        text_key = ('file-id', 'revid')
+        repo.texts.add_lines(text_key, (), ['lines'])
+        wg_tokens = repo.suspend_write_group()
+        # Get a fresh repository object for the repo on the filesystem.
+        same_repo = repo.bzrdir.open_repository()
+        # Resume
+        same_repo.lock_write()
+        self.addCleanup(same_repo.unlock)
+        same_repo.resume_write_group(wg_tokens)
+        same_repo.abort_write_group()
+        self.assertEqual(
+            [], same_repo._pack_collection._upload_transport.list_dir(''))
+        self.assertEqual(
+            [], same_repo._pack_collection._pack_transport.list_dir(''))
+
+    def test_commit_resumed_write_group(self):
+        self.vfs_transport_factory = memory.MemoryServer
+        repo = self.make_repository('repo', format=self.get_format())
+        token = repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        text_key = ('file-id', 'revid')
+        repo.texts.add_lines(text_key, (), ['lines'])
+        wg_tokens = repo.suspend_write_group()
+        # Get a fresh repository object for the repo on the filesystem.
+        same_repo = repo.bzrdir.open_repository()
+        # Resume
+        same_repo.lock_write()
+        self.addCleanup(same_repo.unlock)
+        same_repo.resume_write_group(wg_tokens)
+        same_repo.commit_write_group()
+        expected_pack_name = wg_tokens[0] + '.pack'
+        expected_names = [wg_tokens[0] + ext for ext in
+                            ('.rix', '.iix', '.tix', '.six')]
+        if repo.chk_bytes is not None:
+            expected_names.append(wg_tokens[0] + '.cix')
+        self.assertEqual(
+            [], same_repo._pack_collection._upload_transport.list_dir(''))
+        index_names = repo._pack_collection._index_transport.list_dir('')
+        self.assertEqual(sorted(expected_names), sorted(index_names))
+        pack_names = repo._pack_collection._pack_transport.list_dir('')
+        self.assertEqual([expected_pack_name], pack_names)
+
+    def test_resume_malformed_token(self):
+        self.vfs_transport_factory = memory.MemoryServer
+        # Make a repository with a suspended write group
+        repo = self.make_repository('repo', format=self.get_format())
+        token = repo.lock_write()
+        self.addCleanup(repo.unlock)
+        repo.start_write_group()
+        text_key = ('file-id', 'revid')
+        repo.texts.add_lines(text_key, (), ['lines'])
+        wg_tokens = repo.suspend_write_group()
+        # Make a new repository
+        new_repo = self.make_repository('new_repo', format=self.get_format())
+        token = new_repo.lock_write()
+        self.addCleanup(new_repo.unlock)
+        hacked_wg_token = (
+            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
+        self.assertRaises(
+            errors.UnresumableWriteGroup,
+            new_repo.resume_write_group, [hacked_wg_token])
+
 
 class TestPackRepositoryStacking(TestCaseWithTransport):
 
@@ -491,7 +874,7 @@
 
     def setUp(self):
         if not self.format_supports_external_lookups:
-            raise TestNotApplicable("%r doesn't support stacking" 
+            raise TestNotApplicable("%r doesn't support stacking"
                 % (self.format_name,))
         super(TestPackRepositoryStacking, self).setUp()
 
@@ -513,10 +896,17 @@
             if getattr(repo._format, 'supports_tree_reference', False):
                 matching_format_name = 'pack-0.92-subtree'
             else:
-                matching_format_name = 'rich-root-pack'
+                if repo._format.supports_chks:
+                    matching_format_name = '2a'
+                else:
+                    matching_format_name = 'rich-root-pack'
             mismatching_format_name = 'pack-0.92'
         else:
-            matching_format_name = 'pack-0.92'
+            # We don't have a non-rich-root CHK format.
+            if repo._format.supports_chks:
+                raise AssertionError("no non-rich-root CHK formats known")
+            else:
+                matching_format_name = 'pack-0.92'
             mismatching_format_name = 'pack-0.92-subtree'
         base = self.make_repository('base', format=matching_format_name)
         repo.add_fallback_repository(base)
@@ -527,7 +917,7 @@
             repo.add_fallback_repository, bad_repo)
         self.assertContainsRe(str(e),
             r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
-            r'KnitPackRepository.*/repo/.*\n'
+            r'.*Repository.*/repo/.*\n'
             r'different rich-root support')
 
     def test_stack_checks_serializers_compatibility(self):
@@ -539,7 +929,10 @@
             mismatching_format_name = 'rich-root-pack'
         else:
             if repo.supports_rich_root():
-                matching_format_name = 'rich-root-pack'
+                if repo._format.supports_chks:
+                    matching_format_name = '2a'
+                else:
+                    matching_format_name = 'rich-root-pack'
                 mismatching_format_name = 'pack-0.92-subtree'
             else:
                 raise TestNotApplicable('No formats use non-v5 serializer'
@@ -553,7 +946,7 @@
             repo.add_fallback_repository, bad_repo)
         self.assertContainsRe(str(e),
             r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
-            r'KnitPackRepository.*/repo/.*\n'
+            r'.*Repository.*/repo/.*\n'
             r'different serializers')
 
     def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
@@ -561,7 +954,8 @@
         base.commit('foo')
         referencing = self.make_branch_and_tree('repo', format=self.get_format())
         referencing.branch.repository.add_fallback_repository(base.branch.repository)
-        referencing.commit('bar')
+        local_tree = referencing.branch.create_checkout('local')
+        local_tree.commit('bar')
         new_instance = referencing.bzrdir.open_repository()
         new_instance.lock_read()
         self.addCleanup(new_instance.unlock)
@@ -569,9 +963,10 @@
         self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
 
     def test_autopack_only_considers_main_repo_packs(self):
-        base = self.make_branch_and_tree('base', format=self.get_format())
+        format = self.get_format()
+        base = self.make_branch_and_tree('base', format=format)
         base.commit('foo')
-        tree = self.make_branch_and_tree('repo', format=self.get_format())
+        tree = self.make_branch_and_tree('repo', format=format)
         tree.branch.repository.add_fallback_repository(base.branch.repository)
         trans = tree.branch.repository.bzrdir.get_repository_transport(None)
         # This test could be a little cheaper by replacing the packs
@@ -579,36 +974,163 @@
         # and max packs policy - so we are checking the policy is honoured
         # in the test. But for now 11 commits is not a big deal in a single
         # test.
+        local_tree = tree.branch.create_checkout('local')
         for x in range(9):
-            tree.commit('commit %s' % x)
+            local_tree.commit('commit %s' % x)
         # there should be 9 packs:
         index = self.index_class(trans, 'pack-names', None)
         self.assertEqual(9, len(list(index.iter_all_entries())))
         # committing one more should coalesce to 1 of 10.
-        tree.commit('commit triggering pack')
+        local_tree.commit('commit triggering pack')
         index = self.index_class(trans, 'pack-names', None)
         self.assertEqual(1, len(list(index.iter_all_entries())))
         # packing should not damage data
         tree = tree.bzrdir.open_workingtree()
         check_result = tree.branch.repository.check(
             [tree.branch.last_revision()])
-        # We should have 50 (10x5) files in the obsolete_packs directory.
+        nb_files = 5 # .pack, .rix, .iix, .tix, .six
+        if tree.branch.repository._format.supports_chks:
+            nb_files += 1 # .cix
+        # We should have 10 x nb_files files in the obsolete_packs directory.
         obsolete_files = list(trans.list_dir('obsolete_packs'))
         self.assertFalse('foo' in obsolete_files)
         self.assertFalse('bar' in obsolete_files)
-        self.assertEqual(50, len(obsolete_files))
+        self.assertEqual(10 * nb_files, len(obsolete_files))
         # XXX: Todo check packs obsoleted correctly - old packs and indices
         # in the obsolete_packs directory.
         large_pack_name = list(index.iter_all_entries())[0][1][0]
         # finally, committing again should not touch the large pack.
-        tree.commit('commit not triggering pack')
+        local_tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
         self.assertEqual(2, len(list(index.iter_all_entries())))
         pack_names = [node[1][0] for node in index.iter_all_entries()]
         self.assertTrue(large_pack_name in pack_names)
 
 
-def load_tests(basic_tests, module, test_loader):
+class TestKeyDependencies(TestCaseWithTransport):
+
+    def get_format(self):
+        return bzrdir.format_registry.make_bzrdir(self.format_name)
+
+    def create_source_and_target(self):
+        builder = self.make_branch_builder('source', format=self.get_format())
+        builder.start_series()
+        builder.build_snapshot('A-id', None, [
+            ('add', ('', 'root-id', 'directory', None))])
+        builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
+        builder.finish_series()
+        repo = self.make_repository('target', format=self.get_format())
+        b = builder.get_branch()
+        b.lock_read()
+        self.addCleanup(b.unlock)
+        repo.lock_write()
+        self.addCleanup(repo.unlock)
+        return b.repository, repo
+
+    def test_key_dependencies_cleared_on_abort(self):
+        source_repo, target_repo = self.create_source_and_target()
+        target_repo.start_write_group()
+        try:
+            stream = source_repo.revisions.get_record_stream([('B-id',)],
+                                                             'unordered', True)
+            target_repo.revisions.insert_record_stream(stream)
+            key_refs = target_repo.revisions._index._key_dependencies
+            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
+        finally:
+            target_repo.abort_write_group()
+        self.assertEqual([], sorted(key_refs.get_referrers()))
+
+    def test_key_dependencies_cleared_on_suspend(self):
+        source_repo, target_repo = self.create_source_and_target()
+        target_repo.start_write_group()
+        try:
+            stream = source_repo.revisions.get_record_stream([('B-id',)],
+                                                             'unordered', True)
+            target_repo.revisions.insert_record_stream(stream)
+            key_refs = target_repo.revisions._index._key_dependencies
+            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
+        finally:
+            target_repo.suspend_write_group()
+        self.assertEqual([], sorted(key_refs.get_referrers()))
+
+    def test_key_dependencies_cleared_on_commit(self):
+        source_repo, target_repo = self.create_source_and_target()
+        target_repo.start_write_group()
+        try:
+            # Copy all texts, inventories, and chks so that nothing is missing
+            # for revision B-id.
+            for vf_name in ['texts', 'chk_bytes', 'inventories']:
+                source_vf = getattr(source_repo, vf_name, None)
+                if source_vf is None:
+                    continue
+                target_vf = getattr(target_repo, vf_name)
+                stream = source_vf.get_record_stream(
+                    source_vf.keys(), 'unordered', True)
+                target_vf.insert_record_stream(stream)
+            # Copy just revision B-id
+            stream = source_repo.revisions.get_record_stream(
+                [('B-id',)], 'unordered', True)
+            target_repo.revisions.insert_record_stream(stream)
+            key_refs = target_repo.revisions._index._key_dependencies
+            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
+        finally:
+            target_repo.commit_write_group()
+        self.assertEqual([], sorted(key_refs.get_referrers()))
+
+
+class TestSmartServerAutopack(TestCaseWithTransport):
+
+    def setUp(self):
+        super(TestSmartServerAutopack, self).setUp()
+        # Create a smart server that publishes whatever the backing VFS server
+        # does.
+        self.smart_server = test_server.SmartTCPServer_for_testing()
+        self.start_server(self.smart_server, self.get_server())
+        # Log all HPSS calls into self.hpss_calls.
+        client._SmartClient.hooks.install_named_hook(
+            'call', self.capture_hpss_call, None)
+        self.hpss_calls = []
+
+    def capture_hpss_call(self, params):
+        self.hpss_calls.append(params.method)
+
+    def get_format(self):
+        return bzrdir.format_registry.make_bzrdir(self.format_name)
+
+    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
+        # Make local and remote repos
+        format = self.get_format()
+        tree = self.make_branch_and_tree('local', format=format)
+        self.make_branch_and_tree('remote', format=format)
+        remote_branch_url = self.smart_server.get_url() + 'remote'
+        remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
+        # Make 9 local revisions, and push them one at a time to the remote
+        # repo to produce 9 pack files.
+        for x in range(9):
+            tree.commit('commit %s' % x)
+            tree.branch.push(remote_branch)
+        # Make one more push to trigger an autopack
+        self.hpss_calls = []
+        tree.commit('commit triggering pack')
+        tree.branch.push(remote_branch)
+        autopack_calls = len([call for call in self.hpss_calls if call ==
+            'PackRepository.autopack'])
+        streaming_calls = len([call for call in self.hpss_calls if call in
+            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
+        if autopack_calls:
+            # Non streaming server
+            self.assertEqual(1, autopack_calls)
+            self.assertEqual(0, streaming_calls)
+        else:
+            # Streaming was used, which autopacks on the remote end.
+            self.assertEqual(0, autopack_calls)
+            # NB: The 2 calls are because of the sanity check that the server
+            # supports the verb (see remote.py:RemoteSink.insert_stream for
+            # details).
+            self.assertEqual(2, streaming_calls)
+
+
+def load_tests(basic_tests, module, loader):
     # these give the bzrdir canned format name, and the repository on-disk
     # format string
     scenarios_params = [
@@ -630,20 +1152,21 @@
                   "(bzr 1.6.1)\n",
               format_supports_external_lookups=True,
               index_class=GraphIndex),
-         dict(format_name='development2',
-              format_string="Bazaar development format 2 "
-                  "(needs bzr.dev from before 1.8)\n",
-              format_supports_external_lookups=True,
-              index_class=BTreeGraphIndex),
-         dict(format_name='development2-subtree',
-              format_string="Bazaar development format 2 "
-                  "with subtree support (needs bzr.dev from before 1.8)\n",
+         dict(format_name='1.9',
+              format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
+              format_supports_external_lookups=True,
+              index_class=BTreeGraphIndex),
+         dict(format_name='1.9-rich-root',
+              format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
+                  "(bzr 1.9)\n",
+              format_supports_external_lookups=True,
+              index_class=BTreeGraphIndex),
+         dict(format_name='2a',
+              format_string="Bazaar repository format 2a "
+                "(needs bzr 1.16 or later)\n",
               format_supports_external_lookups=True,
               index_class=BTreeGraphIndex),
          ]
-    adapter = tests.TestScenarioApplier()
     # name of the scenario is the format name
-    adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
-    suite = tests.TestSuite()
-    tests.adapt_tests(basic_tests, adapter, suite)
-    return suite
+    scenarios = [(s['format_name'], s) for s in scenarios_params]
+    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())
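
A note on the final hunk: the old load_tests() built a tests.TestScenarioApplier, assigned its scenarios, and pushed everything through tests.adapt_tests(); the new code collapses that into one call to tests.multiply_tests(basic_tests, scenarios, loader.suiteClass()), which clones each basic test once per (format_name, parameters) scenario. The sketch below is a stdlib-only illustration of that multiplication idea; multiply_tests, iter_cases, and DemoFormatTest here are hypothetical stand-ins written for this note, not bzrlib's implementation (bzrlib's hook also takes (basic_tests, module, loader), whereas the stdlib protocol used here is load_tests(loader, standard_tests, pattern)).

    # Illustrative, stdlib-only sketch of scenario multiplication.
    import copy
    import unittest

    def iter_cases(suite_or_case):
        """Yield the individual TestCase instances inside a (nested) suite."""
        if isinstance(suite_or_case, unittest.TestSuite):
            for item in suite_or_case:
                for case in iter_cases(item):
                    yield case
        else:
            yield suite_or_case

    def multiply_tests(tests, scenarios, result_suite):
        """Add one copy of every test per scenario to result_suite."""
        for case in iter_cases(tests):
            for name, attrs in scenarios:
                clone = copy.copy(case)
                for key, value in attrs.items():
                    setattr(clone, key, value)
                # Make the scenario visible in the test id, e.g. "test_x(2a)".
                clone.id = lambda base=case.id(), n=name: '%s(%s)' % (base, n)
                result_suite.addTest(clone)
        return result_suite

    class DemoFormatTest(unittest.TestCase):
        format_name = None  # supplied by the scenario

        def test_has_format_name(self):
            self.assertTrue(self.format_name)

    def load_tests(loader, standard_tests, pattern):
        # Scenario names mirror the format names used in the diff's
        # scenarios_params list; each dict becomes attributes on the clones.
        scenarios = [('1.9', {'format_name': '1.9'}),
                     ('2a', {'format_name': '2a'})]
        return multiply_tests(standard_tests, scenarios, loader.suiteClass())

    if __name__ == '__main__':
        unittest.main()

Running this module executes test_has_format_name twice, once per scenario, which is exactly how the per-pack tests get repeated for every repository format in the scenarios_params list above.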