~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/tests/test_pack_repository.py

  • Committer: John Arbash Meinel
  • Author(s): Mark Hammond
  • Date: 2008-09-09 17:02:21 UTC
  • mto: This revision was merged to the branch mainline in revision 3697.
  • Revision ID: john@arbash-meinel.com-20080909170221-svim3jw2mrz0amp3
An updated transparent icon for bzr.

-# Copyright (C) 2008-2011 Canonical Ltd
+# Copyright (C) 2008 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
...
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
 """Tests for pack repositories.
 
 These tests are repeated for all pack-based repository formats.
 """
 
+from cStringIO import StringIO
 from stat import S_ISDIR
 
-from bzrlib.btree_index import BTreeGraphIndex
-from bzrlib.index import GraphIndex
+from bzrlib.index import GraphIndex, InMemoryGraphIndex
 from bzrlib import (
-    controldir,
+    bzrdir,
     errors,
     inventory,
-    osutils,
+    progress,
     repository,
     revision as _mod_revision,
+    symbol_versioning,
     tests,
-    transport,
     ui,
-    )
-from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a
-from bzrlib.smart import (
-    client,
+    upgrade,
+    workingtree,
     )
 from bzrlib.tests import (
+    TestCase,
     TestCaseWithTransport,
     TestNotApplicable,
+    TestSkipped,
     )
 from bzrlib.transport import (
-    memory,
+    fakenfs,
+    get_transport,
     )
-from bzrlib.tests import test_server
 
 
 class TestPackRepository(TestCaseWithTransport):
...
     """
 
     def get_format(self):
-        return controldir.format_registry.make_bzrdir(self.format_name)
+        return bzrdir.format_registry.make_bzrdir(self.format_name)
 
     def test_attribute__fetch_order(self):
         """Packs do not need ordered data retrieval."""
         format = self.get_format()
         repo = self.make_repository('.', format=format)
-        self.assertEqual('unordered', repo._format._fetch_order)
+        self.assertEqual('unordered', repo._fetch_order)
 
     def test_attribute__fetch_uses_deltas(self):
         """Packs reuse deltas."""
         format = self.get_format()
         repo = self.make_repository('.', format=format)
-        if isinstance(format.repository_format, RepositoryFormat2a):
-            # TODO: This is currently a workaround. CHK format repositories
-            #       ignore the 'deltas' flag, but during conversions, we can't
-            #       do unordered delta fetches. Remove this clause once we
-            #       improve the inter-format fetching.
-            self.assertEqual(False, repo._format._fetch_uses_deltas)
-        else:
-            self.assertEqual(True, repo._format._fetch_uses_deltas)
+        self.assertEqual(True, repo._fetch_uses_deltas)
 
     def test_disk_layout(self):
         format = self.get_format()
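
The two assertions above track where the _fetch_order and _fetch_uses_deltas attributes live: trunk reads them from the format object (repo._format), the older code from the repository itself. As a rough sketch of how a fetch consumer might use them (the helper name and the mapping onto get_record_stream's third argument are illustrative assumptions, not bzrlib's actual fetch code):

    def _plan_record_stream(repo, keys):
        # Hypothetical consumer: ask for revision records in the cheapest
        # order the format allows; pack formats declare 'unordered'.
        order = repo._format._fetch_order             # 'unordered' for packs
        use_deltas = repo._format._fetch_uses_deltas  # reuse stored deltas?
        # If stored deltas cannot be reused, request the delta closure so
        # every record can be expanded to a fulltext by the receiver.
        return repo.revisions.get_record_stream(keys, order, not use_deltas)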
...
         self.assertFalse(t.has('knits'))
         # revision-indexes file-container directory
         self.assertEqual([],
-            list(self.index_class(t, 'pack-names', None).iter_all_entries()))
+            list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
         self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
         self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
         self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
...
         tree = self.make_branch_and_tree('.', format=format)
         trans = tree.branch.repository.bzrdir.get_repository_transport(None)
         self.assertEqual([],
-            list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
+            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
         tree.commit('foobarbaz')
-        index = self.index_class(trans, 'pack-names', None)
+        index = GraphIndex(trans, 'pack-names', None)
         index_nodes = list(index.iter_all_entries())
         self.assertEqual(1, len(index_nodes))
         node = index_nodes[0]
...
         tree1.branch.repository.fetch(tree2.branch.repository)
         trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
         self.assertEqual([],
-            list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
+            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
 
     def test_commit_across_pack_shape_boundary_autopacks(self):
         format = self.get_format()
...
         for x in range(9):
             tree.commit('commit %s' % x)
         # there should be 9 packs:
-        index = self.index_class(trans, 'pack-names', None)
+        index = GraphIndex(trans, 'pack-names', None)
         self.assertEqual(9, len(list(index.iter_all_entries())))
         # insert some files in obsolete_packs which should be removed by pack.
         trans.put_bytes('obsolete_packs/foo', '123')
         trans.put_bytes('obsolete_packs/bar', '321')
         # committing one more should coalesce to 1 of 10.
         tree.commit('commit triggering pack')
-        index = self.index_class(trans, 'pack-names', None)
+        index = GraphIndex(trans, 'pack-names', None)
         self.assertEqual(1, len(list(index.iter_all_entries())))
         # packing should not damage data
         tree = tree.bzrdir.open_workingtree()
         check_result = tree.branch.repository.check(
             [tree.branch.last_revision()])
-        nb_files = 5 # .pack, .rix, .iix, .tix, .six
-        if tree.branch.repository._format.supports_chks:
-            nb_files += 1 # .cix
-        # We should have 10 x nb_files files in the obsolete_packs directory.
+        # We should have 50 (10x5) files in the obsolete_packs directory.
         obsolete_files = list(trans.list_dir('obsolete_packs'))
         self.assertFalse('foo' in obsolete_files)
         self.assertFalse('bar' in obsolete_files)
-        self.assertEqual(10 * nb_files, len(obsolete_files))
+        self.assertEqual(50, len(obsolete_files))
         # XXX: Todo check packs obsoleted correctly - old packs and indices
         # in the obsolete_packs directory.
         large_pack_name = list(index.iter_all_entries())[0][1][0]
         # finally, committing again should not touch the large pack.
         tree.commit('commit not triggering pack')
-        index = self.index_class(trans, 'pack-names', None)
+        index = GraphIndex(trans, 'pack-names', None)
         self.assertEqual(2, len(list(index.iter_all_entries())))
         pack_names = [node[1][0] for node in index.iter_all_entries()]
         self.assertTrue(large_pack_name in pack_names)
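
The counts asserted above follow directly from the autopack policy: one pack per commit, coalescing when the tenth arrives. Restating the arithmetic (plain Python; the 50 matches the assertEqual above, and CHK-capable formats on the trunk side count a sixth file per pack):

    files_per_pack = 5            # .pack, .rix, .iix, .tix, .six
    packs_after_9_commits = 9     # one pack per commit, no autopack yet
    packs_after_10th_commit = 1   # the tenth commit coalesces 10 -> 1
    obsolete_files = 10 * files_per_pack
    assert obsolete_files == 50   # the ten originals, moved aside intact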
 
-    def test_commit_write_group_returns_new_pack_names(self):
-        # This test doesn't need real disk.
-        self.vfs_transport_factory = memory.MemoryServer
-        format = self.get_format()
-        repo = self.make_repository('foo', format=format)
-        repo.lock_write()
-        try:
-            # All current pack repository styles autopack at 10 revisions; and
-            # autopack as well as regular commit write group needs to return
-            # the new pack name. Looping is a little ugly, but we don't have a
-            # clean way to test both the autopack logic and the normal code
-            # path without doing this loop.
-            for pos in range(10):
-                revid = str(pos)
-                repo.start_write_group()
-                try:
-                    inv = inventory.Inventory(revision_id=revid)
-                    inv.root.revision = revid
-                    repo.texts.add_lines((inv.root.file_id, revid), [], [])
-                    rev = _mod_revision.Revision(timestamp=0, timezone=None,
-                        committer="Foo Bar <foo@example.com>", message="Message",
-                        revision_id=revid)
-                    rev.parent_ids = ()
-                    repo.add_revision(revid, rev, inv=inv)
-                except:
-                    repo.abort_write_group()
-                    raise
-                else:
-                    old_names = repo._pack_collection._names.keys()
-                    result = repo.commit_write_group()
-                    cur_names = repo._pack_collection._names.keys()
-                    new_names = list(set(cur_names) - set(old_names))
-                    self.assertEqual(new_names, result)
-        finally:
-            repo.unlock()
-
     def test_fail_obsolete_deletion(self):
         # failing to delete obsolete packs is not fatal
         format = self.get_format()
-        server = test_server.FakeNFSServer()
-        self.start_server(server)
-        t = transport.get_transport_from_url(server.get_url())
-        bzrdir = self.get_format().initialize_on_transport(t)
+        server = fakenfs.FakeNFSServer()
+        server.setUp()
+        self.addCleanup(server.tearDown)
+        transport = get_transport(server.get_url())
+        bzrdir = self.get_format().initialize_on_transport(transport)
         repo = bzrdir.create_repository()
         repo_transport = bzrdir.get_repository_transport(None)
         self.assertTrue(repo_transport.has('obsolete_packs'))
...
         repo._pack_collection._clear_obsolete_packs()
         self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))
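
The test removed above (test_commit_write_group_returns_new_pack_names) exercises the write-group protocol end to end. Condensed, the pattern it loops over is the following (a sketch of the calls used in the removed test, not a new API):

    repo.lock_write()
    try:
        repo.start_write_group()
        try:
            repo.texts.add_lines(('file-id', 'rev-id'), [], [])
        except:
            repo.abort_write_group()    # throw away the half-built pack
            raise
        else:
            # commit_write_group returns the names of the packs it created,
            # including the single coalesced pack when autopack fires.
            new_pack_names = repo.commit_write_group()
    finally:
        repo.unlock()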
 
-    def test_pack_collection_sets_sibling_indices(self):
-        """The CombinedGraphIndex objects in the pack collection are all
-        siblings of each other, so that search-order reorderings will be copied
-        to each other.
-        """
-        repo = self.make_repository('repo')
-        pack_coll = repo._pack_collection
-        indices = set([pack_coll.revision_index, pack_coll.inventory_index,
-                pack_coll.text_index, pack_coll.signature_index])
-        if pack_coll.chk_index is not None:
-            indices.add(pack_coll.chk_index)
-        combined_indices = set(idx.combined_index for idx in indices)
-        for combined_index in combined_indices:
-            self.assertEqual(
-                combined_indices.difference([combined_index]),
-                combined_index._sibling_indices)
-
     def test_pack_after_two_commits_packs_everything(self):
         format = self.get_format()
         tree = self.make_branch_and_tree('.', format=format)
...
         tree.commit('more work')
         tree.branch.repository.pack()
         # there should be 1 pack:
-        index = self.index_class(trans, 'pack-names', None)
+        index = GraphIndex(trans, 'pack-names', None)
         self.assertEqual(1, len(list(index.iter_all_entries())))
         self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
 
-    def test_pack_preserves_all_inventories(self):
-        # This is related to bug:
-        #   https://bugs.launchpad.net/bzr/+bug/412198
-        # Stacked repositories need to keep the inventory for parents, even
-        # after a pack operation. However, it is harder to test that, then just
-        # test that all inventory texts are preserved.
-        format = self.get_format()
-        builder = self.make_branch_builder('source', format=format)
-        builder.start_series()
-        builder.build_snapshot('A-id', None, [
-            ('add', ('', 'root-id', 'directory', None))])
-        builder.build_snapshot('B-id', None, [
-            ('add', ('file', 'file-id', 'file', 'B content\n'))])
-        builder.build_snapshot('C-id', None, [
-            ('modify', ('file-id', 'C content\n'))])
-        builder.finish_series()
-        b = builder.get_branch()
-        b.lock_read()
-        self.addCleanup(b.unlock)
-        repo = self.make_repository('repo', shared=True, format=format)
-        repo.lock_write()
-        self.addCleanup(repo.unlock)
-        repo.fetch(b.repository, revision_id='B-id')
-        inv = b.repository.iter_inventories(['C-id']).next()
-        repo.start_write_group()
-        repo.add_inventory('C-id', inv, ['B-id'])
-        repo.commit_write_group()
-        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
-                         sorted(repo.inventories.keys()))
-        repo.pack()
-        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
-                         sorted(repo.inventories.keys()))
-        # Content should be preserved as well
-        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())
-
     def test_pack_layout(self):
-        # Test that the ordering of revisions in pack repositories is
-        # tip->ancestor
         format = self.get_format()
         tree = self.make_branch_and_tree('.', format=format)
         trans = tree.branch.repository.bzrdir.get_repository_transport(None)
...
         self.addCleanup(tree.unlock)
         pack = tree.branch.repository._pack_collection.get_pack_by_name(
             tree.branch.repository._pack_collection.names()[0])
         # revision access tends to be tip->ancestor, so ordering that way on
         # disk is a good idea.
         for _1, key, val, refs in pack.revision_index.iter_all_entries():
-            if type(format.repository_format) is RepositoryFormat2a:
-                # group_start, group_len, internal_start, internal_len
-                pos = map(int, val.split())
-            else:
-                # eol_flag, start, len
-                pos = int(val[1:].split()[0])
             if key == ('1',):
-                pos_1 = pos
+                pos_1 = int(val[1:].split()[0])
             else:
-                pos_2 = pos
-        self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s'
-                                       % (pos_1, pos_2))
+                pos_2 = int(val[1:].split()[0])
+        self.assertTrue(pos_2 < pos_1)
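
The trunk side of this hunk has to decode two index-value layouts, which the older single-format code could hard-code. Restated as a standalone helper (the function name is hypothetical; the field meanings come from the comments in the removed lines):

    def decode_revision_index_value(val, is_2a):
        if is_2a:
            # GroupCompress (2a) entries carry four integers:
            # group_start, group_len, internal_start, internal_len
            return map(int, val.split())
        # knit-pack entries: one eol-flag byte, then "start length"
        return int(val[1:].split()[0])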
 
     def test_pack_repositories_support_multiple_write_locks(self):
         format = self.get_format()
...
 
     def _add_text(self, repo, fileid):
         """Add a text to the repository within a write group."""
-        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [],
-            ['smaplerev+'+fileid])
+        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])
 
     def test_concurrent_writers_merge_new_packs(self):
         format = self.get_format()
...
         finally:
             r1.unlock()
 
-    def test_concurrent_pack_triggers_reload(self):
-        # create 2 packs, which we will then collapse
-        tree = self.make_branch_and_tree('tree')
-        tree.lock_write()
-        try:
-            rev1 = tree.commit('one')
-            rev2 = tree.commit('two')
-            r2 = repository.Repository.open('tree')
-            r2.lock_read()
-            try:
-                # Now r2 has read the pack-names file, but will need to reload
-                # it after r1 has repacked
-                tree.branch.repository.pack()
-                self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2]))
-            finally:
-                r2.unlock()
-        finally:
-            tree.unlock()
-
-    def test_concurrent_pack_during_get_record_reloads(self):
-        tree = self.make_branch_and_tree('tree')
-        tree.lock_write()
-        try:
-            rev1 = tree.commit('one')
-            rev2 = tree.commit('two')
-            keys = [(rev1,), (rev2,)]
-            r2 = repository.Repository.open('tree')
-            r2.lock_read()
-            try:
-                # At this point, we will start grabbing a record stream, and
-                # trigger a repack mid-way
-                packed = False
-                result = {}
-                record_stream = r2.revisions.get_record_stream(keys,
-                                    'unordered', False)
-                for record in record_stream:
-                    result[record.key] = record
-                    if not packed:
-                        tree.branch.repository.pack()
-                        packed = True
-                # The first record will be found in the original location, but
-                # after the pack, we have to reload to find the next record
-                self.assertEqual(sorted(keys), sorted(result.keys()))
-            finally:
-                r2.unlock()
-        finally:
-            tree.unlock()
-
-    def test_concurrent_pack_during_autopack(self):
-        tree = self.make_branch_and_tree('tree')
-        tree.lock_write()
-        try:
-            for i in xrange(9):
-                tree.commit('rev %d' % (i,))
-            r2 = repository.Repository.open('tree')
-            r2.lock_write()
-            try:
-                # Monkey patch so that pack occurs while the other repo is
-                # autopacking. This is slightly bad, but all current pack
-                # repository implementations have a _pack_collection, and we
-                # test that it gets triggered. So if a future format changes
-                # things, the test will fail rather than succeed accidentally.
-                autopack_count = [0]
-                r1 = tree.branch.repository
-                orig = r1._pack_collection.pack_distribution
-                def trigger_during_auto(*args, **kwargs):
-                    ret = orig(*args, **kwargs)
-                    if not autopack_count[0]:
-                        r2.pack()
-                    autopack_count[0] += 1
-                    return ret
-                r1._pack_collection.pack_distribution = trigger_during_auto
-                tree.commit('autopack-rev')
-                # This triggers 2 autopacks. The first one causes r2.pack() to
-                # fire, but r2 doesn't see the new pack file yet. The
-                # autopack restarts and sees there are 2 files and there
-                # should be only 1 for 10 commits. So it goes ahead and
-                # finishes autopacking.
-                self.assertEqual([2], autopack_count)
-            finally:
-                r2.unlock()
-        finally:
-            tree.unlock()
-
     def test_lock_write_does_not_physically_lock(self):
         repo = self.make_repository('.', format=self.get_format())
         repo.lock_write()
...
     def prepare_for_break_lock(self):
         # Setup the global ui factory state so that a break-lock method call
         # will find usable input in the input stream.
-        ui.ui_factory = ui.CannedInputUIFactory([True])
+        old_factory = ui.ui_factory
+        def restoreFactory():
+            ui.ui_factory = old_factory
+        self.addCleanup(restoreFactory)
+        ui.ui_factory = ui.SilentUIFactory()
+        ui.ui_factory.stdin = StringIO("y\n")
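
Both sides of this hunk arrange for the break-lock confirmation to be answered "yes"; trunk does it in one line with CannedInputUIFactory, while the older code swaps in a SilentUIFactory with a faked stdin and restores the global factory via a cleanup. Side by side (condensed from the hunk above):

    # trunk: one canned answer per expected confirmation prompt
    ui.ui_factory = ui.CannedInputUIFactory([True])

    # older bzrlib: silence the UI and feed "y" through a fake stdin
    old_factory = ui.ui_factory
    def restoreFactory():
        ui.ui_factory = old_factory
    self.addCleanup(restoreFactory)
    ui.ui_factory = ui.SilentUIFactory()
    ui.ui_factory.stdin = StringIO("y\n")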
 
     def test_break_lock_breaks_physical_lock(self):
         repo = self.make_repository('.', format=self.get_format())
...
         self.assertRaises(errors.NoSuchRevision,
             missing_ghost.get_inventory, 'ghost')
 
-    def make_write_ready_repo(self):
-        format = self.get_format()
-        if isinstance(format.repository_format, RepositoryFormat2a):
-            raise TestNotApplicable("No missing compression parents")
-        repo = self.make_repository('.', format=format)
-        repo.lock_write()
-        self.addCleanup(repo.unlock)
-        repo.start_write_group()
-        self.addCleanup(repo.abort_write_group)
-        return repo
-
-    def test_missing_inventories_compression_parent_prevents_commit(self):
-        repo = self.make_write_ready_repo()
-        key = ('junk',)
-        repo.inventories._index._missing_compression_parents.add(key)
-        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-
-    def test_missing_revisions_compression_parent_prevents_commit(self):
-        repo = self.make_write_ready_repo()
-        key = ('junk',)
-        repo.revisions._index._missing_compression_parents.add(key)
-        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-
-    def test_missing_signatures_compression_parent_prevents_commit(self):
-        repo = self.make_write_ready_repo()
-        key = ('junk',)
-        repo.signatures._index._missing_compression_parents.add(key)
-        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-
-    def test_missing_text_compression_parent_prevents_commit(self):
-        repo = self.make_write_ready_repo()
-        key = ('some', 'junk')
-        repo.texts._index._missing_compression_parents.add(key)
-        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
-
     def test_supports_external_lookups(self):
         repo = self.make_repository('.', format=self.get_format())
         self.assertEqual(self.format_supports_external_lookups,
             repo._format.supports_external_lookups)
 
-    def _lock_write(self, write_lockable):
-        """Lock write_lockable, add a cleanup and return the result.
-
-        :param write_lockable: An object with a lock_write method.
-        :return: The result of write_lockable.lock_write().
-        """
-        result = write_lockable.lock_write()
-        self.addCleanup(result.unlock)
-        return result
-
-    def test_abort_write_group_does_not_raise_when_suppressed(self):
-        """Similar to per_repository.test_write_group's test of the same name.
-
-        Also requires that the exception is logged.
-        """
-        self.vfs_transport_factory = memory.MemoryServer
-        repo = self.make_repository('repo', format=self.get_format())
-        token = self._lock_write(repo).repository_token
-        repo.start_write_group()
-        # Damage the repository on the filesystem
-        self.get_transport('').rename('repo', 'foo')
-        # abort_write_group will not raise an error
-        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
-        # But it does log an error
-        log = self.get_log()
-        self.assertContainsRe(log, 'abort_write_group failed')
-        self.assertContainsRe(log, r'INFO  bzr: ERROR \(ignored\):')
-        if token is not None:
-            repo.leave_lock_in_place()
-
-    def test_abort_write_group_does_raise_when_not_suppressed(self):
-        self.vfs_transport_factory = memory.MemoryServer
-        repo = self.make_repository('repo', format=self.get_format())
-        token = self._lock_write(repo).repository_token
-        repo.start_write_group()
-        # Damage the repository on the filesystem
-        self.get_transport('').rename('repo', 'foo')
-        # abort_write_group will not raise an error
-        self.assertRaises(Exception, repo.abort_write_group)
-        if token is not None:
-            repo.leave_lock_in_place()
-
-    def test_suspend_write_group(self):
-        self.vfs_transport_factory = memory.MemoryServer
-        repo = self.make_repository('repo', format=self.get_format())
-        token = self._lock_write(repo).repository_token
-        repo.start_write_group()
-        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
-        wg_tokens = repo.suspend_write_group()
-        expected_pack_name = wg_tokens[0] + '.pack'
-        expected_names = [wg_tokens[0] + ext for ext in
-                            ('.rix', '.iix', '.tix', '.six')]
-        if repo.chk_bytes is not None:
-            expected_names.append(wg_tokens[0] + '.cix')
-        expected_names.append(expected_pack_name)
-        upload_transport = repo._pack_collection._upload_transport
-        limbo_files = upload_transport.list_dir('')
-        self.assertEqual(sorted(expected_names), sorted(limbo_files))
-        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
-        self.assertEqual(wg_tokens[0], md5.hexdigest())
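
test_suspend_write_group (removed above) pins down the naming scheme for suspended write groups: the token handed back is the md5 of the pack's bytes, and the pack plus one index per type wait in upload/ under that token. The expected-name computation, restated on its own (pack_bytes stands in for the raw pack content):

    token = osutils.md5(pack_bytes).hexdigest()   # == wg_tokens[0]
    expected = [token + ext for ext in
                ('.pack', '.rix', '.iix', '.tix', '.six')]
    # formats with chk_bytes also stage token + '.cix'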
-
-    def test_resume_chk_bytes(self):
-        self.vfs_transport_factory = memory.MemoryServer
-        repo = self.make_repository('repo', format=self.get_format())
-        if repo.chk_bytes is None:
-            raise TestNotApplicable('no chk_bytes for this repository')
-        token = self._lock_write(repo).repository_token
-        repo.start_write_group()
-        text = 'a bit of text\n'
-        key = ('sha1:' + osutils.sha_string(text),)
-        repo.chk_bytes.add_lines(key, (), [text])
-        wg_tokens = repo.suspend_write_group()
-        same_repo = repo.bzrdir.open_repository()
-        same_repo.lock_write()
-        self.addCleanup(same_repo.unlock)
-        same_repo.resume_write_group(wg_tokens)
-        self.assertEqual([key], list(same_repo.chk_bytes.keys()))
-        self.assertEqual(
-            text, same_repo.chk_bytes.get_record_stream([key],
-                'unordered', True).next().get_bytes_as('fulltext'))
-        same_repo.abort_write_group()
-        self.assertEqual([], list(same_repo.chk_bytes.keys()))
-
-    def test_resume_write_group_then_abort(self):
-        # Create a repo, start a write group, insert some data, suspend.
-        self.vfs_transport_factory = memory.MemoryServer
-        repo = self.make_repository('repo', format=self.get_format())
-        token = self._lock_write(repo).repository_token
-        repo.start_write_group()
-        text_key = ('file-id', 'revid')
-        repo.texts.add_lines(text_key, (), ['lines'])
-        wg_tokens = repo.suspend_write_group()
-        # Get a fresh repository object for the repo on the filesystem.
-        same_repo = repo.bzrdir.open_repository()
-        # Resume
-        same_repo.lock_write()
-        self.addCleanup(same_repo.unlock)
-        same_repo.resume_write_group(wg_tokens)
-        same_repo.abort_write_group()
-        self.assertEqual(
-            [], same_repo._pack_collection._upload_transport.list_dir(''))
-        self.assertEqual(
-            [], same_repo._pack_collection._pack_transport.list_dir(''))
-
-    def test_commit_resumed_write_group(self):
-        self.vfs_transport_factory = memory.MemoryServer
-        repo = self.make_repository('repo', format=self.get_format())
-        token = self._lock_write(repo).repository_token
-        repo.start_write_group()
-        text_key = ('file-id', 'revid')
-        repo.texts.add_lines(text_key, (), ['lines'])
-        wg_tokens = repo.suspend_write_group()
-        # Get a fresh repository object for the repo on the filesystem.
-        same_repo = repo.bzrdir.open_repository()
-        # Resume
-        same_repo.lock_write()
-        self.addCleanup(same_repo.unlock)
-        same_repo.resume_write_group(wg_tokens)
-        same_repo.commit_write_group()
-        expected_pack_name = wg_tokens[0] + '.pack'
-        expected_names = [wg_tokens[0] + ext for ext in
-                            ('.rix', '.iix', '.tix', '.six')]
-        if repo.chk_bytes is not None:
-            expected_names.append(wg_tokens[0] + '.cix')
-        self.assertEqual(
-            [], same_repo._pack_collection._upload_transport.list_dir(''))
-        index_names = repo._pack_collection._index_transport.list_dir('')
-        self.assertEqual(sorted(expected_names), sorted(index_names))
-        pack_names = repo._pack_collection._pack_transport.list_dir('')
-        self.assertEqual([expected_pack_name], pack_names)
-
-    def test_resume_malformed_token(self):
-        self.vfs_transport_factory = memory.MemoryServer
-        # Make a repository with a suspended write group
-        repo = self.make_repository('repo', format=self.get_format())
-        token = self._lock_write(repo).repository_token
-        repo.start_write_group()
-        text_key = ('file-id', 'revid')
-        repo.texts.add_lines(text_key, (), ['lines'])
-        wg_tokens = repo.suspend_write_group()
-        # Make a new repository
-        new_repo = self.make_repository('new_repo', format=self.get_format())
-        token = self._lock_write(new_repo).repository_token
-        hacked_wg_token = (
-            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
-        self.assertRaises(
-            errors.UnresumableWriteGroup,
-            new_repo.resume_write_group, [hacked_wg_token])
-
 
 class TestPackRepositoryStacking(TestCaseWithTransport):
 
...
 
     def setUp(self):
         if not self.format_supports_external_lookups:
             raise TestNotApplicable("%r doesn't support stacking"
                 % (self.format_name,))
         super(TestPackRepositoryStacking, self).setUp()
 
     def get_format(self):
-        return controldir.format_registry.make_bzrdir(self.format_name)
+        return bzrdir.format_registry.make_bzrdir(self.format_name)
 
     def test_stack_checks_rich_root_compatibility(self):
         # early versions of the packing code relied on pack internals to
...
             if getattr(repo._format, 'supports_tree_reference', False):
                 matching_format_name = 'pack-0.92-subtree'
             else:
-                if repo._format.supports_chks:
-                    matching_format_name = '2a'
-                else:
-                    matching_format_name = 'rich-root-pack'
+                matching_format_name = 'rich-root-pack'
             mismatching_format_name = 'pack-0.92'
         else:
-            # We don't have a non-rich-root CHK format.
-            if repo._format.supports_chks:
-                raise AssertionError("no non-rich-root CHK formats known")
-            else:
-                matching_format_name = 'pack-0.92'
+            matching_format_name = 'pack-0.92'
             mismatching_format_name = 'pack-0.92-subtree'
         base = self.make_repository('base', format=matching_format_name)
         repo.add_fallback_repository(base)
...
             repo.add_fallback_repository, bad_repo)
         self.assertContainsRe(str(e),
             r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
-            r'.*Repository.*/repo/.*\n'
+            r'KnitPackRepository.*/repo/.*\n'
             r'different rich-root support')
 
     def test_stack_checks_serializers_compatibility(self):
...
             mismatching_format_name = 'rich-root-pack'
         else:
             if repo.supports_rich_root():
-                if repo._format.supports_chks:
-                    matching_format_name = '2a'
-                else:
-                    matching_format_name = 'rich-root-pack'
+                matching_format_name = 'rich-root-pack'
                 mismatching_format_name = 'pack-0.92-subtree'
             else:
                 raise TestNotApplicable('No formats use non-v5 serializer'
...
             repo.add_fallback_repository, bad_repo)
         self.assertContainsRe(str(e),
             r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
-            r'.*Repository.*/repo/.*\n'
+            r'KnitPackRepository.*/repo/.*\n'
             r'different serializers')
 
     def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
...
         base.commit('foo')
         referencing = self.make_branch_and_tree('repo', format=self.get_format())
         referencing.branch.repository.add_fallback_repository(base.branch.repository)
-        local_tree = referencing.branch.create_checkout('local')
-        local_tree.commit('bar')
+        referencing.commit('bar')
         new_instance = referencing.bzrdir.open_repository()
         new_instance.lock_read()
         self.addCleanup(new_instance.unlock)
...
         self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
 
     def test_autopack_only_considers_main_repo_packs(self):
-        format = self.get_format()
-        base = self.make_branch_and_tree('base', format=format)
+        base = self.make_branch_and_tree('base', format=self.get_format())
         base.commit('foo')
-        tree = self.make_branch_and_tree('repo', format=format)
+        tree = self.make_branch_and_tree('repo', format=self.get_format())
         tree.branch.repository.add_fallback_repository(base.branch.repository)
         trans = tree.branch.repository.bzrdir.get_repository_transport(None)
         # This test could be a little cheaper by replacing the packs
...
         # and max packs policy - so we are checking the policy is honoured
         # in the test. But for now 11 commits is not a big deal in a single
         # test.
-        local_tree = tree.branch.create_checkout('local')
         for x in range(9):
-            local_tree.commit('commit %s' % x)
+            tree.commit('commit %s' % x)
         # there should be 9 packs:
-        index = self.index_class(trans, 'pack-names', None)
+        index = GraphIndex(trans, 'pack-names', None)
         self.assertEqual(9, len(list(index.iter_all_entries())))
         # committing one more should coalesce to 1 of 10.
-        local_tree.commit('commit triggering pack')
-        index = self.index_class(trans, 'pack-names', None)
+        tree.commit('commit triggering pack')
+        index = GraphIndex(trans, 'pack-names', None)
         self.assertEqual(1, len(list(index.iter_all_entries())))
         # packing should not damage data
         tree = tree.bzrdir.open_workingtree()
         check_result = tree.branch.repository.check(
             [tree.branch.last_revision()])
-        nb_files = 5 # .pack, .rix, .iix, .tix, .six
-        if tree.branch.repository._format.supports_chks:
-            nb_files += 1 # .cix
-        # We should have 10 x nb_files files in the obsolete_packs directory.
+        # We should have 50 (10x5) files in the obsolete_packs directory.
         obsolete_files = list(trans.list_dir('obsolete_packs'))
         self.assertFalse('foo' in obsolete_files)
         self.assertFalse('bar' in obsolete_files)
-        self.assertEqual(10 * nb_files, len(obsolete_files))
+        self.assertEqual(50, len(obsolete_files))
         # XXX: Todo check packs obsoleted correctly - old packs and indices
         # in the obsolete_packs directory.
         large_pack_name = list(index.iter_all_entries())[0][1][0]
         # finally, committing again should not touch the large pack.
-        local_tree.commit('commit not triggering pack')
-        index = self.index_class(trans, 'pack-names', None)
+        tree.commit('commit not triggering pack')
+        index = GraphIndex(trans, 'pack-names', None)
         self.assertEqual(2, len(list(index.iter_all_entries())))
         pack_names = [node[1][0] for node in index.iter_all_entries()]
         self.assertTrue(large_pack_name in pack_names)
 
 
-class TestKeyDependencies(TestCaseWithTransport):
-
-    def get_format(self):
-        return controldir.format_registry.make_bzrdir(self.format_name)
-
-    def create_source_and_target(self):
-        builder = self.make_branch_builder('source', format=self.get_format())
-        builder.start_series()
-        builder.build_snapshot('A-id', None, [
-            ('add', ('', 'root-id', 'directory', None))])
-        builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
-        builder.finish_series()
-        repo = self.make_repository('target', format=self.get_format())
-        b = builder.get_branch()
-        b.lock_read()
-        self.addCleanup(b.unlock)
-        repo.lock_write()
-        self.addCleanup(repo.unlock)
-        return b.repository, repo
-
-    def test_key_dependencies_cleared_on_abort(self):
-        source_repo, target_repo = self.create_source_and_target()
-        target_repo.start_write_group()
-        try:
-            stream = source_repo.revisions.get_record_stream([('B-id',)],
-                                                             'unordered', True)
-            target_repo.revisions.insert_record_stream(stream)
-            key_refs = target_repo.revisions._index._key_dependencies
-            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
-        finally:
-            target_repo.abort_write_group()
-        self.assertEqual([], sorted(key_refs.get_referrers()))
-
-    def test_key_dependencies_cleared_on_suspend(self):
-        source_repo, target_repo = self.create_source_and_target()
-        target_repo.start_write_group()
-        try:
-            stream = source_repo.revisions.get_record_stream([('B-id',)],
-                                                             'unordered', True)
-            target_repo.revisions.insert_record_stream(stream)
-            key_refs = target_repo.revisions._index._key_dependencies
-            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
-        finally:
-            target_repo.suspend_write_group()
-        self.assertEqual([], sorted(key_refs.get_referrers()))
-
-    def test_key_dependencies_cleared_on_commit(self):
-        source_repo, target_repo = self.create_source_and_target()
-        target_repo.start_write_group()
-        try:
-            # Copy all texts, inventories, and chks so that nothing is missing
-            # for revision B-id.
-            for vf_name in ['texts', 'chk_bytes', 'inventories']:
-                source_vf = getattr(source_repo, vf_name, None)
-                if source_vf is None:
-                    continue
-                target_vf = getattr(target_repo, vf_name)
-                stream = source_vf.get_record_stream(
-                    source_vf.keys(), 'unordered', True)
-                target_vf.insert_record_stream(stream)
-            # Copy just revision B-id
-            stream = source_repo.revisions.get_record_stream(
-                [('B-id',)], 'unordered', True)
-            target_repo.revisions.insert_record_stream(stream)
-            key_refs = target_repo.revisions._index._key_dependencies
-            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
-        finally:
-            target_repo.commit_write_group()
-        self.assertEqual([], sorted(key_refs.get_referrers()))
-
-
-class TestSmartServerAutopack(TestCaseWithTransport):
-
-    def setUp(self):
-        super(TestSmartServerAutopack, self).setUp()
-        # Create a smart server that publishes whatever the backing VFS server
-        # does.
-        self.smart_server = test_server.SmartTCPServer_for_testing()
-        self.start_server(self.smart_server, self.get_server())
-        # Log all HPSS calls into self.hpss_calls.
-        client._SmartClient.hooks.install_named_hook(
-            'call', self.capture_hpss_call, None)
-        self.hpss_calls = []
-
-    def capture_hpss_call(self, params):
-        self.hpss_calls.append(params.method)
-
-    def get_format(self):
-        return controldir.format_registry.make_bzrdir(self.format_name)
-
-    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
-        # Make local and remote repos
-        format = self.get_format()
-        tree = self.make_branch_and_tree('local', format=format)
-        self.make_branch_and_tree('remote', format=format)
-        remote_branch_url = self.smart_server.get_url() + 'remote'
-        remote_branch = controldir.ControlDir.open(remote_branch_url).open_branch()
-        # Make 9 local revisions, and push them one at a time to the remote
-        # repo to produce 9 pack files.
-        for x in range(9):
-            tree.commit('commit %s' % x)
-            tree.branch.push(remote_branch)
-        # Make one more push to trigger an autopack
-        self.hpss_calls = []
-        tree.commit('commit triggering pack')
-        tree.branch.push(remote_branch)
-        autopack_calls = len([call for call in self.hpss_calls if call ==
-            'PackRepository.autopack'])
-        streaming_calls = len([call for call in self.hpss_calls if call in
-            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
-        if autopack_calls:
-            # Non streaming server
-            self.assertEqual(1, autopack_calls)
-            self.assertEqual(0, streaming_calls)
-        else:
-            # Streaming was used, which autopacks on the remote end.
-            self.assertEqual(0, autopack_calls)
-            # NB: The 2 calls are because of the sanity check that the server
-            # supports the verb (see remote.py:RemoteSink.insert_stream for
-            # details).
-            self.assertEqual(2, streaming_calls)
-
-
-def load_tests(basic_tests, module, loader):
+def load_tests(basic_tests, module, test_loader):
     # these give the bzrdir canned format name, and the repository on-disk
     # format string
     scenarios_params = [
          dict(format_name='pack-0.92',
               format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
-              format_supports_external_lookups=False,
-              index_class=GraphIndex),
+              format_supports_external_lookups=False),
          dict(format_name='pack-0.92-subtree',
               format_string="Bazaar pack repository format 1 "
               "with subtree support (needs bzr 0.92)\n",
-              format_supports_external_lookups=False,
-              index_class=GraphIndex),
+              format_supports_external_lookups=False),
          dict(format_name='1.6',
               format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
-              format_supports_external_lookups=True,
-              index_class=GraphIndex),
+              format_supports_external_lookups=True),
          dict(format_name='1.6.1-rich-root',
               format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
                   "(bzr 1.6.1)\n",
-              format_supports_external_lookups=True,
-              index_class=GraphIndex),
-         dict(format_name='1.9',
-              format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
-              format_supports_external_lookups=True,
-              index_class=BTreeGraphIndex),
-         dict(format_name='1.9-rich-root',
-              format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
-                  "(bzr 1.9)\n",
-              format_supports_external_lookups=True,
-              index_class=BTreeGraphIndex),
-         dict(format_name='2a',
-              format_string="Bazaar repository format 2a "
-                "(needs bzr 1.16 or later)\n",
-              format_supports_external_lookups=True,
-              index_class=BTreeGraphIndex),
+              format_supports_external_lookups=True),
+         dict(format_name='development',
+              format_string="Bazaar development format 1 "
+                  "(needs bzr.dev from before 1.6)\n",
+              format_supports_external_lookups=True),
+         dict(format_name='development-subtree',
+              format_string="Bazaar development format 1 "
+                  "with subtree support (needs bzr.dev from before 1.6)\n",
+              format_supports_external_lookups=True),
          ]
+    adapter = tests.TestScenarioApplier()
     # name of the scenario is the format name
-    scenarios = [(s['format_name'], s) for s in scenarios_params]
-    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())
+    adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
+    suite = tests.TestSuite()
+    tests.adapt_tests(basic_tests, adapter, suite)
+    return suite
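
Both versions of load_tests multiply the base suite once per format scenario; only the plumbing differs (multiply_tests on trunk, TestScenarioApplier plus adapt_tests in the older code). What either amounts to, sketched independently of bzrlib's helpers:

    import copy
    import unittest

    def multiply(basic_tests, scenarios):
        # One clone of every test per scenario, with the scenario's
        # entries (format_name, format_string, ...) set as attributes.
        suite = unittest.TestSuite()
        for name, params in scenarios:
            for test in basic_tests:
                clone = copy.copy(test)
                for key, value in params.items():
                    setattr(clone, key, value)
                suite.addTest(clone)
        return suite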