~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/per_pack_repository.py

  • Committer: Alexander Belchenko
  • Date: 2006-07-30 16:43:12 UTC
  • mto: (1711.2.111 jam-integration)
  • mto: This revision was merged to the branch mainline in revision 1906.
  • Revision ID: bialix@ukr.net-20060730164312-b025fd3ff0cee59e
rename  gpl.txt => COPYING.txt

# Copyright (C) 2008, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Tests for pack repositories.

These tests are repeated for all pack-based repository formats.
"""

from cStringIO import StringIO
from stat import S_ISDIR

from bzrlib.btree_index import BTreeGraphIndex
from bzrlib.index import GraphIndex
from bzrlib import (
    bzrdir,
    errors,
    inventory,
    osutils,
    progress,
    repository,
    revision as _mod_revision,
    symbol_versioning,
    tests,
    ui,
    upgrade,
    workingtree,
    )
from bzrlib.repofmt import (
    pack_repo,
    groupcompress_repo,
    )
from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a
from bzrlib.smart import (
    client,
    )
from bzrlib.tests import (
    TestCase,
    TestCaseWithTransport,
    TestNotApplicable,
    TestSkipped,
    )
from bzrlib.transport import (
    get_transport,
    memory,
    )
from bzrlib.tests import test_server
from bzrlib.tests.per_repository import TestCaseWithRepository


class TestPackRepository(TestCaseWithTransport):
    """Tests to be repeated across all pack-based formats.

    The following are populated from the test scenario:

    :ivar format_name: Registered name of the format to test.
    :ivar format_string: On-disk format marker.
    :ivar format_supports_external_lookups: Boolean.
    """

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_attribute__fetch_order(self):
        """Packs do not need ordered data retrieval."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual('unordered', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Packs reuse deltas."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        if isinstance(format.repository_format, RepositoryFormat2a):
            # TODO: This is currently a workaround. CHK format repositories
            #       ignore the 'deltas' flag, but during conversions, we can't
            #       do unordered delta fetches. Remove this clause once we
            #       improve the inter-format fetching.
            self.assertEqual(False, repo._format._fetch_uses_deltas)
        else:
            self.assertEqual(True, repo._format._fetch_uses_deltas)

    def test_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.check_databases(t)

    def check_format(self, t):
        self.assertEqualDiff(
            self.format_string, # from scenario
            t.get('format').read())

    def assertHasNoKndx(self, t, knit_name):
        """Assert that knit_name has no index on t."""
        self.assertFalse(t.has(knit_name + '.kndx'))

    def assertHasNoKnit(self, t, knit_name):
        """Assert that knit_name has no knit content on t."""
        # no default content
        self.assertFalse(t.has(knit_name + '.knit'))

    def check_databases(self, t):
        """Check knit content for a repository."""
        # check conversion worked
        self.assertHasNoKndx(t, 'inventory')
        self.assertHasNoKnit(t, 'inventory')
        self.assertHasNoKndx(t, 'revisions')
        self.assertHasNoKnit(t, 'revisions')
        self.assertHasNoKndx(t, 'signatures')
        self.assertHasNoKnit(t, 'signatures')
        self.assertFalse(t.has('knits'))
        # the pack-names index lists no packs yet, and the pack file-container
        # directories are in place.
        self.assertEqual([],
            list(self.index_class(t, 'pack-names', None).iter_all_entries()))
        self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
        self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
        self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
        self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))

    def test_shared_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        # check the repository layout on disk:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.check_databases(t)

    def test_shared_no_tree_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        repo.set_make_working_trees(False)
        # check the repository layout on disk:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        # We should have a marker for the no-working-trees flag.
        self.assertEqualDiff('', t.get('no-working-trees').read())
        # The marker should go when we toggle the setting.
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.check_databases(t)

    def test_adding_revision_creates_pack_indices(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
        tree.commit('foobarbaz')
        index = self.index_class(trans, 'pack-names', None)
        index_nodes = list(index.iter_all_entries())
        self.assertEqual(1, len(index_nodes))
        node = index_nodes[0]
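        # Each entry from iter_all_entries() is (index, key, value); the key
        # is a 1-tuple holding the pack's name, and the value encodes the
        # sizes of its index files.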
        name = node[1][0]
        # the pack sizes should be listed in the index
        pack_value = node[2]
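        # e.g. '1234 5678 910 1112' -- one decimal byte size per index file,
        # in the order of the suffixes zipped below.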
        sizes = [int(digits) for digits in pack_value.split(' ')]
        for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
            stat = trans.stat('indices/%s%s' % (name, suffix))
            self.assertEqual(size, stat.st_size)

    def test_pulling_nothing_leads_to_no_new_names(self):
        format = self.get_format()
        tree1 = self.make_branch_and_tree('1', format=format)
        tree2 = self.make_branch_and_tree('2', format=format)
        tree1.branch.repository.fetch(tree2.branch.repository)
        trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(self.index_class(trans, 'pack-names', None).iter_all_entries()))

    def test_commit_across_pack_shape_boundary_autopacks(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_commit_write_group_returns_new_pack_names(self):
        # This test doesn't need real disk.
        self.vfs_transport_factory = memory.MemoryServer
        format = self.get_format()
        repo = self.make_repository('foo', format=format)
        repo.lock_write()
        try:
            # All current pack repository styles autopack at 10 revisions; and
            # autopack as well as regular commit write group needs to return
            # the new pack name. Looping is a little ugly, but we don't have a
            # clean way to test both the autopack logic and the normal code
            # path without doing this loop.
            for pos in range(10):
                revid = str(pos)
                repo.start_write_group()
                try:
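                    # Build a minimal revision by hand (a root-only inventory
                    # and no parents) so each iteration can commit without a
                    # working tree.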
                    inv = inventory.Inventory(revision_id=revid)
                    inv.root.revision = revid
                    repo.texts.add_lines((inv.root.file_id, revid), [], [])
                    rev = _mod_revision.Revision(timestamp=0, timezone=None,
                        committer="Foo Bar <foo@example.com>", message="Message",
                        revision_id=revid)
                    rev.parent_ids = ()
                    repo.add_revision(revid, rev, inv=inv)
                except:
                    repo.abort_write_group()
                    raise
                else:
                    old_names = repo._pack_collection._names.keys()
                    result = repo.commit_write_group()
                    cur_names = repo._pack_collection._names.keys()
                    new_names = list(set(cur_names) - set(old_names))
                    self.assertEqual(new_names, result)
        finally:
            repo.unlock()

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = test_server.FakeNFSServer()
        self.start_server(server)
        transport = get_transport(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(transport)
        repo = bzrdir.create_repository()
        repo_transport = bzrdir.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))
        # these files are in use by another client and typically can't be deleted
        repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
        repo._pack_collection._clear_obsolete_packs()
        self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))

    def test_pack_collection_sets_sibling_indices(self):
        """The CombinedGraphIndex objects in the pack collection are all
        siblings of each other, so that search-order reorderings will be copied
        to each other.
        """
        repo = self.make_repository('repo')
        pack_coll = repo._pack_collection
        indices = set([pack_coll.revision_index, pack_coll.inventory_index,
                pack_coll.text_index, pack_coll.signature_index])
        if pack_coll.chk_index is not None:
            indices.add(pack_coll.chk_index)
        combined_indices = set(idx.combined_index for idx in indices)
        for combined_index in combined_indices:
            self.assertEqual(
                combined_indices.difference([combined_index]),
                combined_index._sibling_indices)

    def test_pack_after_two_commits_packs_everything(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start')
        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_preserves_all_inventories(self):
        # This is related to bug:
        #   https://bugs.launchpad.net/bzr/+bug/412198
        # Stacked repositories need to keep the inventory for parents, even
        # after a pack operation. However, that is harder to test than simply
        # checking that all inventory texts are preserved.
        format = self.get_format()
        builder = self.make_branch_builder('source', format=format)
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', None, [
            ('add', ('file', 'file-id', 'file', 'B content\n'))])
        builder.build_snapshot('C-id', None, [
            ('modify', ('file-id', 'C content\n'))])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo = self.make_repository('repo', shared=True, format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.fetch(b.repository, revision_id='B-id')
        inv = b.repository.iter_inventories(['C-id']).next()
        repo.start_write_group()
        repo.add_inventory('C-id', inv, ['B-id'])
        repo.commit_write_group()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        repo.pack()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        # Content should be preserved as well
        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())

    def test_pack_layout(self):
        # Test that the ordering of revisions in pack repositories is
        # tip->ancestor
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if type(format.repository_format) is RepositoryFormat2a:
                # group_start, group_len, internal_start, internal_len
                pos = map(int, val.split())
            else:
                # eol_flag, start, len
                pos = int(val[1:].split()[0])
            if key == ('1',):
                pos_1 = pos
            else:
                pos_2 = pos
        self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s'
                                       % (pos_1, pos_2))

    def test_pack_repositories_support_multiple_write_locks(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        self.addCleanup(r1.unlock)
        r2.lock_write()
        r2.unlock()

    def _add_text(self, repo, fileid):
        """Add a text to the repository within a write group."""
        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [],
            ['samplerev+'+fileid])

    def test_concurrent_writers_merge_new_packs(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1.start_write_group()
                try:
                    r2.start_write_group()
                    try:
                        self._add_text(r1, 'fileidr1')
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1.abort_write_group()
                    raise
                # both r1 and r2 have open write groups with data in them
                # created while the other's write group was open.
                # Commit both, which requires a merge to the pack-names.
                try:
                    r1.commit_write_group()
                except:
                    r1.abort_write_group()
                    r2.abort_write_group()
                    raise
                r2.commit_write_group()
                # tell r1 to reload from disk
                r1._pack_collection.reset()
                # Now both repositories should know about both names
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(2, len(r1._pack_collection.names()))
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_writer_second_preserves_dropping_a_pack(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        # add a pack to drop
        r1.lock_write()
        try:
            r1.start_write_group()
            try:
                self._add_text(r1, 'fileidr1')
            except:
                r1.abort_write_group()
                raise
            else:
                r1.commit_write_group()
            r1._pack_collection.ensure_loaded()
            name_to_drop = r1._pack_collection.all_packs()[0].name
        finally:
            r1.unlock()
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1._pack_collection.ensure_loaded()
                try:
                    r2.start_write_group()
                    try:
                        # in r1, drop the pack
                        r1._pack_collection._remove_pack_from_memory(
                            r1._pack_collection.get_pack_by_name(name_to_drop))
                        # in r2, add a pack
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1._pack_collection.reset()
                    raise
                # r1 has a changed names list, and r2 an open write group with
                # changes.
                # save r1, and then commit the r2 write group, which requires a
                # merge to the pack-names, which should not reinstate
                # name_to_drop
                try:
                    r1._pack_collection._save_pack_names()
                    r1._pack_collection.reset()
                except:
                    r2.abort_write_group()
                    raise
                try:
                    r2.commit_write_group()
                except:
                    r2.abort_write_group()
                    raise
                # Now both repositories should know about just one name.
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(1, len(r1._pack_collection.names()))
                self.assertFalse(name_to_drop in r1._pack_collection.names())
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_pack_triggers_reload(self):
        # create 2 packs, which we will then collapse
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        try:
            rev1 = tree.commit('one')
            rev2 = tree.commit('two')
            r2 = repository.Repository.open('tree')
            r2.lock_read()
            try:
                # Now r2 has read the pack-names file, but will need to reload
                # it after r1 has repacked
                tree.branch.repository.pack()
                self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2]))
            finally:
                r2.unlock()
        finally:
            tree.unlock()

    def test_concurrent_pack_during_get_record_reloads(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        try:
            rev1 = tree.commit('one')
            rev2 = tree.commit('two')
            keys = [(rev1,), (rev2,)]
            r2 = repository.Repository.open('tree')
            r2.lock_read()
            try:
                # At this point, we will start grabbing a record stream, and
                # trigger a repack mid-way
                packed = False
                result = {}
                record_stream = r2.revisions.get_record_stream(keys,
                                    'unordered', False)
                for record in record_stream:
                    result[record.key] = record
                    if not packed:
                        tree.branch.repository.pack()
                        packed = True
                # The first record will be found in the original location, but
                # after the pack, we have to reload to find the next record
                self.assertEqual(sorted(keys), sorted(result.keys()))
            finally:
                r2.unlock()
        finally:
            tree.unlock()

    def test_concurrent_pack_during_autopack(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        try:
            for i in xrange(9):
                tree.commit('rev %d' % (i,))
            r2 = repository.Repository.open('tree')
            r2.lock_write()
            try:
                # Monkey patch so that pack occurs while the other repo is
                # autopacking. This is slightly bad, but all current pack
                # repository implementations have a _pack_collection, and we
                # test that it gets triggered. So if a future format changes
                # things, the test will fail rather than succeed accidentally.
                autopack_count = [0]
                r1 = tree.branch.repository
                orig = r1._pack_collection.pack_distribution
                def trigger_during_auto(*args, **kwargs):
                    ret = orig(*args, **kwargs)
                    if not autopack_count[0]:
                        r2.pack()
                    autopack_count[0] += 1
                    return ret
                r1._pack_collection.pack_distribution = trigger_during_auto
                tree.commit('autopack-rev')
                # This triggers 2 autopacks. The first one causes r2.pack() to
                # fire, but r2 doesn't see the new pack file yet. The
                # autopack restarts and sees there are 2 files and there
                # should be only 1 for 10 commits. So it goes ahead and
                # finishes autopacking.
                self.assertEqual([2], autopack_count)
            finally:
                r2.unlock()
        finally:
            tree.unlock()

    def test_lock_write_does_not_physically_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

    def prepare_for_break_lock(self):
        # Set up the global ui factory state so that a break-lock method call
        # will find usable input in the input stream.
        ui.ui_factory = ui.CannedInputUIFactory([True])
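        # CannedInputUIFactory will answer break_lock's single confirmation
        # prompt with True ("yes, break it").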

    def test_break_lock_breaks_physical_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        repo.control_files.leave_in_place()
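        # Unlock while leaving the physical lock in place, so break_lock
        # below has a stale lock to break.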
        repo.unlock()
        repo2 = repository.Repository.open('.')
        self.assertTrue(repo.get_physical_lock_status())
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertFalse(repo.get_physical_lock_status())

    def test_broken_physical_locks_error_on__unlock_names_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        self.assertTrue(repo.get_physical_lock_status())
        repo2 = repository.Repository.open('.')
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)

    def test_fetch_without_find_ghosts_ignores_ghosts(self):
        # we want two repositories at this point:
        # one with a revision that is a ghost in the other
        # repository.
        # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
        # 'references' is present in both repositories, and 'tip' is present
        # just in has_ghost.
        # has_ghost       missing_ghost
        #------------------------------
        # 'ghost'             -
        # 'references'    'references'
        # 'tip'               -
        # In this test we fetch 'tip' which should not fetch 'ghost'
        has_ghost = self.make_repository('has_ghost', format=self.get_format())
        missing_ghost = self.make_repository('missing_ghost',
            format=self.get_format())

        def add_commit(repo, revision_id, parent_ids):
            repo.lock_write()
            repo.start_write_group()
            inv = inventory.Inventory(revision_id=revision_id)
            inv.root.revision = revision_id
            root_id = inv.root.file_id
            sha1 = repo.add_inventory(revision_id, inv, [])
            repo.texts.add_lines((root_id, revision_id), [], [])
            rev = _mod_revision.Revision(timestamp=0,
                                         timezone=None,
                                         committer="Foo Bar <foo@example.com>",
                                         message="Message",
                                         inventory_sha1=sha1,
                                         revision_id=revision_id)
            rev.parent_ids = parent_ids
            repo.add_revision(revision_id, rev)
            repo.commit_write_group()
            repo.unlock()
        add_commit(has_ghost, 'ghost', [])
        add_commit(has_ghost, 'references', ['ghost'])
        add_commit(missing_ghost, 'references', ['ghost'])
        add_commit(has_ghost, 'tip', ['references'])
        missing_ghost.fetch(has_ghost, 'tip')
        # missing ghost now has tip and not ghost.
        rev = missing_ghost.get_revision('tip')
        inv = missing_ghost.get_inventory('tip')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_revision, 'ghost')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def make_write_ready_repo(self):
        format = self.get_format()
        if isinstance(format.repository_format, RepositoryFormat2a):
            raise TestNotApplicable("No missing compression parents")
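            # 2a (CHK) repositories use group-compress storage, which does not
            # track knit-style compression parents, so nothing can go missing.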
        repo = self.make_repository('.', format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        self.addCleanup(repo.abort_write_group)
        return repo

    def test_missing_inventories_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.inventories._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_revisions_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.revisions._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_signatures_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.signatures._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_text_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('some', 'junk')
        repo.texts._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
            repo._format.supports_external_lookups)

    def _lock_write(self, write_lockable):
        """Lock write_lockable, add a cleanup and return the result.

        :param write_lockable: An object with a lock_write method.
        :return: The result of write_lockable.lock_write().
        """
        result = write_lockable.lock_write()
        self.addCleanup(result.unlock)
        return result

    def test_abort_write_group_does_not_raise_when_suppressed(self):
        """Similar to per_repository.test_write_group's test of the same name.

        Also requires that the exception is logged.
        """
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # abort_write_group will not raise an error
        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
        # But it does log an error
        log = self.get_log()
        self.assertContainsRe(log, 'abort_write_group failed')
        self.assertContainsRe(log, r'INFO  bzr: ERROR \(ignored\):')
        if token is not None:
            repo.leave_lock_in_place()

    def test_abort_write_group_does_raise_when_not_suppressed(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # without suppress_errors, abort_write_group should raise
        self.assertRaises(Exception, repo.abort_write_group)
        if token is not None:
            repo.leave_lock_in_place()

    def test_suspend_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                            ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        expected_names.append(expected_pack_name)
        upload_transport = repo._pack_collection._upload_transport
        limbo_files = upload_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(limbo_files))
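        # The write-group token doubles as the pack's name, which for pack
        # repositories is the md5 of the pack's content -- verified below.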
        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
        self.assertEqual(wg_tokens[0], md5.hexdigest())

    def test_resume_chk_bytes(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        if repo.chk_bytes is None:
            raise TestNotApplicable('no chk_bytes for this repository')
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text = 'a bit of text\n'
        key = ('sha1:' + osutils.sha_string(text),)
        repo.chk_bytes.add_lines(key, (), [text])
        wg_tokens = repo.suspend_write_group()
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        self.assertEqual([key], list(same_repo.chk_bytes.keys()))
        self.assertEqual(
            text, same_repo.chk_bytes.get_record_stream([key],
                'unordered', True).next().get_bytes_as('fulltext'))
        same_repo.abort_write_group()
        self.assertEqual([], list(same_repo.chk_bytes.keys()))

    def test_resume_write_group_then_abort(self):
        # Create a repo, start a write group, insert some data, suspend.
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        # Resume
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.abort_write_group()
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        self.assertEqual(
            [], same_repo._pack_collection._pack_transport.list_dir(''))

    def test_commit_resumed_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        # Resume
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.commit_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                            ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        index_names = repo._pack_collection._index_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(index_names))
        pack_names = repo._pack_collection._pack_transport.list_dir('')
        self.assertEqual([expected_pack_name], pack_names)

    def test_resume_malformed_token(self):
        self.vfs_transport_factory = memory.MemoryServer
        # Make a repository with a suspended write group
        repo = self.make_repository('repo', format=self.get_format())
        token = self._lock_write(repo).repository_token
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Make a new repository
        new_repo = self.make_repository('new_repo', format=self.get_format())
        token = self._lock_write(new_repo).repository_token
        hacked_wg_token = (
            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
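        # A token that smuggles in path separators must be rejected, otherwise
        # resuming it could reach into another repository's uploaded pack data.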
        self.assertRaises(
            errors.UnresumableWriteGroup,
            new_repo.resume_write_group, [hacked_wg_token])


class TestPackRepositoryStacking(TestCaseWithTransport):

    """Tests for stacking pack repositories"""

    def setUp(self):
        if not self.format_supports_external_lookups:
            raise TestNotApplicable("%r doesn't support stacking"
                % (self.format_name,))
        super(TestPackRepositoryStacking, self).setUp()

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_stack_checks_rich_root_compatibility(self):
        # early versions of the packing code relied on pack internals to
        # stack, but the current version should be able to stack on any
        # format.
        #
        # TODO: Possibly this should be run per-repository-format and raise
        # TestNotApplicable on formats that don't support stacking. -- mbp
        # 20080729
        repo = self.make_repository('repo', format=self.get_format())
        if repo.supports_rich_root():
            # can only stack on repositories that have compatible internal
            # metadata
            if getattr(repo._format, 'supports_tree_reference', False):
                matching_format_name = 'pack-0.92-subtree'
            else:
                if repo._format.supports_chks:
                    matching_format_name = '2a'
                else:
                    matching_format_name = 'rich-root-pack'
            mismatching_format_name = 'pack-0.92'
        else:
            # We don't have a non-rich-root CHK format.
            if repo._format.supports_chks:
                raise AssertionError("no non-rich-root CHK formats known")
            else:
                matching_format_name = 'pack-0.92'
            mismatching_format_name = 'pack-0.92-subtree'
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'.*Repository.*/repo/.*\n'
            r'different rich-root support')

    def test_stack_checks_serializers_compatibility(self):
        repo = self.make_repository('repo', format=self.get_format())
        if getattr(repo._format, 'supports_tree_reference', False):
            # can only stack on repositories that have compatible internal
            # metadata
            matching_format_name = 'pack-0.92-subtree'
            mismatching_format_name = 'rich-root-pack'
        else:
            if repo.supports_rich_root():
                if repo._format.supports_chks:
                    matching_format_name = '2a'
                else:
                    matching_format_name = 'rich-root-pack'
                mismatching_format_name = 'pack-0.92-subtree'
            else:
                raise TestNotApplicable('No formats use non-v5 serializer'
                    ' without having rich-root also set')
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'.*Repository.*/repo/.*\n'
            r'different serializers')

    def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        referencing = self.make_branch_and_tree('repo', format=self.get_format())
        referencing.branch.repository.add_fallback_repository(base.branch.repository)
        local_tree = referencing.branch.create_checkout('local')
        local_tree.commit('bar')
        new_instance = referencing.bzrdir.open_repository()
        new_instance.lock_read()
        self.addCleanup(new_instance.unlock)
        new_instance._pack_collection.ensure_loaded()
        self.assertEqual(1, len(new_instance._pack_collection.all_packs()))

    def test_autopack_only_considers_main_repo_packs(self):
        format = self.get_format()
        base = self.make_branch_and_tree('base', format=format)
        base.commit('foo')
        tree = self.make_branch_and_tree('repo', format=format)
        tree.branch.repository.add_fallback_repository(base.branch.repository)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        local_tree = tree.branch.create_checkout('local')
        for x in range(9):
            local_tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        local_tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        local_tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)


class TestKeyDependencies(TestCaseWithTransport):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def create_source_and_target(self):
        builder = self.make_branch_builder('source', format=self.get_format())
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
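        # 'ghost-id' never gets a snapshot of its own, so B-id ends up with a
        # ghost parent reference.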
        builder.finish_series()
        repo = self.make_repository('target', format=self.get_format())
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        return b.repository, repo

    def test_key_dependencies_cleared_on_abort(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                                                             'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.abort_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_suspend(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                                                             'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.suspend_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_commit(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            # Copy all texts, inventories, and chks so that nothing is missing
            # for revision B-id.
            for vf_name in ['texts', 'chk_bytes', 'inventories']:
                source_vf = getattr(source_repo, vf_name, None)
                if source_vf is None:
                    continue
                target_vf = getattr(target_repo, vf_name)
                stream = source_vf.get_record_stream(
                    source_vf.keys(), 'unordered', True)
                target_vf.insert_record_stream(stream)
            # Copy just revision B-id
            stream = source_repo.revisions.get_record_stream(
                [('B-id',)], 'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.commit_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))


class TestSmartServerAutopack(TestCaseWithTransport):

    def setUp(self):
        super(TestSmartServerAutopack, self).setUp()
        # Create a smart server that publishes whatever the backing VFS server
        # does.
        self.smart_server = test_server.SmartTCPServer_for_testing()
        self.start_server(self.smart_server, self.get_server())
        # Log all HPSS calls into self.hpss_calls.
        client._SmartClient.hooks.install_named_hook(
            'call', self.capture_hpss_call, None)
        self.hpss_calls = []

    def capture_hpss_call(self, params):
        self.hpss_calls.append(params.method)

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
        # Make local and remote repos
        format = self.get_format()
        tree = self.make_branch_and_tree('local', format=format)
        self.make_branch_and_tree('remote', format=format)
        remote_branch_url = self.smart_server.get_url() + 'remote'
        remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
        # Make 9 local revisions, and push them one at a time to the remote
        # repo to produce 9 pack files.
        for x in range(9):
            tree.commit('commit %s' % x)
            tree.branch.push(remote_branch)
        # Make one more push to trigger an autopack
        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
        autopack_calls = len([call for call in self.hpss_calls if call ==
            'PackRepository.autopack'])
        streaming_calls = len([call for call in self.hpss_calls if call in
            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
        if autopack_calls:
            # Non streaming server
            self.assertEqual(1, autopack_calls)
            self.assertEqual(0, streaming_calls)
        else:
            # Streaming was used, which autopacks on the remote end.
            self.assertEqual(0, autopack_calls)
            # NB: The 2 calls are because of the sanity check that the server
            # supports the verb (see remote.py:RemoteSink.insert_stream for
            # details).
            self.assertEqual(2, streaming_calls)


def load_tests(basic_tests, module, loader):
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
    scenarios_params = [
         dict(format_name='pack-0.92',
              format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
              format_supports_external_lookups=False,
              index_class=GraphIndex),
         dict(format_name='pack-0.92-subtree',
              format_string="Bazaar pack repository format 1 "
              "with subtree support (needs bzr 0.92)\n",
              format_supports_external_lookups=False,
              index_class=GraphIndex),
         dict(format_name='1.6',
              format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
              format_supports_external_lookups=True,
              index_class=GraphIndex),
         dict(format_name='1.6.1-rich-root',
              format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
                  "(bzr 1.6.1)\n",
              format_supports_external_lookups=True,
              index_class=GraphIndex),
         dict(format_name='1.9',
              format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
              format_supports_external_lookups=True,
              index_class=BTreeGraphIndex),
         dict(format_name='1.9-rich-root',
              format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
                  "(bzr 1.9)\n",
              format_supports_external_lookups=True,
              index_class=BTreeGraphIndex),
         dict(format_name='2a',
              format_string="Bazaar repository format 2a "
                "(needs bzr 1.16 or later)\n",
              format_supports_external_lookups=True,
              index_class=BTreeGraphIndex),
         ]
    # name of the scenario is the format name
    scenarios = [(s['format_name'], s) for s in scenarios_params]
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())
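
# Each scenario dict above is applied as instance attributes when the tests
# are multiplied, so a test running under e.g. the 'pack-0.92' scenario sees
# self.format_name, self.format_string, self.format_supports_external_lookups
# and self.index_class set from that dict.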