~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/test_fetch.py

  • Committer: Aaron Bentley
  • Date: 2007-12-09 23:53:50 UTC
  • mto: This revision was merged to the branch mainline in revision 3133.
  • Revision ID: aaron.bentley@utoronto.ca-20071209235350-qp39yk0xzx7a4f6p
Don't use the base if not cherrypicking

@@ -22,16 +22,17 @@
 from bzrlib import (
     bzrdir,
     errors,
-    osutils,
     merge,
     repository,
-    versionedfile,
     )
 from bzrlib.branch import Branch
 from bzrlib.bzrdir import BzrDir
 from bzrlib.repofmt import knitrepo
+from bzrlib.symbol_versioning import (
+    zero_ninetyone,
+    )
 from bzrlib.tests import TestCaseWithTransport
-from bzrlib.tests.http_utils import TestCaseWithWebserver
+from bzrlib.tests.HTTPTestUtil import TestCaseWithWebserver
 from bzrlib.tests.test_revision import make_branches
 from bzrlib.trace import mutter
 from bzrlib.upgrade import Convert
@@ -148,35 +149,25 @@
         branch = self.make_branch('branch', format=knit2_format)
         branch.pull(tree.branch, stop_revision='rev1')
         repo = branch.repository
-        repo.lock_read()
-        try:
-            # Make sure fetch retrieved only what we requested
-            self.assertEqual({('tree-root', 'rev1'):()},
-                repo.texts.get_parent_map(
-                    [('tree-root', 'rev1'), ('tree-root', 'rev2')]))
-        finally:
-            repo.unlock()
+        root_knit = repo.weave_store.get_weave('tree-root',
+                                                repo.get_transaction())
+        # Make sure fetch retrieved only what we requested
+        self.assertTrue('rev1' in root_knit)
+        self.assertTrue('rev2' not in root_knit)
         branch.pull(tree.branch)
+        root_knit = repo.weave_store.get_weave('tree-root',
+                                                repo.get_transaction())
         # Make sure that the next revision in the root knit was retrieved,
         # even though the text, name, parent_id, etc., were unchanged.
-        repo.lock_read()
-        try:
-            # Make sure fetch retrieved only what we requested
-            self.assertEqual({('tree-root', 'rev2'):(('tree-root', 'rev1'),)},
-                repo.texts.get_parent_map([('tree-root', 'rev2')]))
-        finally:
-            repo.unlock()
+        self.assertTrue('rev2' in root_knit)
 
     def test_fetch_incompatible(self):
         knit_tree = self.make_branch_and_tree('knit', format='knit')
         knit3_tree = self.make_branch_and_tree('knit3',
             format='dirstate-with-subtree')
         knit3_tree.commit('blah')
-        e = self.assertRaises(errors.IncompatibleRepositories,
-                              knit_tree.branch.fetch, knit3_tree.branch)
-        self.assertContainsRe(str(e),
-            r"(?m).*/knit.*\nis not compatible with\n.*/knit3/.*\n"
-            r"different rich-root support")
+        self.assertRaises(errors.IncompatibleRepositories,
+                          knit_tree.branch.fetch, knit3_tree.branch)
 
 
 class TestMergeFetch(TestCaseWithTransport):
@@ -294,8 +285,6 @@
         # unfortunately this log entry is branch format specific. We could
         # factor out the 'what files does this format use' to a method on the
         # repository, which would let us to this generically. RBC 20060419
-        # RBC 20080408: Or perhaps we can assert that no files are fully read
-        # twice?
         self.assertEqual(1, self._count_log_matches('/ce/id.kndx', http_logs))
         self.assertEqual(1, self._count_log_matches('/ce/id.knit', http_logs))
         self.assertEqual(1, self._count_log_matches('inventory.kndx', http_logs))
@@ -307,14 +296,8 @@
                                                      http_logs))
         # FIXME naughty poking in there.
         self.get_readonly_server().logs = []
-        # check there is nothing more to fetch.  We take care to re-use the
-        # existing transport so that the request logs we're about to examine
-        # aren't cluttered with redundant probes for a smart server.
-        # XXX: Perhaps this further parameterisation: test http with smart
-        # server, and test http without smart server?
-        source = Branch.open(
-            self.get_readonly_url("source/"),
-            possible_transports=[source.bzrdir.root_transport])
+        # check there is nothing more to fetch
+        source = Branch.open(self.get_readonly_url("source/"))
         self.assertEqual(target.fetch(source), (0, []))
         # should make just two requests
         http_logs = self.get_readonly_server().logs
@@ -322,283 +305,9 @@
         self.log('\n'.join(http_logs))
         self.assertEqual(1, self._count_log_matches('branch-format', http_logs))
         self.assertEqual(1, self._count_log_matches('branch/format', http_logs))
-        self.assertEqual(1, self._count_log_matches('repository/format',
-            http_logs))
+        self.assertEqual(1, self._count_log_matches('repository/format', http_logs))
         self.assertTrue(1 >= self._count_log_matches('revision-history',
                                                      http_logs))
         self.assertTrue(1 >= self._count_log_matches('last-revision',
                                                      http_logs))
         self.assertEqual(4, len(http_logs))
-
-
-class TestKnitToPackFetch(TestCaseWithTransport):
-
-    def find_get_record_stream(self, calls):
-        """In a list of calls, find 'get_record_stream' calls.
-
-        This also ensures that there is only one get_record_stream call.
-        """
-        get_record_call = None
-        for call in calls:
-            if call[0] == 'get_record_stream':
-                self.assertIs(None, get_record_call,
-                              "there should only be one call to"
-                              " get_record_stream")
-                get_record_call = call
-        self.assertIsNot(None, get_record_call,
-                         "there should be exactly one call to "
-                         " get_record_stream")
-        return get_record_call
-
-    def test_fetch_with_deltas_no_delta_closure(self):
-        tree = self.make_branch_and_tree('source', format='dirstate')
-        target = self.make_repository('target', format='pack-0.92')
-        self.build_tree(['source/file'])
-        tree.set_root_id('root-id')
-        tree.add('file', 'file-id')
-        tree.commit('one', rev_id='rev-one')
-        source = tree.branch.repository
-        source.texts = versionedfile.RecordingVersionedFilesDecorator(
-                        source.texts)
-        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
-                        source.signatures)
-        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
-                        source.revisions)
-        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
-                        source.inventories)
-        # precondition
-        self.assertTrue(target._fetch_uses_deltas)
-        target.fetch(source, revision_id='rev-one')
-        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
-                          target._fetch_order, False),
-                         self.find_get_record_stream(source.texts.calls))
-        self.assertEqual(('get_record_stream', [('rev-one',)],
-                          target._fetch_order, False),
-                         self.find_get_record_stream(source.inventories.calls))
-        self.assertEqual(('get_record_stream', [('rev-one',)],
-                          target._fetch_order, False),
-                         self.find_get_record_stream(source.revisions.calls))
-        # XXX: Signatures is special, and slightly broken. The
-        # standard item_keys_introduced_by actually does a lookup for every
-        # signature to see if it exists, rather than waiting to do them all at
-        # once at the end. The fetch code then does an all-at-once and just
-        # allows for some of them to be missing.
-        # So we know there will be extra calls, but the *last* one is the one
-        # we care about.
-        signature_calls = source.signatures.calls[-1:]
-        self.assertEqual(('get_record_stream', [('rev-one',)],
-                          target._fetch_order, False),
-                         self.find_get_record_stream(signature_calls))
-
-    def test_fetch_no_deltas_with_delta_closure(self):
-        tree = self.make_branch_and_tree('source', format='dirstate')
-        target = self.make_repository('target', format='pack-0.92')
-        self.build_tree(['source/file'])
-        tree.set_root_id('root-id')
-        tree.add('file', 'file-id')
-        tree.commit('one', rev_id='rev-one')
-        source = tree.branch.repository
-        source.texts = versionedfile.RecordingVersionedFilesDecorator(
-                        source.texts)
-        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
-                        source.signatures)
-        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
-                        source.revisions)
-        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
-                        source.inventories)
-        target._fetch_uses_deltas = False
-        target.fetch(source, revision_id='rev-one')
-        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
-                          target._fetch_order, True),
-                         self.find_get_record_stream(source.texts.calls))
-        self.assertEqual(('get_record_stream', [('rev-one',)],
-                          target._fetch_order, True),
-                         self.find_get_record_stream(source.inventories.calls))
-        self.assertEqual(('get_record_stream', [('rev-one',)],
-                          target._fetch_order, True),
-                         self.find_get_record_stream(source.revisions.calls))
-        # XXX: Signatures is special, and slightly broken. The
-        # standard item_keys_introduced_by actually does a lookup for every
-        # signature to see if it exists, rather than waiting to do them all at
-        # once at the end. The fetch code then does an all-at-once and just
-        # allows for some of them to be missing.
-        # So we know there will be extra calls, but the *last* one is the one
-        # we care about.
-        signature_calls = source.signatures.calls[-1:]
-        self.assertEqual(('get_record_stream', [('rev-one',)],
-                          target._fetch_order, True),
-                         self.find_get_record_stream(signature_calls))
-
-    def test_fetch_revisions_with_deltas_into_pack(self):
-        # See BUG #261339, dev versions of bzr could accidentally create deltas
-        # in revision texts in knit branches (when fetching from packs). So we
-        # ensure that *if* a knit repository has a delta in revisions, that it
-        # gets properly expanded back into a fulltext when stored in the pack
-        # file.
-        tree = self.make_branch_and_tree('source', format='dirstate')
-        target = self.make_repository('target', format='pack-0.92')
-        self.build_tree(['source/file'])
-        tree.set_root_id('root-id')
-        tree.add('file', 'file-id')
-        tree.commit('one', rev_id='rev-one')
-        # Hack the KVF for revisions so that it "accidentally" allows a delta
-        tree.branch.repository.revisions._max_delta_chain = 200
-        tree.commit('two', rev_id='rev-two')
-        source = tree.branch.repository
-        # Ensure that we stored a delta
-        source.lock_read()
-        self.addCleanup(source.unlock)
-        record = source.revisions.get_record_stream([('rev-two',)],
-            'unordered', False).next()
-        self.assertEqual('knit-delta-gz', record.storage_kind)
-        target.fetch(tree.branch.repository, revision_id='rev-two')
-        # The record should get expanded back to a fulltext
-        target.lock_read()
-        self.addCleanup(target.unlock)
-        record = target.revisions.get_record_stream([('rev-two',)],
-            'unordered', False).next()
-        self.assertEqual('knit-ft-gz', record.storage_kind)
-
-    def test_fetch_with_fallback_and_merge(self):
-        builder = self.make_branch_builder('source', format='pack-0.92')
-        builder.start_series()
-        # graph
-        #   A
-        #   |\
-        #   B C
-        #   | |
-        #   | D
-        #   | |
-        #   | E
-        #    \|
-        #     F
-        # A & B are present in the base (stacked-on) repository, A-E are
-        # present in the source.
-        # This reproduces bug #304841
-        # We need a large enough inventory that total size of compressed deltas
-        # is shorter than the size of a compressed fulltext. We have to use
-        # random ids because otherwise the inventory fulltext compresses too
-        # well and the deltas get bigger.
-        to_add = [
-            ('add', ('', 'TREE_ROOT', 'directory', None))]
-        for i in xrange(10):
-            fname = 'file%03d' % (i,)
-            fileid = '%s-%s' % (fname, osutils.rand_chars(64))
-            to_add.append(('add', (fname, fileid, 'file', 'content\n')))
-        builder.build_snapshot('A', None, to_add)
-        builder.build_snapshot('B', ['A'], [])
-        builder.build_snapshot('C', ['A'], [])
-        builder.build_snapshot('D', ['C'], [])
-        builder.build_snapshot('E', ['D'], [])
-        builder.build_snapshot('F', ['E', 'B'], [])
-        builder.finish_series()
-        source_branch = builder.get_branch()
-        source_branch.bzrdir.sprout('base', revision_id='B')
-        target_branch = self.make_branch('target', format='1.6')
-        target_branch.set_stacked_on_url('../base')
-        source = source_branch.repository
-        source.lock_read()
-        self.addCleanup(source.unlock)
-        source.inventories = versionedfile.OrderingVersionedFilesDecorator(
-                        source.inventories,
-                        key_priority={('E',): 1, ('D',): 2, ('C',): 4,
-                                      ('F',): 3})
-        # Ensure that the content is yielded in the proper order, and given as
-        # the expected kinds
-        records = [(record.key, record.storage_kind)
-                   for record in source.inventories.get_record_stream(
-                        [('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
-        self.assertEqual([(('E',), 'knit-delta-gz'), (('D',), 'knit-delta-gz'),
-                          (('F',), 'knit-delta-gz'), (('C',), 'knit-delta-gz')],
-                          records)
-
-        target_branch.lock_write()
-        self.addCleanup(target_branch.unlock)
-        target = target_branch.repository
-        target.fetch(source, revision_id='F')
-        # 'C' should be expanded to a fulltext, but D and E should still be
-        # deltas
-        stream = target.inventories.get_record_stream(
-            [('C',), ('D',), ('E',), ('F',)],
-            'unordered', False)
-        kinds = dict((record.key, record.storage_kind) for record in stream)
-        self.assertEqual({('C',): 'knit-ft-gz', ('D',): 'knit-delta-gz',
-                          ('E',): 'knit-delta-gz', ('F',): 'knit-delta-gz'},
-                         kinds)
-
-
-class Test1To2Fetch(TestCaseWithTransport):
-    """Tests for Model1To2 failure modes"""
-
-    def make_tree_and_repo(self):
-        self.tree = self.make_branch_and_tree('tree', format='pack-0.92')
-        self.repo = self.make_repository('rich-repo', format='rich-root-pack')
-        self.repo.lock_write()
-        self.addCleanup(self.repo.unlock)
-
-    def do_fetch_order_test(self, first, second):
-        """Test that fetch works no matter what the set order of revision is.
-
-        This test depends on the order of items in a set, which is
-        implementation-dependant, so we test A, B and then B, A.
-        """
-        self.make_tree_and_repo()
-        self.tree.commit('Commit 1', rev_id=first)
-        self.tree.commit('Commit 2', rev_id=second)
-        self.repo.fetch(self.tree.branch.repository, second)
-
-    def test_fetch_order_AB(self):
-        """See do_fetch_order_test"""
-        self.do_fetch_order_test('A', 'B')
-
-    def test_fetch_order_BA(self):
-        """See do_fetch_order_test"""
-        self.do_fetch_order_test('B', 'A')
-
-    def get_parents(self, file_id, revision_id):
-        self.repo.lock_read()
-        try:
-            parent_map = self.repo.texts.get_parent_map([(file_id, revision_id)])
-            return parent_map[(file_id, revision_id)]
-        finally:
-            self.repo.unlock()
-
-    def test_fetch_ghosts(self):
-        self.make_tree_and_repo()
-        self.tree.commit('first commit', rev_id='left-parent')
-        self.tree.add_parent_tree_id('ghost-parent')
-        fork = self.tree.bzrdir.sprout('fork', 'null:').open_workingtree()
-        fork.commit('not a ghost', rev_id='not-ghost-parent')
-        self.tree.branch.repository.fetch(fork.branch.repository,
-                                     'not-ghost-parent')
-        self.tree.add_parent_tree_id('not-ghost-parent')
-        self.tree.commit('second commit', rev_id='second-id')
-        self.repo.fetch(self.tree.branch.repository, 'second-id')
-        root_id = self.tree.get_root_id()
-        self.assertEqual(
-            ((root_id, 'left-parent'), (root_id, 'ghost-parent'),
-             (root_id, 'not-ghost-parent')),
-            self.get_parents(root_id, 'second-id'))
-
-    def make_two_commits(self, change_root, fetch_twice):
-        self.make_tree_and_repo()
-        self.tree.commit('first commit', rev_id='first-id')
-        if change_root:
-            self.tree.set_root_id('unique-id')
-        self.tree.commit('second commit', rev_id='second-id')
-        if fetch_twice:
-            self.repo.fetch(self.tree.branch.repository, 'first-id')
-        self.repo.fetch(self.tree.branch.repository, 'second-id')
-
-    def test_fetch_changed_root(self):
-        self.make_two_commits(change_root=True, fetch_twice=False)
-        self.assertEqual((), self.get_parents('unique-id', 'second-id'))
-
-    def test_two_fetch_changed_root(self):
-        self.make_two_commits(change_root=True, fetch_twice=True)
-        self.assertEqual((), self.get_parents('unique-id', 'second-id'))
-
-    def test_two_fetches(self):
-        self.make_two_commits(change_root=False, fetch_twice=True)
-        self.assertEqual((('TREE_ROOT', 'first-id'),),
-            self.get_parents('TREE_ROOT', 'second-id'))
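
Editorial note on the first hunk of TestDependentBranches-style fetch checking above: the same check appears written against two generations of bzrlib's text-storage API, repo.texts.get_parent_map() keyed by (file_id, revision_id) on one side of the hunk, and repo.weave_store.get_weave() plus a membership test on the other. As a rough illustration only (the helper name and the hasattr dispatch are hypothetical and not part of the diff; the individual calls are taken verbatim from the hunk), the difference looks like this:

    def root_text_fetched(repo):
        # Hypothetical helper: report whether the root text of 'rev1' was
        # fetched, using whichever storage API this repository exposes.
        if hasattr(repo, 'texts'):
            # Keyed VersionedFiles API: parent maps are queried by
            # (file_id, revision_id) keys under a read lock.
            repo.lock_read()
            try:
                parents = repo.texts.get_parent_map([('tree-root', 'rev1')])
                return ('tree-root', 'rev1') in parents
            finally:
                repo.unlock()
        else:
            # Older weave_store API: fetch the per-file knit and test
            # membership of the revision id directly.
            root_knit = repo.weave_store.get_weave('tree-root',
                                                   repo.get_transaction())
            return 'rev1' in root_knit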