~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/tests/test_fetch.py

merge only needs a lock_tree_write() on the working tree, not a full lock_write()
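
The locking change named above lands in bzrlib's merge code, not in this test file; the hunks below only update the tests. As a rough sketch of the distinction (a hypothetical illustration, not code from this revision; the paths are examples only), lock_tree_write() write-locks only the working tree's own state while leaving its branch merely read-locked, which is all a merge needs:

    from bzrlib import branch, workingtree

    # Hypothetical illustration of the two lock scopes; not code from this revision.
    wt = workingtree.WorkingTree.open('br2')   # tree whose contents the merge rewrites
    other = branch.Branch.open('br1')          # branch we merge from

    wt.lock_tree_write()   # write-locks the tree state, only read-locks wt.branch
    try:
        wt.merge_from_branch(other)   # merge changes the tree, never the branch
    finally:
        wt.unlock()

    # wt.lock_write() would additionally write-lock wt.branch, which merge does
    # not need because it never moves the branch's last revision.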

=== modified file 'bzrlib/tests/test_fetch.py'
@@ -1,4 +1,4 @@
-# Copyright (C) 2005 Canonical Ltd
+# Copyright (C) 2005, 2007 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -18,18 +18,29 @@
 import re
 import sys
 
-from bzrlib import bzrdir, repository
+import bzrlib
+from bzrlib import (
+    bzrdir,
+    errors,
+    osutils,
+    merge,
+    repository,
+    versionedfile,
+    )
 from bzrlib.branch import Branch
 from bzrlib.bzrdir import BzrDir
-from bzrlib.builtins import merge
-import bzrlib.errors
+from bzrlib.repofmt import knitrepo
 from bzrlib.tests import TestCaseWithTransport
-from bzrlib.tests.HTTPTestUtil import TestCaseWithWebserver
+from bzrlib.tests.http_utils import TestCaseWithWebserver
 from bzrlib.tests.test_revision import make_branches
 from bzrlib.trace import mutter
 from bzrlib.upgrade import Convert
 from bzrlib.workingtree import WorkingTree
 
+# These tests are a bit old; please instead add new tests into
+# interrepository_implementations/ so they'll run on all relevant
+# combinations.
+
 
 def has_revision(branch, revision_id):
     return branch.repository.has_revision(revision_id)
@@ -89,16 +100,13 @@
     self.assertEquals(fetched, 3, "fetched %d instead of 3" % fetched)
     # InstallFailed should be raised if the branch is missing the revision
     # that was requested.
-    self.assertRaises(bzrlib.errors.InstallFailed, br_a3.fetch, br_a2, 'pizza')
-    # InstallFailed should be raised if the branch is missing a revision
-    # from its own revision history
-    br_a2.append_revision('a-b-c')
-    self.assertRaises(bzrlib.errors.InstallFailed, br_a3.fetch, br_a2)
+    self.assertRaises(errors.InstallFailed, br_a3.fetch, br_a2, 'pizza')
 
-    # TODO: jam 20051218 Branch should no longer allow append_revision for revisions
-    #       which don't exist. So this test needs to be rewritten
-    #       RBC 20060403 the way to do this is to uncommit the revision from the
-    #           repository after the commit
+    # TODO: Test trying to fetch from a branch that points to a revision not
+    # actually present in its repository.  Not every branch format allows you
+    # to directly point to such revisions, so it's a bit complicated to
+    # construct.  One way would be to uncommit and gc the revision, but not
+    # every branch supports that.  -- mbp 20070814
 
     #TODO: test that fetch correctly does reweaving when needed. RBC 20051008
     # Note that this means - updating the weave when ghosts are filled in to 
@@ -109,7 +117,7 @@
 
     def test_fetch(self):
         #highest indices a: 5, b: 7
-        br_a, br_b = make_branches(self)
+        br_a, br_b = make_branches(self, format='dirstate-tags')
         fetch_steps(self, br_a, br_b, br_a)
 
     def test_fetch_self(self):
@@ -123,9 +131,9 @@
         corresponding filename, parent, contents or other changes.
         """
         knit1_format = bzrdir.BzrDirMetaFormat1()
-        knit1_format.repository_format = repository.RepositoryFormatKnit1()
+        knit1_format.repository_format = knitrepo.RepositoryFormatKnit1()
         knit2_format = bzrdir.BzrDirMetaFormat1()
-        knit2_format.repository_format = repository.RepositoryFormatKnit2()
+        knit2_format.repository_format = knitrepo.RepositoryFormatKnit3()
         # we start with a knit1 repository because that causes the
         # root revision to change for each commit, even though the content,
         # parent, name, and other attributes are unchanged.
@@ -140,17 +148,35 @@
         branch = self.make_branch('branch', format=knit2_format)
         branch.pull(tree.branch, stop_revision='rev1')
         repo = branch.repository
-        root_knit = repo.weave_store.get_weave('tree-root',
-                                                repo.get_transaction())
-        # Make sure fetch retrieved only what we requested
-        self.assertTrue('rev1' in root_knit)
-        self.assertTrue('rev2' not in root_knit)
+        repo.lock_read()
+        try:
+            # Make sure fetch retrieved only what we requested
+            self.assertEqual({('tree-root', 'rev1'):()},
+                repo.texts.get_parent_map(
+                    [('tree-root', 'rev1'), ('tree-root', 'rev2')]))
+        finally:
+            repo.unlock()
         branch.pull(tree.branch)
-        root_knit = repo.weave_store.get_weave('tree-root',
-                                                repo.get_transaction())
         # Make sure that the next revision in the root knit was retrieved,
         # even though the text, name, parent_id, etc., were unchanged.
-        self.assertTrue('rev2' in root_knit)
+        repo.lock_read()
+        try:
+            # Make sure fetch retrieved only what we requested
+            self.assertEqual({('tree-root', 'rev2'):(('tree-root', 'rev1'),)},
+                repo.texts.get_parent_map([('tree-root', 'rev2')]))
+        finally:
+            repo.unlock()
+
+    def test_fetch_incompatible(self):
+        knit_tree = self.make_branch_and_tree('knit', format='knit')
+        knit3_tree = self.make_branch_and_tree('knit3',
+            format='dirstate-with-subtree')
+        knit3_tree.commit('blah')
+        e = self.assertRaises(errors.IncompatibleRepositories,
+                              knit_tree.branch.fetch, knit3_tree.branch)
+        self.assertContainsRe(str(e),
+            r"(?m).*/knit.*\nis not compatible with\n.*/knit3/.*\n"
+            r"different rich-root support")
 
 
 class TestMergeFetch(TestCaseWithTransport):
@@ -164,8 +190,7 @@
         wt2 = self.make_branch_and_tree('br2')
         br2 = wt2.branch
         wt2.commit(message='rev 2-1', rev_id='2-1')
-        merge(other_revision=['br1', -1], base_revision=['br1', 0],
-              this_dir='br2')
+        wt2.merge_from_branch(br1, from_revision='null:')
         self._check_revs_present(br2)
 
     def test_merge_fetches(self):
@@ -176,9 +201,9 @@
         dir_2 = br1.bzrdir.sprout('br2')
         br2 = dir_2.open_branch()
         wt1.commit(message='rev 1-2', rev_id='1-2')
-        dir_2.open_workingtree().commit(message='rev 2-1', rev_id='2-1')
-        merge(other_revision=['br1', -1], base_revision=[None, None], 
-              this_dir='br2')
+        wt2 = dir_2.open_workingtree()
+        wt2.commit(message='rev 2-1', rev_id='2-1')
+        wt2.merge_from_branch(br1)
         self._check_revs_present(br2)
 
     def _check_revs_present(self, br2):
@@ -213,8 +238,10 @@
     def test_merge_fetches_file_history(self):
         """Merge brings across file histories"""
         br2 = Branch.open('br2')
-        merge(other_revision=['br1', -1], base_revision=[None, None], 
-              this_dir='br2')
+        br1 = Branch.open('br1')
+        wt2 = WorkingTree.open('br2').merge_from_branch(br1)
+        br2.lock_read()
+        self.addCleanup(br2.unlock)
         for rev_id, text in [('1-2', 'original from 1\n'),
                              ('1-3', 'agreement\n'),
                              ('2-1', 'contents in 2\n'),
@@ -248,11 +275,12 @@
 
     def test_weaves_are_retrieved_once(self):
         self.build_tree(("source/", "source/file", "target/"))
-        wt = self.make_branch_and_tree('source')
+        # This test depends on knit dasta storage.
+        wt = self.make_branch_and_tree('source', format='dirstate-tags')
         branch = wt.branch
         wt.add(["file"], ["id"])
         wt.commit("added file")
-        print >>open("source/file", 'w'), "blah"
+        open("source/file", 'w').write("blah\n")
        wt.commit("changed file")
         target = BzrDir.create_branch_and_repo("target/")
         source = Branch.open(self.get_readonly_url("source/"))
@@ -266,16 +294,27 @@
         # unfortunately this log entry is branch format specific. We could 
         # factor out the 'what files does this format use' to a method on the 
         # repository, which would let us to this generically. RBC 20060419
+        # RBC 20080408: Or perhaps we can assert that no files are fully read
+        # twice?
         self.assertEqual(1, self._count_log_matches('/ce/id.kndx', http_logs))
         self.assertEqual(1, self._count_log_matches('/ce/id.knit', http_logs))
         self.assertEqual(1, self._count_log_matches('inventory.kndx', http_logs))
         # this r-h check test will prevent regressions, but it currently already 
         # passes, before the patch to cache-rh is applied :[
-        self.assertEqual(1, self._count_log_matches('revision-history', http_logs))
+        self.assertTrue(1 >= self._count_log_matches('revision-history',
+                                                     http_logs))
+        self.assertTrue(1 >= self._count_log_matches('last-revision',
+                                                     http_logs))
         # FIXME naughty poking in there.
         self.get_readonly_server().logs = []
-        # check there is nothing more to fetch
-        source = Branch.open(self.get_readonly_url("source/"))
+        # check there is nothing more to fetch.  We take care to re-use the
+        # existing transport so that the request logs we're about to examine
+        # aren't cluttered with redundant probes for a smart server.
+        # XXX: Perhaps this further parameterisation: test http with smart
+        # server, and test http without smart server?
+        source = Branch.open(
+            self.get_readonly_url("source/"),
+            possible_transports=[source.bzrdir.root_transport])
         self.assertEqual(target.fetch(source), (0, []))
         # should make just two requests
         http_logs = self.get_readonly_server().logs
@@ -283,6 +322,283 @@
         self.log('\n'.join(http_logs))
         self.assertEqual(1, self._count_log_matches('branch-format', http_logs))
         self.assertEqual(1, self._count_log_matches('branch/format', http_logs))
-        self.assertEqual(1, self._count_log_matches('repository/format', http_logs))
-        self.assertEqual(1, self._count_log_matches('revision-history', http_logs))
+        self.assertEqual(1, self._count_log_matches('repository/format',
+            http_logs))
+        self.assertTrue(1 >= self._count_log_matches('revision-history',
+                                                     http_logs))
+        self.assertTrue(1 >= self._count_log_matches('last-revision',
+                                                     http_logs))
         self.assertEqual(4, len(http_logs))
+
+
+class TestKnitToPackFetch(TestCaseWithTransport):
+
+    def find_get_record_stream(self, calls):
+        """In a list of calls, find 'get_record_stream' calls.
+
+        This also ensures that there is only one get_record_stream call.
+        """
+        get_record_call = None
+        for call in calls:
+            if call[0] == 'get_record_stream':
+                self.assertIs(None, get_record_call,
+                              "there should only be one call to"
+                              " get_record_stream")
+                get_record_call = call
+        self.assertIsNot(None, get_record_call,
+                         "there should be exactly one call to "
+                         " get_record_stream")
+        return get_record_call
+
+    def test_fetch_with_deltas_no_delta_closure(self):
+        tree = self.make_branch_and_tree('source', format='dirstate')
+        target = self.make_repository('target', format='pack-0.92')
+        self.build_tree(['source/file'])
+        tree.set_root_id('root-id')
+        tree.add('file', 'file-id')
+        tree.commit('one', rev_id='rev-one')
+        source = tree.branch.repository
+        source.texts = versionedfile.RecordingVersionedFilesDecorator(
+                        source.texts)
+        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
+                        source.signatures)
+        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
+                        source.revisions)
+        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
+                        source.inventories)
+        # precondition
+        self.assertTrue(target._fetch_uses_deltas)
+        target.fetch(source, revision_id='rev-one')
+        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
+                          target._fetch_order, False),
+                         self.find_get_record_stream(source.texts.calls))
+        self.assertEqual(('get_record_stream', [('rev-one',)],
+                          target._fetch_order, False),
+                         self.find_get_record_stream(source.inventories.calls))
+        self.assertEqual(('get_record_stream', [('rev-one',)],
+                          target._fetch_order, False),
+                         self.find_get_record_stream(source.revisions.calls))
+        # XXX: Signatures is special, and slightly broken. The
+        # standard item_keys_introduced_by actually does a lookup for every
+        # signature to see if it exists, rather than waiting to do them all at
+        # once at the end. The fetch code then does an all-at-once and just
+        # allows for some of them to be missing.
+        # So we know there will be extra calls, but the *last* one is the one
+        # we care about.
+        signature_calls = source.signatures.calls[-1:]
+        self.assertEqual(('get_record_stream', [('rev-one',)],
+                          target._fetch_order, False),
+                         self.find_get_record_stream(signature_calls))
+
+    def test_fetch_no_deltas_with_delta_closure(self):
+        tree = self.make_branch_and_tree('source', format='dirstate')
+        target = self.make_repository('target', format='pack-0.92')
+        self.build_tree(['source/file'])
+        tree.set_root_id('root-id')
+        tree.add('file', 'file-id')
+        tree.commit('one', rev_id='rev-one')
+        source = tree.branch.repository
+        source.texts = versionedfile.RecordingVersionedFilesDecorator(
+                        source.texts)
+        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
+                        source.signatures)
+        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
+                        source.revisions)
+        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
+                        source.inventories)
+        target._fetch_uses_deltas = False
+        target.fetch(source, revision_id='rev-one')
+        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
+                          target._fetch_order, True),
+                         self.find_get_record_stream(source.texts.calls))
+        self.assertEqual(('get_record_stream', [('rev-one',)],
+                          target._fetch_order, True),
+                         self.find_get_record_stream(source.inventories.calls))
+        self.assertEqual(('get_record_stream', [('rev-one',)],
+                          target._fetch_order, True),
+                         self.find_get_record_stream(source.revisions.calls))
+        # XXX: Signatures is special, and slightly broken. The
+        # standard item_keys_introduced_by actually does a lookup for every
+        # signature to see if it exists, rather than waiting to do them all at
+        # once at the end. The fetch code then does an all-at-once and just
+        # allows for some of them to be missing.
+        # So we know there will be extra calls, but the *last* one is the one
+        # we care about.
+        signature_calls = source.signatures.calls[-1:]
+        self.assertEqual(('get_record_stream', [('rev-one',)],
+                          target._fetch_order, True),
+                         self.find_get_record_stream(signature_calls))
+
+    def test_fetch_revisions_with_deltas_into_pack(self):
+        # See BUG #261339, dev versions of bzr could accidentally create deltas
+        # in revision texts in knit branches (when fetching from packs). So we
+        # ensure that *if* a knit repository has a delta in revisions, that it
+        # gets properly expanded back into a fulltext when stored in the pack
+        # file.
+        tree = self.make_branch_and_tree('source', format='dirstate')
+        target = self.make_repository('target', format='pack-0.92')
+        self.build_tree(['source/file'])
+        tree.set_root_id('root-id')
+        tree.add('file', 'file-id')
+        tree.commit('one', rev_id='rev-one')
+        # Hack the KVF for revisions so that it "accidentally" allows a delta
+        tree.branch.repository.revisions._max_delta_chain = 200
+        tree.commit('two', rev_id='rev-two')
+        source = tree.branch.repository
+        # Ensure that we stored a delta
+        source.lock_read()
+        self.addCleanup(source.unlock)
+        record = source.revisions.get_record_stream([('rev-two',)],
+            'unordered', False).next()
+        self.assertEqual('knit-delta-gz', record.storage_kind)
+        target.fetch(tree.branch.repository, revision_id='rev-two')
+        # The record should get expanded back to a fulltext
+        target.lock_read()
+        self.addCleanup(target.unlock)
+        record = target.revisions.get_record_stream([('rev-two',)],
+            'unordered', False).next()
+        self.assertEqual('knit-ft-gz', record.storage_kind)
+
+    def test_fetch_with_fallback_and_merge(self):
+        builder = self.make_branch_builder('source', format='pack-0.92')
+        builder.start_series()
+        # graph
+        #   A
+        #   |\
+        #   B C
+        #   | |
+        #   | D
+        #   | |
+        #   | E
+        #    \|
+        #     F
+        # A & B are present in the base (stacked-on) repository, A-E are
+        # present in the source.
+        # This reproduces bug #304841
+        # We need a large enough inventory that total size of compressed deltas
+        # is shorter than the size of a compressed fulltext. We have to use
+        # random ids because otherwise the inventory fulltext compresses too
+        # well and the deltas get bigger.
+        to_add = [
+            ('add', ('', 'TREE_ROOT', 'directory', None))]
+        for i in xrange(10):
+            fname = 'file%03d' % (i,)
+            fileid = '%s-%s' % (fname, osutils.rand_chars(64))
+            to_add.append(('add', (fname, fileid, 'file', 'content\n')))
+        builder.build_snapshot('A', None, to_add)
+        builder.build_snapshot('B', ['A'], [])
+        builder.build_snapshot('C', ['A'], [])
+        builder.build_snapshot('D', ['C'], [])
+        builder.build_snapshot('E', ['D'], [])
+        builder.build_snapshot('F', ['E', 'B'], [])
+        builder.finish_series()
+        source_branch = builder.get_branch()
+        source_branch.bzrdir.sprout('base', revision_id='B')
+        target_branch = self.make_branch('target', format='1.6')
+        target_branch.set_stacked_on_url('../base')
+        source = source_branch.repository
+        source.lock_read()
+        self.addCleanup(source.unlock)
+        source.inventories = versionedfile.OrderingVersionedFilesDecorator(
+                        source.inventories,
+                        key_priority={('E',): 1, ('D',): 2, ('C',): 4,
+                                      ('F',): 3})
+        # Ensure that the content is yielded in the proper order, and given as
+        # the expected kinds
+        records = [(record.key, record.storage_kind)
+                   for record in source.inventories.get_record_stream(
+                        [('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
+        self.assertEqual([(('E',), 'knit-delta-gz'), (('D',), 'knit-delta-gz'),
+                          (('F',), 'knit-delta-gz'), (('C',), 'knit-delta-gz')],
+                          records)
+
+        target_branch.lock_write()
+        self.addCleanup(target_branch.unlock)
+        target = target_branch.repository
+        target.fetch(source, revision_id='F')
+        # 'C' should be expanded to a fulltext, but D and E should still be
+        # deltas
+        stream = target.inventories.get_record_stream(
+            [('C',), ('D',), ('E',), ('F',)],
+            'unordered', False)
+        kinds = dict((record.key, record.storage_kind) for record in stream)
+        self.assertEqual({('C',): 'knit-ft-gz', ('D',): 'knit-delta-gz',
+                          ('E',): 'knit-delta-gz', ('F',): 'knit-delta-gz'},
+                         kinds)
+
+
+class Test1To2Fetch(TestCaseWithTransport):
+    """Tests for Model1To2 failure modes"""
+
+    def make_tree_and_repo(self):
+        self.tree = self.make_branch_and_tree('tree', format='pack-0.92')
+        self.repo = self.make_repository('rich-repo', format='rich-root-pack')
+        self.repo.lock_write()
+        self.addCleanup(self.repo.unlock)
+
+    def do_fetch_order_test(self, first, second):
+        """Test that fetch works no matter what the set order of revision is.
+
+        This test depends on the order of items in a set, which is
+        implementation-dependant, so we test A, B and then B, A.
+        """
+        self.make_tree_and_repo()
+        self.tree.commit('Commit 1', rev_id=first)
+        self.tree.commit('Commit 2', rev_id=second)
+        self.repo.fetch(self.tree.branch.repository, second)
+
+    def test_fetch_order_AB(self):
+        """See do_fetch_order_test"""
+        self.do_fetch_order_test('A', 'B')
+
+    def test_fetch_order_BA(self):
+        """See do_fetch_order_test"""
+        self.do_fetch_order_test('B', 'A')
+
+    def get_parents(self, file_id, revision_id):
+        self.repo.lock_read()
+        try:
+            parent_map = self.repo.texts.get_parent_map([(file_id, revision_id)])
+            return parent_map[(file_id, revision_id)]
+        finally:
+            self.repo.unlock()
+
+    def test_fetch_ghosts(self):
+        self.make_tree_and_repo()
+        self.tree.commit('first commit', rev_id='left-parent')
+        self.tree.add_parent_tree_id('ghost-parent')
+        fork = self.tree.bzrdir.sprout('fork', 'null:').open_workingtree()
+        fork.commit('not a ghost', rev_id='not-ghost-parent')
+        self.tree.branch.repository.fetch(fork.branch.repository,
+                                     'not-ghost-parent')
+        self.tree.add_parent_tree_id('not-ghost-parent')
+        self.tree.commit('second commit', rev_id='second-id')
+        self.repo.fetch(self.tree.branch.repository, 'second-id')
+        root_id = self.tree.get_root_id()
+        self.assertEqual(
+            ((root_id, 'left-parent'), (root_id, 'ghost-parent'),
+             (root_id, 'not-ghost-parent')),
+            self.get_parents(root_id, 'second-id'))
+
+    def make_two_commits(self, change_root, fetch_twice):
+        self.make_tree_and_repo()
+        self.tree.commit('first commit', rev_id='first-id')
+        if change_root:
+            self.tree.set_root_id('unique-id')
+        self.tree.commit('second commit', rev_id='second-id')
+        if fetch_twice:
+            self.repo.fetch(self.tree.branch.repository, 'first-id')
+        self.repo.fetch(self.tree.branch.repository, 'second-id')
+
+    def test_fetch_changed_root(self):
+        self.make_two_commits(change_root=True, fetch_twice=False)
+        self.assertEqual((), self.get_parents('unique-id', 'second-id'))
+
+    def test_two_fetch_changed_root(self):
+        self.make_two_commits(change_root=True, fetch_twice=True)
+        self.assertEqual((), self.get_parents('unique-id', 'second-id'))
+
+    def test_two_fetches(self):
+        self.make_two_commits(change_root=False, fetch_twice=True)
+        self.assertEqual((('TREE_ROOT', 'first-id'),),
+            self.get_parents('TREE_ROOT', 'second-id'))