13
13
# You should have received a copy of the GNU General Public License
14
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17
22
from bzrlib import (
21
revision as _mod_revision,
24
28
from bzrlib.branch import Branch
29
from bzrlib.bzrdir import BzrDir
25
30
from bzrlib.repofmt import knitrepo
31
from bzrlib.symbol_versioning import (
26
34
from bzrlib.tests import TestCaseWithTransport
35
from bzrlib.tests.http_utils import TestCaseWithWebserver
27
36
from bzrlib.tests.test_revision import make_branches
37
from bzrlib.trace import mutter
28
38
from bzrlib.upgrade import Convert
29
39
from bzrlib.workingtree import WorkingTree
31
41
# These tests are a bit old; please instead add new tests into
32
# per_interrepository/ so they'll run on all relevant
42
# interrepository_implementations/ so they'll run on all relevant
36
46
def has_revision(branch, revision_id):
37
47
return branch.repository.has_revision(revision_id)
40
def revision_history(branch):
43
graph = branch.repository.get_graph()
44
history = list(graph.iter_lefthand_ancestry(branch.last_revision(),
45
[_mod_revision.NULL_REVISION]))
52
49
def fetch_steps(self, br_a, br_b, writable_a):
53
50
"""A foreign test method for testing fetch locally and remotely."""
55
52
# TODO RBC 20060201 make this a repository test.
56
53
repo_b = br_b.repository
57
self.assertFalse(repo_b.has_revision(revision_history(br_a)[3]))
58
self.assertTrue(repo_b.has_revision(revision_history(br_a)[2]))
59
self.assertEquals(len(revision_history(br_b)), 7)
60
br_b.fetch(br_a, revision_history(br_a)[2])
54
self.assertFalse(repo_b.has_revision(br_a.revision_history()[3]))
55
self.assertTrue(repo_b.has_revision(br_a.revision_history()[2]))
56
self.assertEquals(len(br_b.revision_history()), 7)
57
self.assertEquals(br_b.fetch(br_a, br_a.revision_history()[2])[0], 0)
61
58
# branch.fetch is not supposed to alter the revision history
62
self.assertEquals(len(revision_history(br_b)), 7)
63
self.assertFalse(repo_b.has_revision(revision_history(br_a)[3]))
59
self.assertEquals(len(br_b.revision_history()), 7)
60
self.assertFalse(repo_b.has_revision(br_a.revision_history()[3]))
65
62
# fetching the next revision up in sample data copies one revision
66
br_b.fetch(br_a, revision_history(br_a)[3])
67
self.assertTrue(repo_b.has_revision(revision_history(br_a)[3]))
68
self.assertFalse(has_revision(br_a, revision_history(br_b)[6]))
69
self.assertTrue(br_a.repository.has_revision(revision_history(br_b)[5]))
63
self.assertEquals(br_b.fetch(br_a, br_a.revision_history()[3])[0], 1)
64
self.assertTrue(repo_b.has_revision(br_a.revision_history()[3]))
65
self.assertFalse(has_revision(br_a, br_b.revision_history()[6]))
66
self.assertTrue(br_a.repository.has_revision(br_b.revision_history()[5]))
71
68
# When a non-branch ancestor is missing, it should be unlisted...
72
69
# as its not reference from the inventory weave.
73
70
br_b4 = self.make_branch('br_4')
76
writable_a.fetch(br_b)
77
self.assertTrue(has_revision(br_a, revision_history(br_b)[3]))
78
self.assertTrue(has_revision(br_a, revision_history(br_b)[4]))
71
count, failures = br_b4.fetch(br_b)
72
self.assertEqual(count, 7)
73
self.assertEqual(failures, [])
75
self.assertEqual(writable_a.fetch(br_b)[0], 1)
76
self.assertTrue(has_revision(br_a, br_b.revision_history()[3]))
77
self.assertTrue(has_revision(br_a, br_b.revision_history()[4]))
80
79
br_b2 = self.make_branch('br_b2')
82
self.assertTrue(has_revision(br_b2, revision_history(br_b)[4]))
83
self.assertTrue(has_revision(br_b2, revision_history(br_a)[2]))
84
self.assertFalse(has_revision(br_b2, revision_history(br_a)[3]))
80
self.assertEquals(br_b2.fetch(br_b)[0], 7)
81
self.assertTrue(has_revision(br_b2, br_b.revision_history()[4]))
82
self.assertTrue(has_revision(br_b2, br_a.revision_history()[2]))
83
self.assertFalse(has_revision(br_b2, br_a.revision_history()[3]))
86
85
br_a2 = self.make_branch('br_a2')
88
self.assertTrue(has_revision(br_a2, revision_history(br_b)[4]))
89
self.assertTrue(has_revision(br_a2, revision_history(br_a)[3]))
90
self.assertTrue(has_revision(br_a2, revision_history(br_a)[2]))
86
self.assertEquals(br_a2.fetch(br_a)[0], 9)
87
self.assertTrue(has_revision(br_a2, br_b.revision_history()[4]))
88
self.assertTrue(has_revision(br_a2, br_a.revision_history()[3]))
89
self.assertTrue(has_revision(br_a2, br_a.revision_history()[2]))
92
91
br_a3 = self.make_branch('br_a3')
93
# pulling a branch with no revisions grabs nothing, regardless of
92
# pulling a branch with no revisions grabs nothing, regardless of
94
93
# whats in the inventory.
94
self.assertEquals(br_a3.fetch(br_a2)[0], 0)
96
95
for revno in range(4):
98
br_a3.repository.has_revision(revision_history(br_a)[revno]))
99
br_a3.fetch(br_a2, revision_history(br_a)[2])
97
br_a3.repository.has_revision(br_a.revision_history()[revno]))
98
self.assertEqual(br_a3.fetch(br_a2, br_a.revision_history()[2])[0], 3)
100
99
# pull the 3 revisions introduced by a@u-0-3
101
br_a3.fetch(br_a2, revision_history(br_a)[3])
102
# NoSuchRevision should be raised if the branch is missing the revision
100
fetched = br_a3.fetch(br_a2, br_a.revision_history()[3])[0]
101
self.assertEquals(fetched, 3, "fetched %d instead of 3" % fetched)
102
# InstallFailed should be raised if the branch is missing the revision
103
103
# that was requested.
104
self.assertRaises(errors.NoSuchRevision, br_a3.fetch, br_a2, 'pizza')
104
self.assertRaises(errors.InstallFailed, br_a3.fetch, br_a2, 'pizza')
106
106
# TODO: Test trying to fetch from a branch that points to a revision not
107
107
# actually present in its repository. Not every branch format allows you
252
242
rev_id).get_file_text('this-file-id'), text)
255
class TestKnitToPackFetch(TestCaseWithTransport):
257
def find_get_record_stream(self, calls, expected_count=1):
258
"""In a list of calls, find the last 'get_record_stream'.
260
:param expected_count: The number of calls we should exepect to find.
261
If a different number is found, an assertion is raised.
263
get_record_call = None
266
if call[0] == 'get_record_stream':
268
get_record_call = call
269
self.assertEqual(expected_count, call_count)
270
return get_record_call
272
def test_fetch_with_deltas_no_delta_closure(self):
273
tree = self.make_branch_and_tree('source', format='dirstate')
274
target = self.make_repository('target', format='pack-0.92')
275
self.build_tree(['source/file'])
276
tree.set_root_id('root-id')
277
tree.add('file', 'file-id')
278
tree.commit('one', rev_id='rev-one')
279
source = tree.branch.repository
280
source.texts = versionedfile.RecordingVersionedFilesDecorator(
282
source.signatures = versionedfile.RecordingVersionedFilesDecorator(
284
source.revisions = versionedfile.RecordingVersionedFilesDecorator(
286
source.inventories = versionedfile.RecordingVersionedFilesDecorator(
289
self.assertTrue(target._format._fetch_uses_deltas)
290
target.fetch(source, revision_id='rev-one')
291
self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
292
target._format._fetch_order, False),
293
self.find_get_record_stream(source.texts.calls))
294
self.assertEqual(('get_record_stream', [('rev-one',)],
295
target._format._fetch_order, False),
296
self.find_get_record_stream(source.inventories.calls, 2))
297
self.assertEqual(('get_record_stream', [('rev-one',)],
298
target._format._fetch_order, False),
299
self.find_get_record_stream(source.revisions.calls))
300
# XXX: Signatures is special, and slightly broken. The
301
# standard item_keys_introduced_by actually does a lookup for every
302
# signature to see if it exists, rather than waiting to do them all at
303
# once at the end. The fetch code then does an all-at-once and just
304
# allows for some of them to be missing.
305
# So we know there will be extra calls, but the *last* one is the one
307
signature_calls = source.signatures.calls[-1:]
308
self.assertEqual(('get_record_stream', [('rev-one',)],
309
target._format._fetch_order, False),
310
self.find_get_record_stream(signature_calls))
312
def test_fetch_no_deltas_with_delta_closure(self):
313
tree = self.make_branch_and_tree('source', format='dirstate')
314
target = self.make_repository('target', format='pack-0.92')
315
self.build_tree(['source/file'])
316
tree.set_root_id('root-id')
317
tree.add('file', 'file-id')
318
tree.commit('one', rev_id='rev-one')
319
source = tree.branch.repository
320
source.texts = versionedfile.RecordingVersionedFilesDecorator(
322
source.signatures = versionedfile.RecordingVersionedFilesDecorator(
324
source.revisions = versionedfile.RecordingVersionedFilesDecorator(
326
source.inventories = versionedfile.RecordingVersionedFilesDecorator(
328
# XXX: This won't work in general, but for the dirstate format it does.
329
self.overrideAttr(target._format, '_fetch_uses_deltas', False)
330
target.fetch(source, revision_id='rev-one')
331
self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
332
target._format._fetch_order, True),
333
self.find_get_record_stream(source.texts.calls))
334
self.assertEqual(('get_record_stream', [('rev-one',)],
335
target._format._fetch_order, True),
336
self.find_get_record_stream(source.inventories.calls, 2))
337
self.assertEqual(('get_record_stream', [('rev-one',)],
338
target._format._fetch_order, True),
339
self.find_get_record_stream(source.revisions.calls))
340
# XXX: Signatures is special, and slightly broken. The
341
# standard item_keys_introduced_by actually does a lookup for every
342
# signature to see if it exists, rather than waiting to do them all at
343
# once at the end. The fetch code then does an all-at-once and just
344
# allows for some of them to be missing.
345
# So we know there will be extra calls, but the *last* one is the one
347
signature_calls = source.signatures.calls[-1:]
348
self.assertEqual(('get_record_stream', [('rev-one',)],
349
target._format._fetch_order, True),
350
self.find_get_record_stream(signature_calls))
352
def test_fetch_revisions_with_deltas_into_pack(self):
353
# See BUG #261339, dev versions of bzr could accidentally create deltas
354
# in revision texts in knit branches (when fetching from packs). So we
355
# ensure that *if* a knit repository has a delta in revisions, that it
356
# gets properly expanded back into a fulltext when stored in the pack
358
tree = self.make_branch_and_tree('source', format='dirstate')
359
target = self.make_repository('target', format='pack-0.92')
360
self.build_tree(['source/file'])
361
tree.set_root_id('root-id')
362
tree.add('file', 'file-id')
363
tree.commit('one', rev_id='rev-one')
364
# Hack the KVF for revisions so that it "accidentally" allows a delta
365
tree.branch.repository.revisions._max_delta_chain = 200
366
tree.commit('two', rev_id='rev-two')
367
source = tree.branch.repository
368
# Ensure that we stored a delta
370
self.addCleanup(source.unlock)
371
record = source.revisions.get_record_stream([('rev-two',)],
372
'unordered', False).next()
373
self.assertEqual('knit-delta-gz', record.storage_kind)
374
target.fetch(tree.branch.repository, revision_id='rev-two')
375
# The record should get expanded back to a fulltext
377
self.addCleanup(target.unlock)
378
record = target.revisions.get_record_stream([('rev-two',)],
379
'unordered', False).next()
380
self.assertEqual('knit-ft-gz', record.storage_kind)
382
def test_fetch_with_fallback_and_merge(self):
383
builder = self.make_branch_builder('source', format='pack-0.92')
384
builder.start_series()
395
# A & B are present in the base (stacked-on) repository, A-E are
396
# present in the source.
397
# This reproduces bug #304841
398
# We need a large enough inventory that total size of compressed deltas
399
# is shorter than the size of a compressed fulltext. We have to use
400
# random ids because otherwise the inventory fulltext compresses too
401
# well and the deltas get bigger.
403
('add', ('', 'TREE_ROOT', 'directory', None))]
405
fname = 'file%03d' % (i,)
406
fileid = '%s-%s' % (fname, osutils.rand_chars(64))
407
to_add.append(('add', (fname, fileid, 'file', 'content\n')))
408
builder.build_snapshot('A', None, to_add)
409
builder.build_snapshot('B', ['A'], [])
410
builder.build_snapshot('C', ['A'], [])
411
builder.build_snapshot('D', ['C'], [])
412
builder.build_snapshot('E', ['D'], [])
413
builder.build_snapshot('F', ['E', 'B'], [])
414
builder.finish_series()
415
source_branch = builder.get_branch()
416
source_branch.bzrdir.sprout('base', revision_id='B')
417
target_branch = self.make_branch('target', format='1.6')
418
target_branch.set_stacked_on_url('../base')
419
source = source_branch.repository
421
self.addCleanup(source.unlock)
422
source.inventories = versionedfile.OrderingVersionedFilesDecorator(
424
key_priority={('E',): 1, ('D',): 2, ('C',): 4,
426
# Ensure that the content is yielded in the proper order, and given as
428
records = [(record.key, record.storage_kind)
429
for record in source.inventories.get_record_stream(
430
[('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
431
self.assertEqual([(('E',), 'knit-delta-gz'), (('D',), 'knit-delta-gz'),
432
(('F',), 'knit-delta-gz'), (('C',), 'knit-delta-gz')],
435
target_branch.lock_write()
436
self.addCleanup(target_branch.unlock)
437
target = target_branch.repository
438
target.fetch(source, revision_id='F')
439
# 'C' should be expanded to a fulltext, but D and E should still be
441
stream = target.inventories.get_record_stream(
442
[('C',), ('D',), ('E',), ('F',)],
444
kinds = dict((record.key, record.storage_kind) for record in stream)
445
self.assertEqual({('C',): 'knit-ft-gz', ('D',): 'knit-delta-gz',
446
('E',): 'knit-delta-gz', ('F',): 'knit-delta-gz'},
450
class Test1To2Fetch(TestCaseWithTransport):
451
"""Tests for Model1To2 failure modes"""
453
def make_tree_and_repo(self):
454
self.tree = self.make_branch_and_tree('tree', format='pack-0.92')
455
self.repo = self.make_repository('rich-repo', format='rich-root-pack')
456
self.repo.lock_write()
457
self.addCleanup(self.repo.unlock)
459
def do_fetch_order_test(self, first, second):
460
"""Test that fetch works no matter what the set order of revision is.
462
This test depends on the order of items in a set, which is
463
implementation-dependant, so we test A, B and then B, A.
465
self.make_tree_and_repo()
466
self.tree.commit('Commit 1', rev_id=first)
467
self.tree.commit('Commit 2', rev_id=second)
468
self.repo.fetch(self.tree.branch.repository, second)
470
def test_fetch_order_AB(self):
471
"""See do_fetch_order_test"""
472
self.do_fetch_order_test('A', 'B')
474
def test_fetch_order_BA(self):
475
"""See do_fetch_order_test"""
476
self.do_fetch_order_test('B', 'A')
478
def get_parents(self, file_id, revision_id):
479
self.repo.lock_read()
481
parent_map = self.repo.texts.get_parent_map([(file_id, revision_id)])
482
return parent_map[(file_id, revision_id)]
486
def test_fetch_ghosts(self):
487
self.make_tree_and_repo()
488
self.tree.commit('first commit', rev_id='left-parent')
489
self.tree.add_parent_tree_id('ghost-parent')
490
fork = self.tree.bzrdir.sprout('fork', 'null:').open_workingtree()
491
fork.commit('not a ghost', rev_id='not-ghost-parent')
492
self.tree.branch.repository.fetch(fork.branch.repository,
494
self.tree.add_parent_tree_id('not-ghost-parent')
495
self.tree.commit('second commit', rev_id='second-id')
496
self.repo.fetch(self.tree.branch.repository, 'second-id')
497
root_id = self.tree.get_root_id()
499
((root_id, 'left-parent'), (root_id, 'not-ghost-parent')),
500
self.get_parents(root_id, 'second-id'))
502
def make_two_commits(self, change_root, fetch_twice):
503
self.make_tree_and_repo()
504
self.tree.commit('first commit', rev_id='first-id')
506
self.tree.set_root_id('unique-id')
507
self.tree.commit('second commit', rev_id='second-id')
509
self.repo.fetch(self.tree.branch.repository, 'first-id')
510
self.repo.fetch(self.tree.branch.repository, 'second-id')
512
def test_fetch_changed_root(self):
513
self.make_two_commits(change_root=True, fetch_twice=False)
514
self.assertEqual((), self.get_parents('unique-id', 'second-id'))
516
def test_two_fetch_changed_root(self):
517
self.make_two_commits(change_root=True, fetch_twice=True)
518
self.assertEqual((), self.get_parents('unique-id', 'second-id'))
520
def test_two_fetches(self):
521
self.make_two_commits(change_root=False, fetch_twice=True)
522
self.assertEqual((('TREE_ROOT', 'first-id'),),
523
self.get_parents('TREE_ROOT', 'second-id'))
245
class TestHttpFetch(TestCaseWithWebserver):
246
# FIXME RBC 20060124 this really isn't web specific, perhaps an
247
# instrumented readonly transport? Can we do an instrumented
248
# adapter and use self.get_readonly_url ?
250
def test_fetch(self):
251
#highest indices a: 5, b: 7
252
br_a, br_b = make_branches(self)
253
br_rem_a = Branch.open(self.get_readonly_url('branch1'))
254
fetch_steps(self, br_rem_a, br_b, br_a)
256
def _count_log_matches(self, target, logs):
257
"""Count the number of times the target file pattern was fetched in an http log"""
258
get_succeeds_re = re.compile(
259
'.*"GET .*%s HTTP/1.1" 20[06] - "-" "bzr/%s' %
260
( target, bzrlib.__version__))
263
if get_succeeds_re.match(line):
267
def test_weaves_are_retrieved_once(self):
268
self.build_tree(("source/", "source/file", "target/"))
269
# This test depends on knit dasta storage.
270
wt = self.make_branch_and_tree('source', format='dirstate-tags')
272
wt.add(["file"], ["id"])
273
wt.commit("added file")
274
open("source/file", 'w').write("blah\n")
275
wt.commit("changed file")
276
target = BzrDir.create_branch_and_repo("target/")
277
source = Branch.open(self.get_readonly_url("source/"))
278
self.assertEqual(target.fetch(source), (2, []))
279
# this is the path to the literal file. As format changes
280
# occur it needs to be updated. FIXME: ask the store for the
282
self.log("web server logs are:")
283
http_logs = self.get_readonly_server().logs
284
self.log('\n'.join(http_logs))
285
# unfortunately this log entry is branch format specific. We could
286
# factor out the 'what files does this format use' to a method on the
287
# repository, which would let us to this generically. RBC 20060419
288
self.assertEqual(1, self._count_log_matches('/ce/id.kndx', http_logs))
289
self.assertEqual(1, self._count_log_matches('/ce/id.knit', http_logs))
290
self.assertEqual(1, self._count_log_matches('inventory.kndx', http_logs))
291
# this r-h check test will prevent regressions, but it currently already
292
# passes, before the patch to cache-rh is applied :[
293
self.assertTrue(1 >= self._count_log_matches('revision-history',
295
self.assertTrue(1 >= self._count_log_matches('last-revision',
297
# FIXME naughty poking in there.
298
self.get_readonly_server().logs = []
299
# check there is nothing more to fetch
300
source = Branch.open(self.get_readonly_url("source/"))
301
self.assertEqual(target.fetch(source), (0, []))
302
# should make just two requests
303
http_logs = self.get_readonly_server().logs
304
self.log("web server logs are:")
305
self.log('\n'.join(http_logs))
306
self.assertEqual(1, self._count_log_matches('branch-format', http_logs))
307
self.assertEqual(1, self._count_log_matches('branch/format', http_logs))
308
self.assertEqual(1, self._count_log_matches('repository/format', http_logs))
309
self.assertTrue(1 >= self._count_log_matches('revision-history',
311
self.assertTrue(1 >= self._count_log_matches('last-revision',
313
self.assertEqual(4, len(http_logs))