    # When a non-branch ancestor is missing, it should be unlisted...
    # as it's not referenced from the inventory weave.
    br_b4 = self.make_branch('br_4')
    count, failures = br_b4.fetch(br_b)
    self.assertEqual(count, 7)
    self.assertEqual(failures, [])

    self.assertEqual(writable_a.fetch(br_b)[0], 1)
    self.assertTrue(has_revision(br_a, br_b.revision_history()[3]))
    self.assertTrue(has_revision(br_a, br_b.revision_history()[4]))

    br_b2 = self.make_branch('br_b2')
    self.assertEqual(br_b2.fetch(br_b)[0], 7)
    self.assertTrue(has_revision(br_b2, br_b.revision_history()[4]))
    self.assertTrue(has_revision(br_b2, br_a.revision_history()[2]))
    self.assertFalse(has_revision(br_b2, br_a.revision_history()[3]))

    br_a2 = self.make_branch('br_a2')
    self.assertEqual(br_a2.fetch(br_a)[0], 9)
    self.assertTrue(has_revision(br_a2, br_b.revision_history()[4]))
    self.assertTrue(has_revision(br_a2, br_a.revision_history()[3]))
    self.assertTrue(has_revision(br_a2, br_a.revision_history()[2]))

    br_a3 = self.make_branch('br_a3')
    # pulling a branch with no revisions grabs nothing, regardless of
    # what's in the inventory.
    self.assertEqual(br_a3.fetch(br_a2)[0], 0)
    for revno in range(4):
        self.assertFalse(
            br_a3.repository.has_revision(br_a.revision_history()[revno]))
    self.assertEqual(br_a3.fetch(br_a2, br_a.revision_history()[2])[0], 3)
    # pull the 3 revisions introduced by a@u-0-3
    fetched = br_a3.fetch(br_a2, br_a.revision_history()[3])[0]
    self.assertEqual(fetched, 3, "fetched %d instead of 3" % fetched)
    # InstallFailed should be raised if the branch is missing the revision
    # that was requested.
    self.assertRaises(bzrlib.errors.InstallFailed, br_a3.fetch, br_a2, 'pizza')
    # InstallFailed should be raised if the branch is missing a revision
    # from its own revision history
    br_a2.append_revision('a-b-c')
    self.assertRaises(bzrlib.errors.InstallFailed, br_a3.fetch, br_a2)

    # TODO: Test trying to fetch from a branch that points to a revision not
    # actually present in its repository.  Not every branch format allows you
    # to directly point to such revisions, so it's a bit complicated to
    # construct.  One way would be to uncommit and gc the revision, but not
    # every branch supports that.  -- mbp 20070814

    # TODO: ADHB 20070116 Perhaps set_last_revision shouldn't accept
    #       revisions which are not present?  In that case, this test
    #       must be rewritten.
    #       RBC 20060403 the way to do this is to uncommit the revision from
    #       the repository after the commit

    # TODO: test that fetch correctly does reweaving when needed. RBC 20051008
    # Note that this means - updating the weave when ghosts are filled in to
    # add the right parents.


class TestKnitToPackFetch(TestCaseWithTransport):

    def find_get_record_stream(self, calls, expected_count=1):
        """In a list of calls, find the last 'get_record_stream'.

        :param expected_count: The number of calls we should expect to find.
            If a different number is found, an assertion is raised.
        """
        get_record_call = None
        call_count = 0
        for call in calls:
            if call[0] == 'get_record_stream':
                call_count += 1
                get_record_call = call
        self.assertEqual(expected_count, call_count)
        return get_record_call
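
    # Usage sketch for the helper above: RecordingVersionedFilesDecorator,
    # used in the tests below, records each call it sees as a tuple whose
    # first element is the method name, so a calls list might look roughly
    # like
    #   [('get_parent_map', ...),
    #    ('get_record_stream', [('rev-one',)], 'unordered', False)]
    # and self.find_get_record_stream(calls) would return the last such
    # 'get_record_stream' tuple (the exact tuple contents here are
    # illustrative, not prescribed).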

    def test_fetch_with_deltas_no_delta_closure(self):
        tree = self.make_branch_and_tree('source', format='dirstate')
        target = self.make_repository('target', format='pack-0.92')
        self.build_tree(['source/file'])
        tree.set_root_id('root-id')
        tree.add('file', 'file-id')
        tree.commit('one', rev_id='rev-one')
        source = tree.branch.repository
        source.texts = versionedfile.RecordingVersionedFilesDecorator(
                        source.texts)
        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
                        source.signatures)
        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
                        source.revisions)
        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
                        source.inventories)
        self.assertTrue(target._format._fetch_uses_deltas)
        target.fetch(source, revision_id='rev-one')
        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.texts.calls))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.inventories.calls, 2))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.revisions.calls))
        # XXX: Signatures is special, and slightly broken. The
        # standard item_keys_introduced_by actually does a lookup for every
        # signature to see if it exists, rather than waiting to do them all at
        # once at the end. The fetch code then does an all-at-once and just
        # allows for some of them to be missing.
        # So we know there will be extra calls, but the *last* one is the one
        # we care about.
        signature_calls = source.signatures.calls[-1:]
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(signature_calls))

    def test_fetch_no_deltas_with_delta_closure(self):
        tree = self.make_branch_and_tree('source', format='dirstate')
        target = self.make_repository('target', format='pack-0.92')
        self.build_tree(['source/file'])
        tree.set_root_id('root-id')
        tree.add('file', 'file-id')
        tree.commit('one', rev_id='rev-one')
        source = tree.branch.repository
        source.texts = versionedfile.RecordingVersionedFilesDecorator(
                        source.texts)
        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
                        source.signatures)
        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
                        source.revisions)
        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
                        source.inventories)
        # XXX: This won't work in general, but for the dirstate format it does.
        self.overrideAttr(target._format, '_fetch_uses_deltas', False)
        target.fetch(source, revision_id='rev-one')
        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.texts.calls))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.inventories.calls, 2))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.revisions.calls))
        # XXX: Signatures is special, and slightly broken. The
        # standard item_keys_introduced_by actually does a lookup for every
        # signature to see if it exists, rather than waiting to do them all at
        # once at the end. The fetch code then does an all-at-once and just
        # allows for some of them to be missing.
        # So we know there will be extra calls, but the *last* one is the one
        # we care about.
        signature_calls = source.signatures.calls[-1:]
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(signature_calls))

    def test_fetch_revisions_with_deltas_into_pack(self):
        # See BUG #261339, dev versions of bzr could accidentally create deltas
        # in revision texts in knit branches (when fetching from packs). So we
        # ensure that *if* a knit repository has a delta in revisions, that it
        # gets properly expanded back into a fulltext when stored in the pack
        # file.
        tree = self.make_branch_and_tree('source', format='dirstate')
        target = self.make_repository('target', format='pack-0.92')
        self.build_tree(['source/file'])
        tree.set_root_id('root-id')
        tree.add('file', 'file-id')
        tree.commit('one', rev_id='rev-one')
        # Hack the KVF for revisions so that it "accidentally" allows a delta
        tree.branch.repository.revisions._max_delta_chain = 200
        tree.commit('two', rev_id='rev-two')
        source = tree.branch.repository
        # Ensure that we stored a delta
        source.lock_read()
        self.addCleanup(source.unlock)
        record = source.revisions.get_record_stream([('rev-two',)],
                                                    'unordered', False).next()
        self.assertEqual('knit-delta-gz', record.storage_kind)
        target.fetch(tree.branch.repository, revision_id='rev-two')
        # The record should get expanded back to a fulltext
        target.lock_read()
        self.addCleanup(target.unlock)
        record = target.revisions.get_record_stream([('rev-two',)],
                                                    'unordered', False).next()
        self.assertEqual('knit-ft-gz', record.storage_kind)

    def test_fetch_with_fallback_and_merge(self):
        builder = self.make_branch_builder('source', format='pack-0.92')
        builder.start_series()
        # A & B are present in the base (stacked-on) repository, A-E are
        # present in the source.
        # This reproduces bug #304841
        # We need a large enough inventory that total size of compressed deltas
        # is shorter than the size of a compressed fulltext. We have to use
        # random ids because otherwise the inventory fulltext compresses too
        # well and the deltas get bigger.
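        # (Illustrative aside: the compression effect is easy to check with
        # plain zlib -- repetitive ids compress far better than random ones:
        #   import zlib
        #   from bzrlib import osutils
        #   boring = ''.join('file%03d\n' % i for i in range(100))
        #   randomish = ''.join(osutils.rand_chars(64) + '\n'
        #                       for i in range(100))
        #   len(zlib.compress(boring))     # much smaller than len(boring)
        #   len(zlib.compress(randomish))  # much closer to len(randomish)
        # hence the rand_chars-based file ids below.)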
        to_add = [
            ('add', ('', 'TREE_ROOT', 'directory', None))]
        for i in range(100):  # file count assumed; it just needs to be large
            fname = 'file%03d' % (i,)
            fileid = '%s-%s' % (fname, osutils.rand_chars(64))
            to_add.append(('add', (fname, fileid, 'file', 'content\n')))
        builder.build_snapshot('A', None, to_add)
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['A'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.build_snapshot('E', ['D'], [])
        builder.build_snapshot('F', ['E', 'B'], [])
        builder.finish_series()
        source_branch = builder.get_branch()
        source_branch.bzrdir.sprout('base', revision_id='B')
        target_branch = self.make_branch('target', format='1.6')
        target_branch.set_stacked_on_url('../base')
        source = source_branch.repository
        source.lock_read()
        self.addCleanup(source.unlock)
        source.inventories = versionedfile.OrderingVersionedFilesDecorator(
                        source.inventories,
                        key_priority={('E',): 1, ('D',): 2, ('C',): 4,
                                      ('F',): 3})
        # Ensure that the content is yielded in the proper order, and given as
        # deltas
        records = [(record.key, record.storage_kind)
                   for record in source.inventories.get_record_stream(
                        [('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
        self.assertEqual([(('E',), 'knit-delta-gz'), (('D',), 'knit-delta-gz'),
                          (('F',), 'knit-delta-gz'), (('C',), 'knit-delta-gz')],
                         records)

        target_branch.lock_write()
        self.addCleanup(target_branch.unlock)
        target = target_branch.repository
        target.fetch(source, revision_id='F')
        # 'C' should be expanded to a fulltext, but D and E should still be
        # deltas
        stream = target.inventories.get_record_stream(
            [('C',), ('D',), ('E',), ('F',)],
            'unordered', False)
        kinds = dict((record.key, record.storage_kind) for record in stream)
        self.assertEqual({('C',): 'knit-ft-gz', ('D',): 'knit-delta-gz',
                          ('E',): 'knit-delta-gz', ('F',): 'knit-delta-gz'},
                         kinds)


class Test1To2Fetch(TestCaseWithTransport):
    """Tests for Model1To2 failure modes"""

    def make_tree_and_repo(self):
        self.tree = self.make_branch_and_tree('tree', format='pack-0.92')
        self.repo = self.make_repository('rich-repo', format='rich-root-pack')
        self.repo.lock_write()
        self.addCleanup(self.repo.unlock)

    def do_fetch_order_test(self, first, second):
        """Test that fetch works no matter what the set order of revision is.

        This test depends on the order of items in a set, which is
        implementation-dependent, so we test A, B and then B, A.
        """
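        # (Illustration: per the docstring above, the fetch code gathers the
        # revisions to copy into a set, so whether `first` or `second` is
        # processed first depends on set iteration order; running this helper
        # with both ('A', 'B') and ('B', 'A') exercises both possibilities.)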
        self.make_tree_and_repo()
        self.tree.commit('Commit 1', rev_id=first)
        self.tree.commit('Commit 2', rev_id=second)
        self.repo.fetch(self.tree.branch.repository, second)

    def test_fetch_order_AB(self):
        """See do_fetch_order_test"""
        self.do_fetch_order_test('A', 'B')

    def test_fetch_order_BA(self):
        """See do_fetch_order_test"""
        self.do_fetch_order_test('B', 'A')

    def get_parents(self, file_id, revision_id):
        self.repo.lock_read()
        try:
            parent_map = self.repo.texts.get_parent_map([(file_id, revision_id)])
            return parent_map[(file_id, revision_id)]
        finally:
            self.repo.unlock()

    def test_fetch_ghosts(self):
        self.make_tree_and_repo()
        self.tree.commit('first commit', rev_id='left-parent')
        self.tree.add_parent_tree_id('ghost-parent')
        fork = self.tree.bzrdir.sprout('fork', 'null:').open_workingtree()
        fork.commit('not a ghost', rev_id='not-ghost-parent')
        self.tree.branch.repository.fetch(fork.branch.repository,
                                          'not-ghost-parent')
        self.tree.add_parent_tree_id('not-ghost-parent')
        self.tree.commit('second commit', rev_id='second-id')
        self.repo.fetch(self.tree.branch.repository, 'second-id')
        root_id = self.tree.get_root_id()
        self.assertEqual(
            ((root_id, 'left-parent'), (root_id, 'not-ghost-parent')),
            self.get_parents(root_id, 'second-id'))

    def make_two_commits(self, change_root, fetch_twice):
        self.make_tree_and_repo()
        self.tree.commit('first commit', rev_id='first-id')
        if change_root:
            self.tree.set_root_id('unique-id')
        self.tree.commit('second commit', rev_id='second-id')
        if fetch_twice:
            self.repo.fetch(self.tree.branch.repository, 'first-id')
        self.repo.fetch(self.tree.branch.repository, 'second-id')

    def test_fetch_changed_root(self):
        self.make_two_commits(change_root=True, fetch_twice=False)
        self.assertEqual((), self.get_parents('unique-id', 'second-id'))

    def test_two_fetch_changed_root(self):
        self.make_two_commits(change_root=True, fetch_twice=True)
        self.assertEqual((), self.get_parents('unique-id', 'second-id'))

    def test_two_fetches(self):
        self.make_two_commits(change_root=False, fetch_twice=True)
        self.assertEqual((('TREE_ROOT', 'first-id'),),
                         self.get_parents('TREE_ROOT', 'second-id'))


class TestHttpFetch(TestCaseWithWebserver):
    # FIXME RBC 20060124 this really isn't web specific, perhaps an
    # instrumented readonly transport? Can we do an instrumented
    # adapter and use self.get_readonly_url ?

    def test_fetch(self):
        # highest indices a: 5, b: 7
        br_a, br_b = make_branches(self)
        br_rem_a = Branch.open(self.get_readonly_url('branch1'))
        fetch_steps(self, br_rem_a, br_b, br_a)

    def _count_log_matches(self, target, logs):
        """Count the number of times the target file pattern was fetched in an http log"""
        get_succeeds_re = re.compile(
            '.*"GET .*%s HTTP/1.1" 20[06] - "-" "bzr/%s' %
            (target, bzrlib.__version__))
        c = 0
        for line in logs:
            if get_succeeds_re.match(line):
                c += 1
        return c
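
    # _count_log_matches looks for successful GET entries, i.e. log lines of
    # (roughly) the form
    #   ... "GET /path/to/<target> HTTP/1.1" 200 - "-" "bzr/<version>" ...
    # where the status may be 200 or 206; this shape is inferred from the
    # regular expression above, not from a captured log.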

    def test_weaves_are_retrieved_once(self):
        self.build_tree(("source/", "source/file", "target/"))
        wt = self.make_branch_and_tree('source')
        wt.add(["file"], ["id"])
        wt.commit("added file")
        print >>open("source/file", 'w'), "blah"
        wt.commit("changed file")
        target = BzrDir.create_branch_and_repo("target/")
        source = Branch.open(self.get_readonly_url("source/"))
        self.assertEqual(target.fetch(source), (2, []))
        # this is the path to the literal file. As format changes
        # occur it needs to be updated. FIXME: ask the store for the
        # path.
        self.log("web server logs are:")
        http_logs = self.get_readonly_server().logs
        self.log('\n'.join(http_logs))
        # unfortunately this log entry is branch format specific. We could
        # factor out the 'what files does this format use' to a method on the
        # repository, which would let us do this generically. RBC 20060419
        self.assertEqual(1, self._count_log_matches('/ce/id.kndx', http_logs))
        self.assertEqual(1, self._count_log_matches('/ce/id.knit', http_logs))
        self.assertEqual(1, self._count_log_matches('inventory.kndx', http_logs))
        # this r-h check test will prevent regressions, but it currently already
        # passes, before the patch to cache-rh is applied :[
        self.assertTrue(1 >= self._count_log_matches('revision-history',
                                                     http_logs))
        self.assertTrue(1 >= self._count_log_matches('last-revision',
                                                     http_logs))
        # FIXME naughty poking in there.
        self.get_readonly_server().logs = []
        # check there is nothing more to fetch
        source = Branch.open(self.get_readonly_url("source/"))
        self.assertEqual(target.fetch(source), (0, []))
        # should make just two requests
        http_logs = self.get_readonly_server().logs
        self.log("web server logs are:")
        self.log('\n'.join(http_logs))
        self.assertEqual(1, self._count_log_matches('branch-format', http_logs))
        self.assertEqual(1, self._count_log_matches('branch/format', http_logs))
        self.assertEqual(1, self._count_log_matches('repository/format',
                                                    http_logs))
        self.assertTrue(1 >= self._count_log_matches('revision-history',
                                                     http_logs))
        self.assertTrue(1 >= self._count_log_matches('last-revision',
                                                     http_logs))
        self.assertEqual(4, len(http_logs))