# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from bzrlib import (
    errors,
    osutils,
    versionedfile,
    )
from bzrlib.branch import Branch
from bzrlib.bzrdir import BzrDir
from bzrlib.repofmt import knitrepo
from bzrlib.tests import TestCaseWithTransport
from bzrlib.tests.http_utils import TestCaseWithWebserver
from bzrlib.tests.test_revision import make_branches
from bzrlib.trace import mutter
from bzrlib.upgrade import Convert
from bzrlib.workingtree import WorkingTree


# These tests are a bit old; please instead add new tests into
# per_interrepository/ so they'll run on all relevant
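# Note: the has_revision() helper used throughout fetch_steps below is defined
# earlier in this module, roughly:
#   def has_revision(branch, revision_id):
#       return branch.repository.has_revision(revision_id)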
def fetch_steps(self, br_a, br_b, writable_a):
    """A foreign test method for testing fetch locally and remotely."""

    # TODO RBC 20060201 make this a repository test.
    repo_b = br_b.repository
    self.assertFalse(repo_b.has_revision(br_a.revision_history()[3]))
    self.assertTrue(repo_b.has_revision(br_a.revision_history()[2]))
    self.assertEquals(len(br_b.revision_history()), 7)
    br_b.fetch(br_a, br_a.revision_history()[2])
    # branch.fetch is not supposed to alter the revision history
    self.assertEquals(len(br_b.revision_history()), 7)
    self.assertFalse(repo_b.has_revision(br_a.revision_history()[3]))

    # fetching the next revision up in sample data copies one revision
    br_b.fetch(br_a, br_a.revision_history()[3])
    self.assertTrue(repo_b.has_revision(br_a.revision_history()[3]))
    self.assertFalse(has_revision(br_a, br_b.revision_history()[6]))
    self.assertTrue(br_a.repository.has_revision(br_b.revision_history()[5]))

    # When a non-branch ancestor is missing, it should be unlisted...
    # as it's not referenced from the inventory weave.
    br_b4 = self.make_branch('br_4')
    br_b4.fetch(br_b)

    writable_a.fetch(br_b)
    self.assertTrue(has_revision(br_a, br_b.revision_history()[3]))
    self.assertTrue(has_revision(br_a, br_b.revision_history()[4]))

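    # Fetching into a freshly created, empty branch copies the source branch's
    # entire ancestry, so br_b2 below ends up with everything br_b has.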
    br_b2 = self.make_branch('br_b2')
    br_b2.fetch(br_b)
    self.assertTrue(has_revision(br_b2, br_b.revision_history()[4]))
    self.assertTrue(has_revision(br_b2, br_a.revision_history()[2]))
    self.assertFalse(has_revision(br_b2, br_a.revision_history()[3]))

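    # br_a2 fetches from br_a instead, so unlike br_b2 it also picks up br_a's
    # later revisions.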
    br_a2 = self.make_branch('br_a2')
    br_a2.fetch(br_a)
    self.assertTrue(has_revision(br_a2, br_b.revision_history()[4]))
    self.assertTrue(has_revision(br_a2, br_a.revision_history()[3]))
    self.assertTrue(has_revision(br_a2, br_a.revision_history()[2]))

    br_a3 = self.make_branch('br_a3')
    # pulling a branch with no revisions grabs nothing, regardless of
    # what's in the inventory.
    br_a3.fetch(br_a2)
    for revno in range(4):
        self.assertFalse(
            br_a3.repository.has_revision(br_a.revision_history()[revno]))
    br_a3.fetch(br_a2, br_a.revision_history()[2])
    # pull the 3 revisions introduced by a@u-0-3
    br_a3.fetch(br_a2, br_a.revision_history()[3])
    # NoSuchRevision should be raised if the branch is missing the revision
    # that was requested.
    self.assertRaises(errors.NoSuchRevision, br_a3.fetch, br_a2, 'pizza')

    # TODO: Test trying to fetch from a branch that points to a revision not
    # actually present in its repository. Not every branch format allows you


class TestHttpFetch(TestCaseWithWebserver):
    # FIXME RBC 20060124 this really isn't web specific, perhaps an
    # instrumented readonly transport? Can we do an instrumented
    # adapter and use self.get_readonly_url ?

    def test_fetch(self):
        # highest indices a: 5, b: 7
        br_a, br_b = make_branches(self)
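        # 'branch1' was created on disk by make_branches(); opening it via
        # get_readonly_url() makes fetch_steps below exercise the same fetch
        # sequence over the test suite's HTTP server.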
        br_rem_a = Branch.open(self.get_readonly_url('branch1'))
        fetch_steps(self, br_rem_a, br_b, br_a)

    def _count_log_matches(self, target, logs):
        """Count the number of times the target file pattern was fetched in an http log"""
        get_succeeds_re = re.compile(
            '.*"GET .*%s HTTP/1.1" 20[06] - "-" "bzr/%s' %
            (target, bzrlib.__version__))
        c = 0
        for line in logs:
            if get_succeeds_re.match(line):
                c += 1
        return c

    def test_weaves_are_retrieved_once(self):
        self.build_tree(("source/", "source/file", "target/"))
        # This test depends on knit data storage.
        wt = self.make_branch_and_tree('source', format='dirstate-tags')
        wt.add(["file"], ["id"])
        wt.commit("added file")
        open("source/file", 'w').write("blah\n")
        wt.commit("changed file")
        target = BzrDir.create_branch_and_repo("target/")
        source = Branch.open(self.get_readonly_url("source/"))
        self.assertEqual(target.fetch(source), (2, []))
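        # fetch() returns (number of revisions copied, list of failures): both
        # commits were copied and nothing failed.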
        # this is the path to the literal file. As format changes
        # occur it needs to be updated. FIXME: ask the store for the
        # path.
        self.log("web server logs are:")
        http_logs = self.get_readonly_server().logs
        self.log('\n'.join(http_logs))
        # unfortunately this log entry is branch format specific. We could
        # factor out the 'what files does this format use' to a method on the
        # repository, which would let us do this generically. RBC 20060419
        # RBC 20080408: Or perhaps we can assert that no files are fully read
        # twice?
        self.assertEqual(1, self._count_log_matches('/ce/id.kndx', http_logs))
        self.assertEqual(1, self._count_log_matches('/ce/id.knit', http_logs))
        self.assertEqual(1, self._count_log_matches('inventory.kndx', http_logs))
        # this r-h check test will prevent regressions, but it currently already
        # passes, before the patch to cache-rh is applied :[
        self.assertTrue(1 >= self._count_log_matches('revision-history',
                                                     http_logs))
        self.assertTrue(1 >= self._count_log_matches('last-revision',
                                                     http_logs))
        # FIXME naughty poking in there.
        self.get_readonly_server().logs = []
        # check there is nothing more to fetch.  We take care to re-use the
        # existing transport so that the request logs we're about to examine
        # aren't cluttered with redundant probes for a smart server.
        # XXX: Perhaps this further parameterisation: test http with smart
        # server, and test http without smart server?
        source = Branch.open(
            self.get_readonly_url("source/"),
            possible_transports=[source.bzrdir.root_transport])
        self.assertEqual(target.fetch(source), (0, []))
        # should make just two requests
        http_logs = self.get_readonly_server().logs
        self.log("web server logs are:")
        self.log('\n'.join(http_logs))
        self.assertEqual(1, self._count_log_matches('branch-format', http_logs))
        self.assertEqual(1, self._count_log_matches('branch/format', http_logs))
        self.assertEqual(1, self._count_log_matches('repository/format',
                                                    http_logs))
        self.assertTrue(1 >= self._count_log_matches('revision-history',
                                                     http_logs))
        self.assertTrue(1 >= self._count_log_matches('last-revision',
                                                     http_logs))
        self.assertEqual(4, len(http_logs))


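# The TestKnitToPackFetch tests below fetch from a knit-format source
# repository into a pack-format repository and inspect exactly which record
# streams the fetch requests from the source.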
class TestKnitToPackFetch(TestCaseWithTransport):

    def find_get_record_stream(self, calls, expected_count=1):
        """In a list of calls, find the last 'get_record_stream'.

        :param expected_count: The number of calls we should expect to find.
            If a different number is found, an assertion is raised.
        """
        get_record_call = None
        call_count = 0
        for call in calls:
            if call[0] == 'get_record_stream':
                call_count += 1
                get_record_call = call
        self.assertEqual(expected_count, call_count)
        return get_record_call

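    # These tests wrap the source repository's VersionedFiles objects in
    # versionedfile.RecordingVersionedFilesDecorator, which records each call
    # in a .calls list; a get_record_stream request is recorded as
    #   ('get_record_stream', keys, sort_order, include_delta_closure)
    # and that tuple is what the assertions below compare against.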
    def test_fetch_with_deltas_no_delta_closure(self):
        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
            source.inventories)
        self.assertTrue(target._format._fetch_uses_deltas)
        target.fetch(source, revision_id='rev-one')
        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.texts.calls))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.inventories.calls, 2))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.revisions.calls))
        # XXX: Signatures is special, and slightly broken. The
        # standard item_keys_introduced_by actually does a lookup for every
        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
            source.revisions)
        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
            source.inventories)
        # XXX: This won't work in general, but for the dirstate format it does.
        self.overrideAttr(target._format, '_fetch_uses_deltas', False)
        target.fetch(source, revision_id='rev-one')
        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.texts.calls))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.inventories.calls, 2))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.revisions.calls))
        # XXX: Signatures is special, and slightly broken. The
        # standard item_keys_introduced_by actually does a lookup for every
        signature_calls = source.signatures.calls[-1:]
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(signature_calls))

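    # Storage kinds seen below: 'knit-delta-gz' means the text is stored as a
    # gzipped delta against another text, 'knit-ft-gz' is a gzipped fulltext.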
    def test_fetch_revisions_with_deltas_into_pack(self):
        # See BUG #261339, dev versions of bzr could accidentally create deltas
        # in revision texts in knit branches (when fetching from packs). So we
        # ensure that *if* a knit repository has a delta in revisions, that it
        # gets properly expanded back into a fulltext when stored in the pack
        # file.
        tree = self.make_branch_and_tree('source', format='dirstate')
        target = self.make_repository('target', format='pack-0.92')
        self.build_tree(['source/file'])
        tree.set_root_id('root-id')
        tree.add('file', 'file-id')
        tree.commit('one', rev_id='rev-one')
        # Hack the KVF for revisions so that it "accidentally" allows a delta
        tree.branch.repository.revisions._max_delta_chain = 200
        tree.commit('two', rev_id='rev-two')
        source = tree.branch.repository
        # Ensure that we stored a delta
        source.lock_read()
        self.addCleanup(source.unlock)
        record = source.revisions.get_record_stream([('rev-two',)],
                                                    'unordered', False).next()
        self.assertEqual('knit-delta-gz', record.storage_kind)
        target.fetch(tree.branch.repository, revision_id='rev-two')
        # The record should get expanded back to a fulltext
        target.lock_read()
        self.addCleanup(target.unlock)
        record = target.revisions.get_record_stream([('rev-two',)],
                                                    'unordered', False).next()
        self.assertEqual('knit-ft-gz', record.storage_kind)

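    # The next test builds this ancestry graph with the branch builder (F is a
    # merge of E and B):
    #   A
    #   |\
    #   B C
    #   | |
    #   | D
    #   | |
    #   | E
    #   |/
    #   F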
    def test_fetch_with_fallback_and_merge(self):
        builder = self.make_branch_builder('source', format='pack-0.92')
        builder.start_series()
        # A & B are present in the base (stacked-on) repository, A-E are
        # present in the source.
        # This reproduces bug #304841
        # We need a large enough inventory that total size of compressed deltas
        # is shorter than the size of a compressed fulltext. We have to use
        # random ids because otherwise the inventory fulltext compresses too
        # well and the deltas get bigger.
        to_add = [
            ('add', ('', 'TREE_ROOT', 'directory', None))]
        for i in range(10):  # the exact file count is an assumption here
            fname = 'file%03d' % (i,)
            fileid = '%s-%s' % (fname, osutils.rand_chars(64))
            to_add.append(('add', (fname, fileid, 'file', 'content\n')))
        builder.build_snapshot('A', None, to_add)
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['A'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.build_snapshot('E', ['D'], [])
        builder.build_snapshot('F', ['E', 'B'], [])
        builder.finish_series()
        source_branch = builder.get_branch()
        source_branch.bzrdir.sprout('base', revision_id='B')
        target_branch = self.make_branch('target', format='1.6')
        target_branch.set_stacked_on_url('../base')
        source = source_branch.repository
        source.lock_read()
        self.addCleanup(source.unlock)
        source.inventories = versionedfile.OrderingVersionedFilesDecorator(
            source.inventories,
            key_priority={('E',): 1, ('D',): 2, ('C',): 4,
                          ('F',): 3})
        # Ensure that the content is yielded in the proper order, and given as
        # deltas
        records = [(record.key, record.storage_kind)
                   for record in source.inventories.get_record_stream(
                       [('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
        self.assertEqual([(('E',), 'knit-delta-gz'), (('D',), 'knit-delta-gz'),
                          (('F',), 'knit-delta-gz'), (('C',), 'knit-delta-gz')],
                         records)

        target_branch.lock_write()
        self.addCleanup(target_branch.unlock)
        target = target_branch.repository
        target.fetch(source, revision_id='F')
        # 'C' should be expanded to a fulltext, but D and E should still be
        # deltas
        stream = target.inventories.get_record_stream(
            [('C',), ('D',), ('E',), ('F',)],
            'unordered', False)
        kinds = dict((record.key, record.storage_kind) for record in stream)
        self.assertEqual({('C',): 'knit-ft-gz', ('D',): 'knit-delta-gz',
                          ('E',): 'knit-delta-gz', ('F',): 'knit-delta-gz'},
                         kinds)


class Test1To2Fetch(TestCaseWithTransport):
    """Tests for Model1To2 failure modes"""