    def _count_log_matches(self, target, logs):
        """Count the number of times the target file pattern was fetched in an http log."""
        get_succeeds_re = re.compile(
            '.*"GET .*%s HTTP/1.1" 20[06] - "-" "bzr/%s' %
            (target, bzrlib.__version__))
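        # The pattern matches logged "GET ... HTTP/1.1" requests for the
        # target path that were answered with a 200 or 206 status and came
        # from a bzr client of the running version (per the User-Agent field).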
        count = 0
        for line in logs:
            if get_succeeds_re.match(line):
                count += 1
        return count

def test_weaves_are_retrieved_once(self):
        self.build_tree(("source/", "source/file", "target/"))
        # This test depends on knit data storage.
        wt = self.make_branch_and_tree('source', format='dirstate-tags')
        branch = wt.branch
        wt.add(["file"], ["id"])
        wt.commit("added file")
        open("source/file", 'w').write("blah\n")
        wt.commit("changed file")
        target = BzrDir.create_branch_and_repo("target/")
        source = Branch.open(self.get_readonly_url("source/"))
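        # Fetching through the readonly HTTP transport means every file the
        # fetch touches shows up in the test web server's request log below.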
        self.assertEqual(target.fetch(source), (2, []))
        # this is the path to the literal file. As format changes
        # occur it needs to be updated. FIXME: ask the store for the
        # path.
        self.log("web server logs are:")
        http_logs = self.get_readonly_server().logs
        self.log('\n'.join(http_logs))
        # unfortunately this log entry is branch format specific. We could
        # factor out the 'what files does this format use' to a method on the
        # repository, which would let us do this generically. RBC 20060419
        # RBC 20080408: Or perhaps we can assert that no files are fully read
        # twice?
        self.assertEqual(1, self._count_log_matches('/ce/id.kndx', http_logs))
        self.assertEqual(1, self._count_log_matches('/ce/id.knit', http_logs))
        self.assertEqual(1, self._count_log_matches('inventory.kndx', http_logs))
        # this r-h check test will prevent regressions, but it currently already
        # passes, before the patch to cache-rh is applied :[
        self.assertTrue(1 >= self._count_log_matches('revision-history',
                                                     http_logs))
        self.assertTrue(1 >= self._count_log_matches('last-revision',
                                                     http_logs))
        # FIXME naughty poking in there.
        self.get_readonly_server().logs = []
        # check there is nothing more to fetch. We take care to re-use the
        # existing transport so that the request logs we're about to examine
        # aren't cluttered with redundant probes for a smart server.
        # XXX: Perhaps this needs further parameterisation: test http with a
        # smart server, and test http without a smart server?
        source = Branch.open(
            self.get_readonly_url("source/"),
            possible_transports=[source.bzrdir.root_transport])
        self.assertEqual(target.fetch(source), (0, []))
        # should make just two requests
        http_logs = self.get_readonly_server().logs
        self.log("web server logs are:")
        self.log('\n'.join(http_logs))
        self.assertEqual(1, self._count_log_matches('branch-format', http_logs))
        self.assertEqual(1, self._count_log_matches('branch/format', http_logs))
        self.assertEqual(1, self._count_log_matches('repository/format',
            http_logs))
        self.assertEqual(1, self._count_log_matches('revisions.kndx',
            http_logs))
        self.assertTrue(1 >= self._count_log_matches('revision-history',
                                                     http_logs))
        self.assertTrue(1 >= self._count_log_matches('last-revision',
                                                     http_logs))
        self.assertLength(5, http_logs)


class TestKnitToPackFetch(TestCaseWithTransport):

    def find_get_record_stream(self, calls, expected_count=1):
        """In a list of calls, find the last 'get_record_stream'.

        :param expected_count: The number of calls we should expect to find.
            If a different number is found, an assertion is raised.
        """
        get_record_call = None
        call_count = 0
        for call in calls:
            if call[0] == 'get_record_stream':
                call_count += 1
                get_record_call = call
        self.assertEqual(expected_count, call_count)
        return get_record_call

    def test_fetch_with_deltas_no_delta_closure(self):
        tree = self.make_branch_and_tree('source', format='dirstate')
        target = self.make_repository('target', format='pack-0.92')
        self.build_tree(['source/file'])
        tree.set_root_id('root-id')
        tree.add('file', 'file-id')
        tree.commit('one', rev_id='rev-one')
        source = tree.branch.repository
        source.texts = versionedfile.RecordingVersionedFilesDecorator(
            source.texts)
        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
            source.signatures)
        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
            source.revisions)
        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
            source.inventories)
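        # The recording decorators keep a .calls list, so the assertions
        # below can inspect exactly which get_record_stream requests the
        # fetch made against each versioned-file store.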
        self.assertTrue(target._format._fetch_uses_deltas)
        target.fetch(source, revision_id='rev-one')
        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.texts.calls))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.inventories.calls, 2))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(source.revisions.calls))
        # XXX: Signatures is special, and slightly broken. The
        # standard item_keys_introduced_by actually does a lookup for every
        # signature to see if it exists, rather than waiting to do them all at
        # once at the end. The fetch code then does an all-at-once and just
        # allows for some of them to be missing.
        # So we know there will be extra calls, but the *last* one is the one
        # we care about.
        signature_calls = source.signatures.calls[-1:]
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, False),
                         self.find_get_record_stream(signature_calls))

    def test_fetch_no_deltas_with_delta_closure(self):
        tree = self.make_branch_and_tree('source', format='dirstate')
        target = self.make_repository('target', format='pack-0.92')
        self.build_tree(['source/file'])
        tree.set_root_id('root-id')
        tree.add('file', 'file-id')
        tree.commit('one', rev_id='rev-one')
        source = tree.branch.repository
        source.texts = versionedfile.RecordingVersionedFilesDecorator(
            source.texts)
        source.signatures = versionedfile.RecordingVersionedFilesDecorator(
            source.signatures)
        source.revisions = versionedfile.RecordingVersionedFilesDecorator(
            source.revisions)
        source.inventories = versionedfile.RecordingVersionedFilesDecorator(
            source.inventories)
        # XXX: This won't work in general, but for the dirstate format it does.
        self.overrideAttr(target._format, '_fetch_uses_deltas', False)
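        # With _fetch_uses_deltas disabled, fetch should request a delta
        # closure from each store (the trailing True in the expected calls
        # below), so every record can be expanded to a fulltext.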
        target.fetch(source, revision_id='rev-one')
        self.assertEqual(('get_record_stream', [('file-id', 'rev-one')],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.texts.calls))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.inventories.calls, 2))
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(source.revisions.calls))
        # XXX: Signatures is special, and slightly broken. The
        # standard item_keys_introduced_by actually does a lookup for every
        # signature to see if it exists, rather than waiting to do them all at
        # once at the end. The fetch code then does an all-at-once and just
        # allows for some of them to be missing.
        # So we know there will be extra calls, but the *last* one is the one
        # we care about.
        signature_calls = source.signatures.calls[-1:]
        self.assertEqual(('get_record_stream', [('rev-one',)],
                          target._format._fetch_order, True),
                         self.find_get_record_stream(signature_calls))

    def test_fetch_revisions_with_deltas_into_pack(self):
        # See BUG #261339, dev versions of bzr could accidentally create deltas
        # in revision texts in knit branches (when fetching from packs). So we
        # ensure that *if* a knit repository has a delta in revisions, that it
        # gets properly expanded back into a fulltext when stored in the pack
        # repository.
        tree = self.make_branch_and_tree('source', format='dirstate')
        target = self.make_repository('target', format='pack-0.92')
        self.build_tree(['source/file'])
        tree.set_root_id('root-id')
        tree.add('file', 'file-id')
        tree.commit('one', rev_id='rev-one')
        # Hack the KVF for revisions so that it "accidentally" allows a delta
        tree.branch.repository.revisions._max_delta_chain = 200
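        # (Revision texts are normally stored as fulltexts; raising the
        # maximum delta chain length lets the next commit be stored as a
        # delta against 'rev-one'.)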
        tree.commit('two', rev_id='rev-two')
        source = tree.branch.repository
        # Ensure that we stored a delta
        source.lock_read()
        self.addCleanup(source.unlock)
        record = source.revisions.get_record_stream([('rev-two',)],
                                                    'unordered', False).next()
        self.assertEqual('knit-delta-gz', record.storage_kind)
        target.fetch(tree.branch.repository, revision_id='rev-two')
        # The record should get expanded back to a fulltext
        target.lock_read()
        self.addCleanup(target.unlock)
        record = target.revisions.get_record_stream([('rev-two',)],
                                                    'unordered', False).next()
        self.assertEqual('knit-ft-gz', record.storage_kind)

    def test_fetch_with_fallback_and_merge(self):
        builder = self.make_branch_builder('source', format='pack-0.92')
        builder.start_series()
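        # Revision graph, as reconstructed from the build_snapshot calls below
        # (A is the root; F merges E and B):
        #   A
        #   |\
        #   B C
        #   | |
        #   | D
        #   | |
        #   | E
        #   |/
        #   F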
        # A & B are present in the base (stacked-on) repository, A-E are
        # present in the source.
        # This reproduces bug #304841
        # We need a large enough inventory that total size of compressed deltas
        # is shorter than the size of a compressed fulltext. We have to use
        # random ids because otherwise the inventory fulltext compresses too
        # well and the deltas get bigger.
        to_add = [
            ('add', ('', 'TREE_ROOT', 'directory', None))]
        for i in range(10):  # assumed file count; enough files to keep the
                             # deltas smaller than a single fulltext
            fname = 'file%03d' % (i,)
            fileid = '%s-%s' % (fname, osutils.rand_chars(64))
            to_add.append(('add', (fname, fileid, 'file', 'content\n')))
        builder.build_snapshot('A', None, to_add)
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['A'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.build_snapshot('E', ['D'], [])
        builder.build_snapshot('F', ['E', 'B'], [])
        builder.finish_series()
        source_branch = builder.get_branch()
        source_branch.bzrdir.sprout('base', revision_id='B')
        target_branch = self.make_branch('target', format='1.6')
        target_branch.set_stacked_on_url('../base')
        source = source_branch.repository
        source.lock_read()
        self.addCleanup(source.unlock)
        source.inventories = versionedfile.OrderingVersionedFilesDecorator(
            source.inventories,
            key_priority={('E',): 1, ('D',): 2, ('C',): 4,
                          ('F',): 3})
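        # The ordering decorator yields records by key_priority rather than in
        # the requested order, forcing 'C' to arrive last (see the assertion
        # on the yielded order just below).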
        # Ensure that the content is yielded in the proper order, and given as
        # the expected storage kinds.
        records = [(record.key, record.storage_kind)
                   for record in source.inventories.get_record_stream(
                        [('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
        self.assertEqual([(('E',), 'knit-delta-gz'), (('D',), 'knit-delta-gz'),
                          (('F',), 'knit-delta-gz'), (('C',), 'knit-delta-gz')],
                         records)
        target_branch.lock_write()
        self.addCleanup(target_branch.unlock)
        target = target_branch.repository
        target.fetch(source, revision_id='F')
        # 'C' should be expanded to a fulltext, but D and E should still be
        # deltas.
        stream = target.inventories.get_record_stream(
            [('C',), ('D',), ('E',), ('F',)],
            'unordered', False)
        kinds = dict((record.key, record.storage_kind) for record in stream)
        self.assertEqual({('C',): 'knit-ft-gz', ('D',): 'knit-delta-gz',
                          ('E',): 'knit-delta-gz', ('F',): 'knit-delta-gz'},
                         kinds)


class Test1To2Fetch(TestCaseWithTransport):
    """Tests for Model1To2 failure modes"""

    def make_tree_and_repo(self):
        self.tree = self.make_branch_and_tree('tree', format='pack-0.92')
        self.repo = self.make_repository('rich-repo', format='rich-root-pack')
        self.repo.lock_write()
        self.addCleanup(self.repo.unlock)

    def do_fetch_order_test(self, first, second):
        """Test that fetch works no matter what the set order of revisions is.

        This test depends on the order of items in a set, which is
        implementation-dependent, so we test A, B and then B, A.
        """
        self.make_tree_and_repo()
        self.tree.commit('Commit 1', rev_id=first)
        self.tree.commit('Commit 2', rev_id=second)
        self.repo.fetch(self.tree.branch.repository, second)

    def test_fetch_order_AB(self):
        """See do_fetch_order_test"""
        self.do_fetch_order_test('A', 'B')

    def test_fetch_order_BA(self):
        """See do_fetch_order_test"""
        self.do_fetch_order_test('B', 'A')

    def get_parents(self, file_id, revision_id):
        self.repo.lock_read()
        self.addCleanup(self.repo.unlock)
        parent_map = self.repo.texts.get_parent_map([(file_id, revision_id)])
        return parent_map[(file_id, revision_id)]

    def test_fetch_ghosts(self):
        self.make_tree_and_repo()
        self.tree.commit('first commit', rev_id='left-parent')
        self.tree.add_parent_tree_id('ghost-parent')
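        # 'ghost-parent' is recorded as a parent even though no revision with
        # that id exists yet, which is what makes it a ghost.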
568
fork = self.tree.bzrdir.sprout('fork', 'null:').open_workingtree()
569
fork.commit('not a ghost', rev_id='not-ghost-parent')
570
self.tree.branch.repository.fetch(fork.branch.repository,
572
self.tree.add_parent_tree_id('not-ghost-parent')
573
self.tree.commit('second commit', rev_id='second-id')
574
self.repo.fetch(self.tree.branch.repository, 'second-id')
575
root_id = self.tree.get_root_id()
577
((root_id, 'left-parent'), (root_id, 'not-ghost-parent')),
578
self.get_parents(root_id, 'second-id'))

    def make_two_commits(self, change_root, fetch_twice):
        self.make_tree_and_repo()
        self.tree.commit('first commit', rev_id='first-id')
        if change_root:
            self.tree.set_root_id('unique-id')
        self.tree.commit('second commit', rev_id='second-id')
        if fetch_twice:
            self.repo.fetch(self.tree.branch.repository, 'first-id')
        self.repo.fetch(self.tree.branch.repository, 'second-id')
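        # When change_root is True the second commit introduces a new root
        # file-id ('unique-id'), so its root text has no per-file parents;
        # the tests below assert exactly that.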

    def test_fetch_changed_root(self):
        self.make_two_commits(change_root=True, fetch_twice=False)
        self.assertEqual((), self.get_parents('unique-id', 'second-id'))

    def test_two_fetch_changed_root(self):
        self.make_two_commits(change_root=True, fetch_twice=True)
        self.assertEqual((), self.get_parents('unique-id', 'second-id'))

    def test_two_fetches(self):
        self.make_two_commits(change_root=False, fetch_twice=True)
        self.assertEqual((('TREE_ROOT', 'first-id'),),
                         self.get_parents('TREE_ROOT', 'second-id'))