327
319
transport.append_bytes(packname, bytes)
328
320
writer = pack.ContainerWriter(write_data)
330
access = pack_repo._DirectPackAccess({})
322
access = _DirectPackAccess({})
331
323
access.set_writer(writer, index, (transport, packname))
332
324
return access, writer
334
def make_pack_file(self):
335
"""Create a pack file with 2 records."""
336
access, writer = self._get_access(packname='packname', index='foo')
338
memos.extend(access.add_raw_records([('key1', 10)], '1234567890'))
339
memos.extend(access.add_raw_records([('key2', 5)], '12345'))
343
def test_pack_collection_pack_retries(self):
344
"""An explicit pack of a pack collection succeeds even when a
345
concurrent pack happens.
347
builder = self.make_branch_builder('.')
348
builder.start_series()
349
builder.build_snapshot('rev-1', None, [
350
('add', ('', 'root-id', 'directory', None)),
351
('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
353
builder.build_snapshot('rev-2', ['rev-1'], [
354
('modify', ('file-id', 'content\nrev 2\n')),
356
builder.build_snapshot('rev-3', ['rev-2'], [
357
('modify', ('file-id', 'content\nrev 3\n')),
359
self.addCleanup(builder.finish_series)
360
b = builder.get_branch()
361
self.addCleanup(b.lock_write().unlock)
363
collection = repo._pack_collection
364
# Concurrently repack the repo.
365
reopened_repo = repo.bzrdir.open_repository()
370
def make_vf_for_retrying(self):
371
"""Create 3 packs and a reload function.
373
Originally, 2 pack files will have the data, but one will be missing.
374
And then the third will be used in place of the first two if reload()
377
:return: (versioned_file, reload_counter)
378
versioned_file a KnitVersionedFiles using the packs for access
380
builder = self.make_branch_builder('.', format="1.9")
381
builder.start_series()
382
builder.build_snapshot('rev-1', None, [
383
('add', ('', 'root-id', 'directory', None)),
384
('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
386
builder.build_snapshot('rev-2', ['rev-1'], [
387
('modify', ('file-id', 'content\nrev 2\n')),
389
builder.build_snapshot('rev-3', ['rev-2'], [
390
('modify', ('file-id', 'content\nrev 3\n')),
392
builder.finish_series()
393
b = builder.get_branch()
395
self.addCleanup(b.unlock)
396
# Pack these three revisions into another pack file, but don't remove
399
collection = repo._pack_collection
400
collection.ensure_loaded()
401
orig_packs = collection.packs
402
packer = knitpack_repo.KnitPacker(collection, orig_packs, '.testpack')
403
new_pack = packer.pack()
404
# forget about the new pack
408
# Set up a reload() function that switches to using the new pack file
409
new_index = new_pack.revision_index
410
access_tuple = new_pack.access_tuple()
411
reload_counter = [0, 0, 0]
413
reload_counter[0] += 1
414
if reload_counter[1] > 0:
415
# We already reloaded, nothing more to do
416
reload_counter[2] += 1
418
reload_counter[1] += 1
419
vf._index._graph_index._indices[:] = [new_index]
420
vf._access._indices.clear()
421
vf._access._indices[new_index] = access_tuple
423
# Delete one of the pack files so the data will need to be reloaded. We
424
# will delete the file with 'rev-2' in it
425
trans, name = orig_packs[1].access_tuple()
427
# We don't have the index trigger reloading because we want to test
428
# that we reload when the .pack disappears
429
vf._access._reload_func = reload
430
return vf, reload_counter
432
def make_reload_func(self, return_val=True):
435
reload_called[0] += 1
437
return reload_called, reload
439
def make_retry_exception(self):
440
# We raise a real exception so that sys.exc_info() is properly
443
raise _TestException('foobar')
444
except _TestException, e:
445
retry_exc = errors.RetryWithNewPacks(None, reload_occurred=False,
446
exc_info=sys.exc_info())
449
326
def test_read_from_several_packs(self):
450
327
access, writer = self._get_access()
488
365
self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
490
def test_missing_index_raises_retry(self):
    """A memo naming an unknown index raises RetryWithNewPacks."""
    record_memos = self.make_pack_file()
    # The records were written under index key 'foo'; register the pack
    # under 'bar' instead so that every lookup misses.
    calls, reload_func = self.make_reload_func()
    access = pack_repo._DirectPackAccess(
        {'bar': (self.get_transport(), 'packname')},
        reload_func=reload_func)
    err = self.assertListRaises(errors.RetryWithNewPacks,
                                access.get_raw_records, record_memos)
    # An index key we have never heard of means the pack listing must
    # already have been refreshed, so the exception reports a reload.
    self.assertTrue(err.reload_occurred)
    self.assertIsInstance(err.exc_info, tuple)
    self.assertIs(err.exc_info[0], KeyError)
    self.assertIsInstance(err.exc_info[1], KeyError)
506
def test_missing_index_raises_key_error_with_no_reload(self):
    """Without a reload_func a bad index key surfaces as a plain KeyError."""
    record_memos = self.make_pack_file()
    # Register the pack under 'bar' while the memos still reference 'foo'.
    access = pack_repo._DirectPackAccess(
        {'bar': (self.get_transport(), 'packname')})
    self.assertListRaises(KeyError, access.get_raw_records, record_memos)
513
def test_missing_file_raises_retry(self):
    """A missing .pack file raises RetryWithNewPacks asking for a reload."""
    record_memos = self.make_pack_file()
    calls, reload_func = self.make_reload_func()
    # Point the index at a pack file name that does not exist on disk.
    access = pack_repo._DirectPackAccess(
        {'foo': (self.get_transport(), 'different-packname')},
        reload_func=reload_func)
    err = self.assertListRaises(errors.RetryWithNewPacks,
                                access.get_raw_records, record_memos)
    # The file itself has vanished, so no reload has happened yet and the
    # exception asks the caller to perform one.
    self.assertFalse(err.reload_occurred)
    self.assertIsInstance(err.exc_info, tuple)
    self.assertIs(err.exc_info[0], errors.NoSuchFile)
    self.assertIsInstance(err.exc_info[1], errors.NoSuchFile)
    self.assertEqual('different-packname', err.exc_info[1].path)
530
def test_missing_file_raises_no_such_file_with_no_reload(self):
    """Without a reload_func a missing .pack surfaces as NoSuchFile."""
    record_memos = self.make_pack_file()
    # Point the index at a pack file name that does not exist on disk.
    access = pack_repo._DirectPackAccess(
        {'foo': (self.get_transport(), 'different-packname')})
    self.assertListRaises(errors.NoSuchFile,
                          access.get_raw_records, record_memos)
539
def test_failing_readv_raises_retry(self):
    """A readv() that fails mid-stream raises RetryWithNewPacks."""
    record_memos = self.make_pack_file()
    real_transport = self.get_transport()
    # The mock serves the pack content but fails multi-offset readv()s
    # part-way through.
    flaky_transport = MockReadvFailingTransport(
        [real_transport.get_bytes('packname')])
    calls, reload_func = self.make_reload_func()
    access = pack_repo._DirectPackAccess(
        {'foo': (flaky_transport, 'packname')},
        reload_func=reload_func)
    # Single-record requests stay below the mock's failure threshold.
    self.assertEqual(['1234567890'],
                     list(access.get_raw_records(record_memos[:1])))
    self.assertEqual(['12345'],
                     list(access.get_raw_records(record_memos[1:2])))
    # Requesting everything at once triggers the mid-way failure.
    err = self.assertListRaises(errors.RetryWithNewPacks,
                                access.get_raw_records, record_memos)
    # The failure looks like a vanished file, so a reload is requested.
    self.assertFalse(err.reload_occurred)
    self.assertIsInstance(err.exc_info, tuple)
    self.assertIs(err.exc_info[0], errors.NoSuchFile)
    self.assertIsInstance(err.exc_info[1], errors.NoSuchFile)
    self.assertEqual('packname', err.exc_info[1].path)
563
def test_failing_readv_raises_no_such_file_with_no_reload(self):
    """Without a reload_func a mid-stream readv() failure is NoSuchFile."""
    record_memos = self.make_pack_file()
    real_transport = self.get_transport()
    flaky_transport = MockReadvFailingTransport(
        [real_transport.get_bytes('packname')])
    # A reload_func is created but deliberately not passed to the access
    # object, so the raw NoSuchFile must propagate.
    calls, reload_func = self.make_reload_func()
    access = pack_repo._DirectPackAccess(
        {'foo': (flaky_transport, 'packname')})
    # Single-record requests stay below the mock's failure threshold.
    self.assertEqual(['1234567890'],
                     list(access.get_raw_records(record_memos[:1])))
    self.assertEqual(['12345'],
                     list(access.get_raw_records(record_memos[1:2])))
    # Requesting everything at once triggers the mid-way failure.
    self.assertListRaises(errors.NoSuchFile,
                          access.get_raw_records, record_memos)
580
def test_reload_or_raise_no_reload(self):
    """With no reload_func, reload_or_raise re-raises the original error."""
    access = pack_repo._DirectPackAccess({}, reload_func=None)
    retry_exc = self.make_retry_exception()
    self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
586
def test_reload_or_raise_reload_changed(self):
    """When reload_func reports a change, reload_or_raise swallows the error."""
    calls, reload_func = self.make_reload_func(return_val=True)
    access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
    retry_exc = self.make_retry_exception()
    access.reload_or_raise(retry_exc)
    self.assertEqual([1], calls)
    # Even after a reload has already occurred, a changed pack list means
    # another retry is worthwhile rather than raising.
    retry_exc.reload_occurred = True
    access.reload_or_raise(retry_exc)
    self.assertEqual([2], calls)
596
def test_reload_or_raise_reload_no_change(self):
    """A no-op reload only raises if no reload had occurred before."""
    calls, reload_func = self.make_reload_func(return_val=False)
    access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
    retry_exc = self.make_retry_exception()
    # reload_occurred is False and reload_func() found nothing new, so
    # retrying cannot help: the original exception must be raised.
    self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
    self.assertEqual([1], calls)
    # But if a reload already happened earlier, an unchanged listing is
    # expected and reload_or_raise should return normally.
    retry_exc.reload_occurred = True
    access.reload_or_raise(retry_exc)
    self.assertEqual([2], calls)
610
def test_annotate_retries(self):
611
vf, reload_counter = self.make_vf_for_retrying()
612
# It is a little bit bogus to annotate the Revision VF, but it works,
613
# as we have ancestry stored there
615
reload_lines = vf.annotate(key)
616
self.assertEqual([1, 1, 0], reload_counter)
617
plain_lines = vf.annotate(key)
618
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
619
if reload_lines != plain_lines:
620
self.fail('Annotation was not identical with reloading.')
621
# Now delete the packs-in-use, which should trigger another reload, but
622
# this time we just raise an exception because we can't recover
623
for trans, name in vf._access._indices.itervalues():
625
self.assertRaises(errors.NoSuchFile, vf.annotate, key)
626
self.assertEqual([2, 1, 1], reload_counter)
628
def test__get_record_map_retries(self):
629
vf, reload_counter = self.make_vf_for_retrying()
630
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
631
records = vf._get_record_map(keys)
632
self.assertEqual(keys, sorted(records.keys()))
633
self.assertEqual([1, 1, 0], reload_counter)
634
# Now delete the packs-in-use, which should trigger another reload, but
635
# this time we just raise an exception because we can't recover
636
for trans, name in vf._access._indices.itervalues():
638
self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
639
self.assertEqual([2, 1, 1], reload_counter)
641
def test_get_record_stream_retries(self):
642
vf, reload_counter = self.make_vf_for_retrying()
643
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
644
record_stream = vf.get_record_stream(keys, 'topological', False)
645
record = record_stream.next()
646
self.assertEqual(('rev-1',), record.key)
647
self.assertEqual([0, 0, 0], reload_counter)
648
record = record_stream.next()
649
self.assertEqual(('rev-2',), record.key)
650
self.assertEqual([1, 1, 0], reload_counter)
651
record = record_stream.next()
652
self.assertEqual(('rev-3',), record.key)
653
self.assertEqual([1, 1, 0], reload_counter)
654
# Now delete all pack files, and see that we raise the right error
655
for trans, name in vf._access._indices.itervalues():
657
self.assertListRaises(errors.NoSuchFile,
658
vf.get_record_stream, keys, 'topological', False)
660
def test_iter_lines_added_or_present_in_keys_retries(self):
661
vf, reload_counter = self.make_vf_for_retrying()
662
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
663
# Unfortunately, iter_lines_added_or_present_in_keys iterates the
664
# result in random order (determined by the iteration order from a
665
# set()), so we don't have any solid way to trigger whether data is
666
# read before or after. However we tried to delete the middle node to
667
# exercise the code well.
668
# What we care about is that all lines are always yielded, but not
671
reload_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
672
self.assertEqual([1, 1, 0], reload_counter)
673
# Now do it again, to make sure the result is equivalent
674
plain_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
675
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
676
self.assertEqual(plain_lines, reload_lines)
677
self.assertEqual(21, len(plain_lines))
678
# Now delete all pack files, and see that we raise the right error
679
for trans, name in vf._access._indices.itervalues():
681
self.assertListRaises(errors.NoSuchFile,
682
vf.iter_lines_added_or_present_in_keys, keys)
683
self.assertEqual([2, 1, 1], reload_counter)
685
def test_get_record_stream_yields_disk_sorted_order(self):
686
# if we get 'unordered' pick a semi-optimal order for reading. The
687
# order should be grouped by pack file, and then by position in file
688
repo = self.make_repository('test', format='pack-0.92')
690
self.addCleanup(repo.unlock)
691
repo.start_write_group()
693
vf.add_lines(('f-id', 'rev-5'), [('f-id', 'rev-4')], ['lines\n'])
694
vf.add_lines(('f-id', 'rev-1'), [], ['lines\n'])
695
vf.add_lines(('f-id', 'rev-2'), [('f-id', 'rev-1')], ['lines\n'])
696
repo.commit_write_group()
697
# We inserted them as rev-5, rev-1, rev-2, we should get them back in
699
stream = vf.get_record_stream([('f-id', 'rev-1'), ('f-id', 'rev-5'),
700
('f-id', 'rev-2')], 'unordered', False)
701
keys = [r.key for r in stream]
702
self.assertEqual([('f-id', 'rev-5'), ('f-id', 'rev-1'),
703
('f-id', 'rev-2')], keys)
704
repo.start_write_group()
705
vf.add_lines(('f-id', 'rev-4'), [('f-id', 'rev-3')], ['lines\n'])
706
vf.add_lines(('f-id', 'rev-3'), [('f-id', 'rev-2')], ['lines\n'])
707
vf.add_lines(('f-id', 'rev-6'), [('f-id', 'rev-5')], ['lines\n'])
708
repo.commit_write_group()
709
# Request in random order, to make sure the output order isn't based on
711
request_keys = set(('f-id', 'rev-%d' % i) for i in range(1, 7))
712
stream = vf.get_record_stream(request_keys, 'unordered', False)
713
keys = [r.key for r in stream]
714
# We want to get the keys back in disk order, but it doesn't matter
715
# which pack we read from first. So this can come back in 2 orders
716
alt1 = [('f-id', 'rev-%d' % i) for i in [4, 3, 6, 5, 1, 2]]
717
alt2 = [('f-id', 'rev-%d' % i) for i in [5, 1, 2, 4, 3, 6]]
718
if keys != alt1 and keys != alt2:
719
self.fail('Returned key order did not match either expected order.'
720
' expected %s or %s, not %s'
721
% (alt1, alt2, keys))
724
368
class LowLevelKnitDataTests(TestCase):
1320
901
class LowLevelKnitIndexTests_c(LowLevelKnitIndexTests):
1322
_test_needs_features = [compiled_knit_feature]
903
_test_needs_features = [CompiledKnitFeature]
1324
905
def get_knit_index(self, transport, name, mode):
1325
906
mapper = ConstantMapper(name)
1326
from bzrlib._knit_load_data_pyx import _load_data_c
1327
self.overrideAttr(knit, '_load_data', _load_data_c)
907
orig = knit._load_data
909
knit._load_data = orig
910
self.addCleanup(reset)
911
from bzrlib._knit_load_data_c import _load_data_c
912
knit._load_data = _load_data_c
1328
913
allow_writes = lambda: mode == 'w'
1329
return _KndxIndex(transport, mapper, lambda:None,
1330
allow_writes, lambda:True)
1333
class Test_KnitAnnotator(TestCaseWithMemoryTransport):
1335
def make_annotator(self):
    """Return a _KnitAnnotator over a fresh pack-backed versioned file."""
    # (True, True, 1): annotated, with graph, key length 1 -- presumably
    # the standard single-key pack configuration; confirm against
    # knit.make_pack_factory.
    pack_factory = knit.make_pack_factory(True, True, 1)
    return knit._KnitAnnotator(pack_factory(self.get_transport()))
1340
def test__expand_fulltext(self):
    """Expanding a fulltext caches the content object and its text."""
    ann = self.make_annotator()
    rev_key = ('rev-id',)
    # A compression child exists, so the expanded content must be kept.
    ann._num_compression_children[rev_key] = 1
    expanded = ann._expand_record(rev_key, (('parent-id',),), None,
                                  ['line1\n', 'line2\n'],
                                  ('fulltext', True))
    # noeol=True strips the trailing newline from the last returned line,
    # while the stored content object keeps the raw lines.
    self.assertEqual(['line1\n', 'line2'], expanded)
    cached_content = ann._content_objects[rev_key]
    self.assertEqual(['line1\n', 'line2\n'], cached_content._lines)
    self.assertEqual(expanded, cached_content.text())
    self.assertEqual(expanded, ann._text_cache[rev_key])
1353
def test__expand_delta_comp_parent_not_available(self):
1354
# Parent isn't available yet, so we return nothing, but queue up this
1355
# node for later processing
1356
ann = self.make_annotator()
1357
rev_key = ('rev-id',)
1358
parent_key = ('parent-id',)
1359
record = ['0,1,1\n', 'new-line\n']
1360
details = ('line-delta', False)
1361
res = ann._expand_record(rev_key, (parent_key,), parent_key,
1363
self.assertEqual(None, res)
1364
self.assertTrue(parent_key in ann._pending_deltas)
1365
pending = ann._pending_deltas[parent_key]
1366
self.assertEqual(1, len(pending))
1367
self.assertEqual((rev_key, (parent_key,), record, details), pending[0])
1369
def test__expand_record_tracks_num_children(self):
1370
ann = self.make_annotator()
1371
rev_key = ('rev-id',)
1372
rev2_key = ('rev2-id',)
1373
parent_key = ('parent-id',)
1374
record = ['0,1,1\n', 'new-line\n']
1375
details = ('line-delta', False)
1376
ann._num_compression_children[parent_key] = 2
1377
ann._expand_record(parent_key, (), None, ['line1\n', 'line2\n'],
1378
('fulltext', False))
1379
res = ann._expand_record(rev_key, (parent_key,), parent_key,
1381
self.assertEqual({parent_key: 1}, ann._num_compression_children)
1382
# Expanding the second child should remove the content object, and the
1383
# num_compression_children entry
1384
res = ann._expand_record(rev2_key, (parent_key,), parent_key,
1386
self.assertFalse(parent_key in ann._content_objects)
1387
self.assertEqual({}, ann._num_compression_children)
1388
# We should not cache the content_objects for rev2 and rev, because
1389
# they do not have compression children of their own.
1390
self.assertEqual({}, ann._content_objects)
1392
def test__expand_delta_records_blocks(self):
    """Expanding line-deltas records matching blocks against the parent."""
    ann = self.make_annotator()
    rev_key = ('rev-id',)
    parent_key = ('parent-id',)
    # Two children will be expanded against the same parent content.
    ann._num_compression_children[parent_key] = 2
    ann._expand_record(parent_key, (), None,
                       ['line1\n', 'line2\n', 'line3\n'],
                       ('fulltext', False))
    # First child: delta replaces one line, noeol flag set.
    ann._expand_record(rev_key, (parent_key,), parent_key,
                       ['0,1,1\n', 'new-line\n'], ('line-delta', True))
    self.assertEqual({(rev_key, parent_key): [(1, 1, 1), (3, 3, 0)]},
                     ann._matching_blocks)
    # Second child: same delta but without the noeol flag, which shifts
    # the length of the first matching block.
    rev2_key = ('rev2-id',)
    ann._expand_record(rev2_key, (parent_key,), parent_key,
                       ['0,1,1\n', 'new-line\n'], ('line-delta', False))
    self.assertEqual([(1, 1, 2), (3, 3, 0)],
                     ann._matching_blocks[(rev2_key, parent_key)])
1412
def test__get_parent_ann_uses_matching_blocks(self):
    """Cached matching blocks are consumed instead of re-diffing lines."""
    ann = self.make_annotator()
    rev_key = ('rev-id',)
    parent_key = ('parent-id',)
    cached_ann = [(parent_key,)] * 3
    ann._annotations_cache[parent_key] = cached_ann
    # Pre-seed the block cache; the parent's lines must never be needed.
    ann._matching_blocks[(rev_key, parent_key)] = [(0, 1, 1), (3, 3, 0)]
    par_ann, blocks = ann._get_parent_annotations_and_matches(
        rev_key, ['1\n', '2\n', '3\n'], parent_key)
    self.assertEqual(cached_ann, par_ann)
    self.assertEqual([(0, 1, 1), (3, 3, 0)], blocks)
    # The cached entry is popped once used.
    self.assertEqual({}, ann._matching_blocks)
1428
def test__process_pending(self):
1429
ann = self.make_annotator()
1430
rev_key = ('rev-id',)
1433
record = ['0,1,1\n', 'new-line\n']
1434
details = ('line-delta', False)
1435
p1_record = ['line1\n', 'line2\n']
1436
ann._num_compression_children[p1_key] = 1
1437
res = ann._expand_record(rev_key, (p1_key,p2_key), p1_key,
1439
self.assertEqual(None, res)
1440
# self.assertTrue(p1_key in ann._pending_deltas)
1441
self.assertEqual({}, ann._pending_annotation)
1442
# Now insert p1, and we should be able to expand the delta
1443
res = ann._expand_record(p1_key, (), None, p1_record,
1444
('fulltext', False))
1445
self.assertEqual(p1_record, res)
1446
ann._annotations_cache[p1_key] = [(p1_key,)]*2
1447
res = ann._process_pending(p1_key)
1448
self.assertEqual([], res)
1449
self.assertFalse(p1_key in ann._pending_deltas)
1450
self.assertTrue(p2_key in ann._pending_annotation)
1451
self.assertEqual({p2_key: [(rev_key, (p1_key, p2_key))]},
1452
ann._pending_annotation)
1453
# Now fill in parent 2, and pending annotation should be satisfied
1454
res = ann._expand_record(p2_key, (), None, [], ('fulltext', False))
1455
ann._annotations_cache[p2_key] = []
1456
res = ann._process_pending(p2_key)
1457
self.assertEqual([rev_key], res)
1458
self.assertEqual({}, ann._pending_annotation)
1459
self.assertEqual({}, ann._pending_deltas)
1461
def test_record_delta_removes_basis(self):
    """Set up a fulltext parent with two expected compression children."""
    ann = self.make_annotator()
    ann._expand_record(('parent-id',), (), None,
                       ['line1\n', 'line2\n'], ('fulltext', False))
    # NOTE(review): the key here is the bare string 'parent-id', not the
    # ('parent-id',) tuple used above -- confirm this mismatch is intended.
    ann._num_compression_children['parent-id'] = 2
1467
def test_annotate_special_text(self):
1468
ann = self.make_annotator()
1470
rev1_key = ('rev-1',)
1471
rev2_key = ('rev-2',)
1472
rev3_key = ('rev-3',)
1473
spec_key = ('special:',)
1474
vf.add_lines(rev1_key, [], ['initial content\n'])
1475
vf.add_lines(rev2_key, [rev1_key], ['initial content\n',
1478
vf.add_lines(rev3_key, [rev1_key], ['initial content\n',
1481
spec_text = ('initial content\n'
1485
ann.add_special_text(spec_key, [rev2_key, rev3_key], spec_text)
1486
anns, lines = ann.annotate(spec_key)
1487
self.assertEqual([(rev1_key,),
1488
(rev2_key, rev3_key),
1492
self.assertEqualDiff(spec_text, ''.join(lines))
914
return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
1495
917
class KnitTests(TestCaseWithTransport):
1806
1205
# change options in the second record
1807
1206
self.assertRaises(errors.KnitCorrupt, index.add_records,
1808
1207
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)]),
1809
(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1208
(('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
1810
1209
self.assertEqual([], self.caught_entries)
1812
def make_g_index_missing_compression_parent(self):
1813
graph_index = self.make_g_index('missing_comp', 2,
1814
[(('tip', ), ' 100 78',
1815
([('missing-parent', ), ('ghost', )], [('missing-parent', )]))])
1818
def make_g_index_missing_parent(self):
1819
graph_index = self.make_g_index('missing_parent', 2,
1820
[(('parent', ), ' 100 78', ([], [])),
1821
(('tip', ), ' 100 78',
1822
([('parent', ), ('missing-parent', )], [('parent', )])),
1826
def make_g_index_no_external_refs(self):
1827
graph_index = self.make_g_index('no_external_refs', 2,
1828
[(('rev', ), ' 100 78',
1829
([('parent', ), ('ghost', )], []))])
1832
def test_add_good_unvalidated_index(self):
    """Scanning an index with no external refs reports nothing missing."""
    unvalidated = self.make_g_index_no_external_refs()
    combined = CombinedGraphIndex([unvalidated])
    knit_index = _KnitGraphIndex(combined, lambda: True, deltas=True)
    knit_index.scan_unvalidated_index(unvalidated)
    self.assertEqual(frozenset(),
                     knit_index.get_missing_compression_parents())
1839
def test_add_missing_compression_parent_unvalidated_index(self):
1840
unvalidated = self.make_g_index_missing_compression_parent()
1841
combined = CombinedGraphIndex([unvalidated])
1842
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1843
index.scan_unvalidated_index(unvalidated)
1844
# This also checks that its only the compression parent that is
1845
# examined, otherwise 'ghost' would also be reported as a missing
1848
frozenset([('missing-parent',)]),
1849
index.get_missing_compression_parents())
1851
def test_add_missing_noncompression_parent_unvalidated_index(self):
1852
unvalidated = self.make_g_index_missing_parent()
1853
combined = CombinedGraphIndex([unvalidated])
1854
index = _KnitGraphIndex(combined, lambda: True, deltas=True,
1855
track_external_parent_refs=True)
1856
index.scan_unvalidated_index(unvalidated)
1858
frozenset([('missing-parent',)]), index.get_missing_parents())
1860
def test_track_external_parent_refs(self):
1861
g_index = self.make_g_index('empty', 2, [])
1862
combined = CombinedGraphIndex([g_index])
1863
index = _KnitGraphIndex(combined, lambda: True, deltas=True,
1864
add_callback=self.catch_add, track_external_parent_refs=True)
1865
self.caught_entries = []
1867
(('new-key',), 'fulltext,no-eol', (None, 50, 60),
1868
[('parent-1',), ('parent-2',)])])
1870
frozenset([('parent-1',), ('parent-2',)]),
1871
index.get_missing_parents())
1873
def test_add_unvalidated_index_with_present_external_references(self):
    """External refs satisfied by a sibling index are not reported missing."""
    index = self.two_graph_index(deltas=True)
    # Reach into the CombinedGraphIndex that two_graph_index built to get
    # at its second underlying GraphIndex.
    unvalidated = index._graph_index._indices[1]
    # 'parent' is an external ref of _indices[1], but _indices[0] supplies
    # it, so nothing should be flagged as missing.
    index.scan_unvalidated_index(unvalidated)
    self.assertEqual(frozenset(), index.get_missing_compression_parents())
1883
def make_new_missing_parent_g_index(self, name):
1884
missing_parent = name + '-missing-parent'
1885
graph_index = self.make_g_index(name, 2,
1886
[((name + 'tip', ), ' 100 78',
1887
([(missing_parent, ), ('ghost', )], [(missing_parent, )]))])
1890
def test_add_mulitiple_unvalidated_indices_with_missing_parents(self):
1891
g_index_1 = self.make_new_missing_parent_g_index('one')
1892
g_index_2 = self.make_new_missing_parent_g_index('two')
1893
combined = CombinedGraphIndex([g_index_1, g_index_2])
1894
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1895
index.scan_unvalidated_index(g_index_1)
1896
index.scan_unvalidated_index(g_index_2)
1898
frozenset([('one-missing-parent',), ('two-missing-parent',)]),
1899
index.get_missing_compression_parents())
1901
def test_add_mulitiple_unvalidated_indices_with_mutual_dependencies(self):
1902
graph_index_a = self.make_g_index('one', 2,
1903
[(('parent-one', ), ' 100 78', ([('non-compression-parent',)], [])),
1904
(('child-of-two', ), ' 100 78',
1905
([('parent-two',)], [('parent-two',)]))])
1906
graph_index_b = self.make_g_index('two', 2,
1907
[(('parent-two', ), ' 100 78', ([('non-compression-parent',)], [])),
1908
(('child-of-one', ), ' 100 78',
1909
([('parent-one',)], [('parent-one',)]))])
1910
combined = CombinedGraphIndex([graph_index_a, graph_index_b])
1911
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1912
index.scan_unvalidated_index(graph_index_a)
1913
index.scan_unvalidated_index(graph_index_b)
1915
frozenset([]), index.get_missing_compression_parents())
1918
1212
class TestNoParentsGraphIndexKnit(KnitTests):
1919
1213
"""Tests for knits using _KnitGraphIndex with no parents."""
2093
1379
self.assertEqual([], self.caught_entries)
2096
class TestKnitVersionedFiles(KnitTests):
2098
def assertGroupKeysForIo(self, exp_groups, keys, non_local_keys,
                         positions, _min_buffer_size=None):
    """Assert _group_keys_for_io yields exp_groups for the given inputs.

    :param exp_groups: the expected grouping result.
    :param keys: keys to group.
    :param non_local_keys: keys that are not locally available.
    :param positions: key -> (details, position, parent) map.
    :param _min_buffer_size: buffer-size threshold; defaults to the
        module-level knit._STREAM_MIN_BUFFER_SIZE.
    """
    kvf = self.make_test_knit()
    if _min_buffer_size is None:
        _min_buffer_size = knit._STREAM_MIN_BUFFER_SIZE
    actual = kvf._group_keys_for_io(keys, non_local_keys, positions,
                                    _min_buffer_size=_min_buffer_size)
    self.assertEqual(exp_groups, actual)
2107
def assertSplitByPrefix(self, expected_map, expected_prefix_order,
2109
split, prefix_order = KnitVersionedFiles._split_by_prefix(keys)
2110
self.assertEqual(expected_map, split)
2111
self.assertEqual(expected_prefix_order, prefix_order)
2113
def test__group_keys_for_io(self):
2114
ft_detail = ('fulltext', False)
2115
ld_detail = ('line-delta', False)
2123
f_a: (ft_detail, (f_a, 0, 100), None),
2124
f_b: (ld_detail, (f_b, 100, 21), f_a),
2125
f_c: (ld_detail, (f_c, 180, 15), f_b),
2126
g_a: (ft_detail, (g_a, 121, 35), None),
2127
g_b: (ld_detail, (g_b, 156, 12), g_a),
2128
g_c: (ld_detail, (g_c, 195, 13), g_a),
2130
self.assertGroupKeysForIo([([f_a], set())],
2131
[f_a], [], positions)
2132
self.assertGroupKeysForIo([([f_a], set([f_a]))],
2133
[f_a], [f_a], positions)
2134
self.assertGroupKeysForIo([([f_a, f_b], set([]))],
2135
[f_a, f_b], [], positions)
2136
self.assertGroupKeysForIo([([f_a, f_b], set([f_b]))],
2137
[f_a, f_b], [f_b], positions)
2138
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
2139
[f_a, g_a, f_b, g_b], [], positions)
2140
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
2141
[f_a, g_a, f_b, g_b], [], positions,
2142
_min_buffer_size=150)
2143
self.assertGroupKeysForIo([([f_a, f_b], set()), ([g_a, g_b], set())],
2144
[f_a, g_a, f_b, g_b], [], positions,
2145
_min_buffer_size=100)
2146
self.assertGroupKeysForIo([([f_c], set()), ([g_b], set())],
2147
[f_c, g_b], [], positions,
2148
_min_buffer_size=125)
2149
self.assertGroupKeysForIo([([g_b, f_c], set())],
2150
[g_b, f_c], [], positions,
2151
_min_buffer_size=125)
2153
def test__split_by_prefix(self):
2154
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2155
'g': [('g', 'b'), ('g', 'a')],
2157
[('f', 'a'), ('g', 'b'),
2158
('g', 'a'), ('f', 'b')])
2160
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2161
'g': [('g', 'b'), ('g', 'a')],
2163
[('f', 'a'), ('f', 'b'),
2164
('g', 'b'), ('g', 'a')])
2166
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2167
'g': [('g', 'b'), ('g', 'a')],
2169
[('f', 'a'), ('f', 'b'),
2170
('g', 'b'), ('g', 'a')])
2172
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2173
'g': [('g', 'b'), ('g', 'a')],
2174
'': [('a',), ('b',)]
2176
[('f', 'a'), ('g', 'b'),
2178
('g', 'a'), ('f', 'b')])
2181
1382
class TestStacking(KnitTests):
2183
1384
def get_basis_and_test_knit(self):
2570
1760
multiparent.NewText(['foo\n']),
2571
1761
multiparent.ParentText(1, 0, 2, 1)])],
2573
self.assertEqual(3, len(basis.calls))
1763
self.assertEqual(4, len(basis.calls))
2574
1764
self.assertEqual([
2575
1765
("get_parent_map", set([key_left, key_right])),
2576
1766
("get_parent_map", set([key_left, key_right])),
1767
("get_parent_map", set([key_left, key_right])),
2579
last_call = basis.calls[-1]
1770
last_call = basis.calls[3]
2580
1771
self.assertEqual('get_record_stream', last_call[0])
2581
1772
self.assertEqual(set([key_left, key_right]), set(last_call[1]))
2582
self.assertEqual('topological', last_call[2])
1773
self.assertEqual('unordered', last_call[2])
2583
1774
self.assertEqual(True, last_call[3])
2586
class TestNetworkBehaviour(KnitTests):
2587
"""Tests for getting data out of/into knits over the network."""
2589
def test_include_delta_closure_generates_a_knit_delta_closure(self):
2590
vf = self.make_test_knit(name='test')
2591
# put in three texts, giving ft, delta, delta
2592
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2593
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2594
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2595
# But heuristics could interfere, so check what happened:
2596
self.assertEqual(['knit-ft-gz', 'knit-delta-gz', 'knit-delta-gz'],
2597
[record.storage_kind for record in
2598
vf.get_record_stream([('base',), ('d1',), ('d2',)],
2599
'topological', False)])
2600
# generate a stream of just the deltas include_delta_closure=True,
2601
# serialise to the network, and check that we get a delta closure on the wire.
2602
stream = vf.get_record_stream([('d1',), ('d2',)], 'topological', True)
2603
netb = [record.get_bytes_as(record.storage_kind) for record in stream]
2604
# The first bytes should be a memo from _ContentMapGenerator, and the
2605
# second bytes should be empty (because its a API proxy not something
2606
# for wire serialisation.
2607
self.assertEqual('', netb[1])
2609
kind, line_end = network_bytes_to_kind_and_offset(bytes)
2610
self.assertEqual('knit-delta-closure', kind)
2613
class TestContentMapGenerator(KnitTests):
2614
"""Tests for ContentMapGenerator"""
2616
def test_get_record_stream_gives_records(self):
2617
vf = self.make_test_knit(name='test')
2618
# put in three texts, giving ft, delta, delta
2619
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2620
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2621
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2622
keys = [('d1',), ('d2',)]
2623
generator = _VFContentMapGenerator(vf, keys,
2624
global_map=vf.get_parent_map(keys))
2625
for record in generator.get_record_stream():
2626
if record.key == ('d1',):
2627
self.assertEqual('d1\n', record.get_bytes_as('fulltext'))
2629
self.assertEqual('d2\n', record.get_bytes_as('fulltext'))
2631
def test_get_record_stream_kinds_are_raw(self):
2632
vf = self.make_test_knit(name='test')
2633
# put in three texts, giving ft, delta, delta
2634
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2635
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2636
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2637
keys = [('base',), ('d1',), ('d2',)]
2638
generator = _VFContentMapGenerator(vf, keys,
2639
global_map=vf.get_parent_map(keys))
2640
kinds = {('base',): 'knit-delta-closure',
2641
('d1',): 'knit-delta-closure-ref',
2642
('d2',): 'knit-delta-closure-ref',
2644
for record in generator.get_record_stream():
2645
self.assertEqual(kinds[record.key], record.storage_kind)