323
347
access.set_writer(writer, index, (transport, packname))
324
348
return access, writer
350
def make_pack_file(self):
351
"""Create a pack file with 2 records."""
352
access, writer = self._get_access(packname='packname', index='foo')
354
memos.extend(access.add_raw_records([('key1', 10)], '1234567890'))
355
memos.extend(access.add_raw_records([('key2', 5)], '12345'))
359
def make_vf_for_retrying(self):
360
"""Create 3 packs and a reload function.
362
Originally, 2 pack files will have the data, but one will be missing.
363
And then the third will be used in place of the first two if reload()
366
:return: (versioned_file, reload_counter)
367
versioned_file a KnitVersionedFiles using the packs for access
369
tree = self.make_branch_and_memory_tree('tree')
371
self.addCleanup(tree.unlock)
372
tree.add([''], ['root-id'])
373
tree.commit('one', rev_id='rev-1')
374
tree.commit('two', rev_id='rev-2')
375
tree.commit('three', rev_id='rev-3')
376
# Pack these three revisions into another pack file, but don't remove
378
repo = tree.branch.repository
379
collection = repo._pack_collection
380
collection.ensure_loaded()
381
orig_packs = collection.packs
382
packer = pack_repo.Packer(collection, orig_packs, '.testpack')
383
new_pack = packer.pack()
384
# forget about the new pack
387
vf = tree.branch.repository.revisions
388
# Set up a reload() function that switches to using the new pack file
389
new_index = new_pack.revision_index
390
access_tuple = new_pack.access_tuple()
391
reload_counter = [0, 0, 0]
393
reload_counter[0] += 1
394
if reload_counter[1] > 0:
395
# We already reloaded, nothing more to do
396
reload_counter[2] += 1
398
reload_counter[1] += 1
399
vf._index._graph_index._indices[:] = [new_index]
400
vf._access._indices.clear()
401
vf._access._indices[new_index] = access_tuple
403
# Delete one of the pack files so the data will need to be reloaded. We
404
# will delete the file with 'rev-2' in it
405
trans, name = orig_packs[1].access_tuple()
407
# We don't have the index trigger reloading because we want to test
408
# that we reload when the .pack disappears
409
vf._access._reload_func = reload
410
return vf, reload_counter
412
def make_reload_func(self, return_val=True):
415
reload_called[0] += 1
417
return reload_called, reload
419
def make_retry_exception(self):
420
# We raise a real exception so that sys.exc_info() is properly
423
raise _TestException('foobar')
424
except _TestException, e:
425
retry_exc = errors.RetryWithNewPacks(None, reload_occurred=False,
426
exc_info=sys.exc_info())
326
429
def test_read_from_several_packs(self):
327
430
access, writer = self._get_access()
365
468
self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
470
def test_missing_index_raises_retry(self):
    """A readv against an unknown index key raises RetryWithNewPacks.

    The access object was built with a reload_func, so the KeyError on
    the index mapping is wrapped in a retryable error.  Because the key
    itself is unknown, the error claims a reload already happened
    (reload_occurred=True).
    """
    memos = self.make_pack_file()
    transport = self.get_transport()
    reload_called, reload_func = self.make_reload_func()
    # Note that the index key has changed from 'foo' to 'bar'
    access = _DirectPackAccess({'bar':(transport, 'packname')},
                               reload_func=reload_func)
    e = self.assertListRaises(errors.RetryWithNewPacks,
                              access.get_raw_records, memos)
    # Because a key was passed in which does not match our index list, we
    # assume that the listing was already reloaded
    self.assertTrue(e.reload_occurred)
    # The original KeyError is preserved in exc_info for diagnostics.
    self.assertIsInstance(e.exc_info, tuple)
    self.assertIs(e.exc_info[0], KeyError)
    self.assertIsInstance(e.exc_info[1], KeyError)
486
def test_missing_index_raises_key_error_with_no_reload(self):
    """Without a reload_func, an unknown index key raises plain KeyError."""
    memos = self.make_pack_file()
    transport = self.get_transport()
    # Note that the index key has changed from 'foo' to 'bar'
    access = _DirectPackAccess({'bar':(transport, 'packname')})
    e = self.assertListRaises(KeyError, access.get_raw_records, memos)
493
def test_missing_file_raises_retry(self):
    """A missing .pack file raises RetryWithNewPacks when reload is possible.

    Unlike a bad index key, a vanished file sets reload_occurred=False:
    the caller is expected to reload the pack listing and retry.
    """
    memos = self.make_pack_file()
    transport = self.get_transport()
    reload_called, reload_func = self.make_reload_func()
    # Note that the 'filename' has been changed to 'different-packname'
    access = _DirectPackAccess({'foo':(transport, 'different-packname')},
                               reload_func=reload_func)
    e = self.assertListRaises(errors.RetryWithNewPacks,
                              access.get_raw_records, memos)
    # The file has gone missing, so we assume we need to reload
    self.assertFalse(e.reload_occurred)
    # The original NoSuchFile is preserved in exc_info, including the path.
    self.assertIsInstance(e.exc_info, tuple)
    self.assertIs(e.exc_info[0], errors.NoSuchFile)
    self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
    self.assertEqual('different-packname', e.exc_info[1].path)
509
def test_missing_file_raises_no_such_file_with_no_reload(self):
    """Without a reload_func, a missing .pack file raises NoSuchFile."""
    memos = self.make_pack_file()
    transport = self.get_transport()
    # Note that the 'filename' has been changed to 'different-packname'
    access = _DirectPackAccess({'foo':(transport, 'different-packname')})
    e = self.assertListRaises(errors.NoSuchFile,
                              access.get_raw_records, memos)
517
def test_failing_readv_raises_retry(self):
    """A readv that fails mid-stream is translated into RetryWithNewPacks.

    Single-record requests succeed against the mock transport; only a
    multi-offset readv trips the injected failure.  With a reload_func
    available the NoSuchFile is wrapped as retryable with
    reload_occurred=False.
    """
    memos = self.make_pack_file()
    transport = self.get_transport()
    failing_transport = MockReadvFailingTransport(
                            [transport.get_bytes('packname')])
    reload_called, reload_func = self.make_reload_func()
    access = _DirectPackAccess({'foo':(failing_transport, 'packname')},
                               reload_func=reload_func)
    # Asking for a single record will not trigger the Mock failure
    self.assertEqual(['1234567890'],
                     list(access.get_raw_records(memos[:1])))
    self.assertEqual(['12345'],
                     list(access.get_raw_records(memos[1:2])))
    # A multiple offset readv() will fail mid-way through
    e = self.assertListRaises(errors.RetryWithNewPacks,
                              access.get_raw_records, memos)
    # The file has gone missing, so we assume we need to reload
    self.assertFalse(e.reload_occurred)
    # The original NoSuchFile is preserved in exc_info, including the path.
    self.assertIsInstance(e.exc_info, tuple)
    self.assertIs(e.exc_info[0], errors.NoSuchFile)
    self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
    self.assertEqual('packname', e.exc_info[1].path)
540
def test_failing_readv_raises_no_such_file_with_no_reload(self):
    """Without a reload_func, a mid-readv failure raises NoSuchFile."""
    memos = self.make_pack_file()
    transport = self.get_transport()
    failing_transport = MockReadvFailingTransport(
                            [transport.get_bytes('packname')])
    # No reload_func is passed here, so the failure cannot be retried.
    access = _DirectPackAccess({'foo':(failing_transport, 'packname')})
    # Asking for a single record will not trigger the Mock failure
    self.assertEqual(['1234567890'],
                     list(access.get_raw_records(memos[:1])))
    self.assertEqual(['12345'],
                     list(access.get_raw_records(memos[1:2])))
    # A multiple offset readv() will fail mid-way through
    e = self.assertListRaises(errors.NoSuchFile,
                              access.get_raw_records, memos)
556
def test_reload_or_raise_no_reload(self):
    """With no reload_func, reload_or_raise re-raises the wrapped error."""
    access = _DirectPackAccess({}, reload_func=None)
    retry_exc = self.make_retry_exception()
    # Without a reload_func, we will just re-raise the original exception
    self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
562
def test_reload_or_raise_reload_changed(self):
    """reload_or_raise swallows the error when reload_func reports changes.

    Each call should invoke reload_func exactly once, regardless of
    whether the exception says a reload already occurred.
    """
    reload_called, reload_func = self.make_reload_func(return_val=True)
    access = _DirectPackAccess({}, reload_func=reload_func)
    retry_exc = self.make_retry_exception()
    access.reload_or_raise(retry_exc)
    self.assertEqual([1], reload_called)
    retry_exc.reload_occurred = True
    access.reload_or_raise(retry_exc)
    self.assertEqual([2], reload_called)
572
def test_reload_or_raise_reload_no_change(self):
    """reload_or_raise raises if a first reload finds nothing changed."""
    reload_called, reload_func = self.make_reload_func(return_val=False)
    access = _DirectPackAccess({}, reload_func=reload_func)
    retry_exc = self.make_retry_exception()
    # If reload_occurred is False, then we consider it an error to have
    # reload_func() return False (no changes).
    self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
    self.assertEqual([1], reload_called)
    retry_exc.reload_occurred = True
    # If reload_occurred is True, then we assume nothing changed because
    # it had changed earlier, but didn't change again
    access.reload_or_raise(retry_exc)
    self.assertEqual([2], reload_called)
586
def test_annotate_retries(self):
587
vf, reload_counter = self.make_vf_for_retrying()
588
# It is a little bit bogus to annotate the Revision VF, but it works,
589
# as we have ancestry stored there
591
reload_lines = vf.annotate(key)
592
self.assertEqual([1, 1, 0], reload_counter)
593
plain_lines = vf.annotate(key)
594
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
595
if reload_lines != plain_lines:
596
self.fail('Annotation was not identical with reloading.')
597
# Now delete the packs-in-use, which should trigger another reload, but
598
# this time we just raise an exception because we can't recover
599
for trans, name in vf._access._indices.itervalues():
601
self.assertRaises(errors.NoSuchFile, vf.annotate, key)
602
self.assertEqual([2, 1, 1], reload_counter)
604
def test__get_record_map_retries(self):
605
vf, reload_counter = self.make_vf_for_retrying()
606
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
607
records = vf._get_record_map(keys)
608
self.assertEqual(keys, sorted(records.keys()))
609
self.assertEqual([1, 1, 0], reload_counter)
610
# Now delete the packs-in-use, which should trigger another reload, but
611
# this time we just raise an exception because we can't recover
612
for trans, name in vf._access._indices.itervalues():
614
self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
615
self.assertEqual([2, 1, 1], reload_counter)
617
def test_get_record_stream_retries(self):
618
vf, reload_counter = self.make_vf_for_retrying()
619
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
620
record_stream = vf.get_record_stream(keys, 'topological', False)
621
record = record_stream.next()
622
self.assertEqual(('rev-1',), record.key)
623
self.assertEqual([0, 0, 0], reload_counter)
624
record = record_stream.next()
625
self.assertEqual(('rev-2',), record.key)
626
self.assertEqual([1, 1, 0], reload_counter)
627
record = record_stream.next()
628
self.assertEqual(('rev-3',), record.key)
629
self.assertEqual([1, 1, 0], reload_counter)
630
# Now delete all pack files, and see that we raise the right error
631
for trans, name in vf._access._indices.itervalues():
633
self.assertListRaises(errors.NoSuchFile,
634
vf.get_record_stream, keys, 'topological', False)
636
def test_iter_lines_added_or_present_in_keys_retries(self):
637
vf, reload_counter = self.make_vf_for_retrying()
638
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
639
# Unfortunately, iter_lines_added_or_present_in_keys iterates the
640
# result in random order (determined by the iteration order from a
641
# set()), so we don't have any solid way to trigger whether data is
642
# read before or after. However we tried to delete the middle node to
643
# exercise the code well.
644
# What we care about is that all lines are always yielded, but not
647
reload_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
648
self.assertEqual([1, 1, 0], reload_counter)
649
# Now do it again, to make sure the result is equivalent
650
plain_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
651
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
652
self.assertEqual(plain_lines, reload_lines)
653
self.assertEqual(21, len(plain_lines))
654
# Now delete all pack files, and see that we raise the right error
655
for trans, name in vf._access._indices.itervalues():
657
self.assertListRaises(errors.NoSuchFile,
658
vf.iter_lines_added_or_present_in_keys, keys)
659
self.assertEqual([2, 1, 1], reload_counter)
661
def test_get_record_stream_yields_disk_sorted_order(self):
662
# if we get 'unordered' pick a semi-optimal order for reading. The
663
# order should be grouped by pack file, and then by position in file
664
repo = self.make_repository('test', format='pack-0.92')
666
self.addCleanup(repo.unlock)
667
repo.start_write_group()
669
vf.add_lines(('f-id', 'rev-5'), [('f-id', 'rev-4')], ['lines\n'])
670
vf.add_lines(('f-id', 'rev-1'), [], ['lines\n'])
671
vf.add_lines(('f-id', 'rev-2'), [('f-id', 'rev-1')], ['lines\n'])
672
repo.commit_write_group()
673
# We inserted them as rev-5, rev-1, rev-2, we should get them back in
675
stream = vf.get_record_stream([('f-id', 'rev-1'), ('f-id', 'rev-5'),
676
('f-id', 'rev-2')], 'unordered', False)
677
keys = [r.key for r in stream]
678
self.assertEqual([('f-id', 'rev-5'), ('f-id', 'rev-1'),
679
('f-id', 'rev-2')], keys)
680
repo.start_write_group()
681
vf.add_lines(('f-id', 'rev-4'), [('f-id', 'rev-3')], ['lines\n'])
682
vf.add_lines(('f-id', 'rev-3'), [('f-id', 'rev-2')], ['lines\n'])
683
vf.add_lines(('f-id', 'rev-6'), [('f-id', 'rev-5')], ['lines\n'])
684
repo.commit_write_group()
685
# Request in random order, to make sure the output order isn't based on
687
request_keys = set(('f-id', 'rev-%d' % i) for i in range(1, 7))
688
stream = vf.get_record_stream(request_keys, 'unordered', False)
689
keys = [r.key for r in stream]
690
# We want to get the keys back in disk order, but it doesn't matter
691
# which pack we read from first. So this can come back in 2 orders
692
alt1 = [('f-id', 'rev-%d' % i) for i in [4, 3, 6, 5, 1, 2]]
693
alt2 = [('f-id', 'rev-%d' % i) for i in [5, 1, 2, 4, 3, 6]]
694
if keys != alt1 and keys != alt2:
695
self.fail('Returned key order did not match either expected order.'
696
' expected %s or %s, not %s'
697
% (alt1, alt2, keys))
368
700
class LowLevelKnitDataTests(TestCase):
1205
1627
# change options in the second record
1206
1628
self.assertRaises(errors.KnitCorrupt, index.add_records,
1207
1629
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)]),
1208
(('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
1630
(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1209
1631
self.assertEqual([], self.caught_entries)
1633
def make_g_index_missing_compression_parent(self):
1634
graph_index = self.make_g_index('missing_comp', 2,
1635
[(('tip', ), ' 100 78',
1636
([('missing-parent', ), ('ghost', )], [('missing-parent', )]))])
1639
def make_g_index_missing_parent(self):
1640
graph_index = self.make_g_index('missing_parent', 2,
1641
[(('parent', ), ' 100 78', ([], [])),
1642
(('tip', ), ' 100 78',
1643
([('parent', ), ('missing-parent', )], [('parent', )])),
1647
def make_g_index_no_external_refs(self):
1648
graph_index = self.make_g_index('no_external_refs', 2,
1649
[(('rev', ), ' 100 78',
1650
([('parent', ), ('ghost', )], []))])
1653
def test_add_good_unvalidated_index(self):
    """Scanning an index with no external refs leaves nothing missing."""
    unvalidated = self.make_g_index_no_external_refs()
    combined = CombinedGraphIndex([unvalidated])
    index = _KnitGraphIndex(combined, lambda: True, deltas=True)
    index.scan_unvalidated_index(unvalidated)
    self.assertEqual(frozenset(), index.get_missing_compression_parents())
1660
def test_add_missing_compression_parent_unvalidated_index(self):
1661
unvalidated = self.make_g_index_missing_compression_parent()
1662
combined = CombinedGraphIndex([unvalidated])
1663
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1664
index.scan_unvalidated_index(unvalidated)
1665
# This also checks that its only the compression parent that is
1666
# examined, otherwise 'ghost' would also be reported as a missing
1669
frozenset([('missing-parent',)]),
1670
index.get_missing_compression_parents())
1672
def test_add_missing_noncompression_parent_unvalidated_index(self):
1673
unvalidated = self.make_g_index_missing_parent()
1674
combined = CombinedGraphIndex([unvalidated])
1675
index = _KnitGraphIndex(combined, lambda: True, deltas=True,
1676
track_external_parent_refs=True)
1677
index.scan_unvalidated_index(unvalidated)
1679
frozenset([('missing-parent',)]), index.get_missing_parents())
1681
def test_track_external_parent_refs(self):
1682
g_index = self.make_g_index('empty', 2, [])
1683
combined = CombinedGraphIndex([g_index])
1684
index = _KnitGraphIndex(combined, lambda: True, deltas=True,
1685
add_callback=self.catch_add, track_external_parent_refs=True)
1686
self.caught_entries = []
1688
(('new-key',), 'fulltext,no-eol', (None, 50, 60),
1689
[('parent-1',), ('parent-2',)])])
1691
frozenset([('parent-1',), ('parent-2',)]),
1692
index.get_missing_parents())
1694
def test_add_unvalidated_index_with_present_external_references(self):
    """External refs satisfied by a sibling index are not reported missing."""
    index = self.two_graph_index(deltas=True)
    # Ugly hack to get at one of the underlying GraphIndex objects that
    # two_graph_index built.
    unvalidated = index._graph_index._indices[1]
    # 'parent' is an external ref of _indices[1] (unvalidated), but is
    # present in _indices[0].
    index.scan_unvalidated_index(unvalidated)
    self.assertEqual(frozenset(), index.get_missing_compression_parents())
1704
def make_new_missing_parent_g_index(self, name):
1705
missing_parent = name + '-missing-parent'
1706
graph_index = self.make_g_index(name, 2,
1707
[((name + 'tip', ), ' 100 78',
1708
([(missing_parent, ), ('ghost', )], [(missing_parent, )]))])
1711
def test_add_mulitiple_unvalidated_indices_with_missing_parents(self):
1712
g_index_1 = self.make_new_missing_parent_g_index('one')
1713
g_index_2 = self.make_new_missing_parent_g_index('two')
1714
combined = CombinedGraphIndex([g_index_1, g_index_2])
1715
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1716
index.scan_unvalidated_index(g_index_1)
1717
index.scan_unvalidated_index(g_index_2)
1719
frozenset([('one-missing-parent',), ('two-missing-parent',)]),
1720
index.get_missing_compression_parents())
1722
def test_add_mulitiple_unvalidated_indices_with_mutual_dependencies(self):
1723
graph_index_a = self.make_g_index('one', 2,
1724
[(('parent-one', ), ' 100 78', ([('non-compression-parent',)], [])),
1725
(('child-of-two', ), ' 100 78',
1726
([('parent-two',)], [('parent-two',)]))])
1727
graph_index_b = self.make_g_index('two', 2,
1728
[(('parent-two', ), ' 100 78', ([('non-compression-parent',)], [])),
1729
(('child-of-one', ), ' 100 78',
1730
([('parent-one',)], [('parent-one',)]))])
1731
combined = CombinedGraphIndex([graph_index_a, graph_index_b])
1732
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1733
index.scan_unvalidated_index(graph_index_a)
1734
index.scan_unvalidated_index(graph_index_b)
1736
frozenset([]), index.get_missing_compression_parents())
1212
1739
class TestNoParentsGraphIndexKnit(KnitTests):
1213
1740
"""Tests for knits using _KnitGraphIndex with no parents."""
1379
1914
self.assertEqual([], self.caught_entries)
1917
class TestKnitVersionedFiles(KnitTests):
1919
def assertGroupKeysForIo(self, exp_groups, keys, non_local_keys,
                         positions, _min_buffer_size=None):
    """Assert that _group_keys_for_io produces the expected grouping.

    :param exp_groups: Expected result from _group_keys_for_io.
    :param keys: Keys to group.
    :param non_local_keys: Keys not available locally.
    :param positions: Position map passed through to _group_keys_for_io.
    :param _min_buffer_size: Grouping threshold; defaults to
        knit._STREAM_MIN_BUFFER_SIZE when None.
    """
    kvf = self.make_test_knit()
    if _min_buffer_size is None:
        _min_buffer_size = knit._STREAM_MIN_BUFFER_SIZE
    self.assertEqual(exp_groups, kvf._group_keys_for_io(keys,
                     non_local_keys, positions,
                     _min_buffer_size=_min_buffer_size))
1928
def assertSplitByPrefix(self, expected_map, expected_prefix_order,
1930
split, prefix_order = KnitVersionedFiles._split_by_prefix(keys)
1931
self.assertEqual(expected_map, split)
1932
self.assertEqual(expected_prefix_order, prefix_order)
1934
def test__group_keys_for_io(self):
1935
ft_detail = ('fulltext', False)
1936
ld_detail = ('line-delta', False)
1944
f_a: (ft_detail, (f_a, 0, 100), None),
1945
f_b: (ld_detail, (f_b, 100, 21), f_a),
1946
f_c: (ld_detail, (f_c, 180, 15), f_b),
1947
g_a: (ft_detail, (g_a, 121, 35), None),
1948
g_b: (ld_detail, (g_b, 156, 12), g_a),
1949
g_c: (ld_detail, (g_c, 195, 13), g_a),
1951
self.assertGroupKeysForIo([([f_a], set())],
1952
[f_a], [], positions)
1953
self.assertGroupKeysForIo([([f_a], set([f_a]))],
1954
[f_a], [f_a], positions)
1955
self.assertGroupKeysForIo([([f_a, f_b], set([]))],
1956
[f_a, f_b], [], positions)
1957
self.assertGroupKeysForIo([([f_a, f_b], set([f_b]))],
1958
[f_a, f_b], [f_b], positions)
1959
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
1960
[f_a, g_a, f_b, g_b], [], positions)
1961
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
1962
[f_a, g_a, f_b, g_b], [], positions,
1963
_min_buffer_size=150)
1964
self.assertGroupKeysForIo([([f_a, f_b], set()), ([g_a, g_b], set())],
1965
[f_a, g_a, f_b, g_b], [], positions,
1966
_min_buffer_size=100)
1967
self.assertGroupKeysForIo([([f_c], set()), ([g_b], set())],
1968
[f_c, g_b], [], positions,
1969
_min_buffer_size=125)
1970
self.assertGroupKeysForIo([([g_b, f_c], set())],
1971
[g_b, f_c], [], positions,
1972
_min_buffer_size=125)
1974
def test__split_by_prefix(self):
1975
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
1976
'g': [('g', 'b'), ('g', 'a')],
1978
[('f', 'a'), ('g', 'b'),
1979
('g', 'a'), ('f', 'b')])
1981
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
1982
'g': [('g', 'b'), ('g', 'a')],
1984
[('f', 'a'), ('f', 'b'),
1985
('g', 'b'), ('g', 'a')])
1987
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
1988
'g': [('g', 'b'), ('g', 'a')],
1990
[('f', 'a'), ('f', 'b'),
1991
('g', 'b'), ('g', 'a')])
1993
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
1994
'g': [('g', 'b'), ('g', 'a')],
1995
'': [('a',), ('b',)]
1997
[('f', 'a'), ('g', 'b'),
1999
('g', 'a'), ('f', 'b')])
1382
2002
class TestStacking(KnitTests):
1384
2004
def get_basis_and_test_knit(self):
1764
2391
multiparent.NewText(['foo\n']),
1765
2392
multiparent.ParentText(1, 0, 2, 1)])],
1767
self.assertEqual(4, len(basis.calls))
2394
self.assertEqual(3, len(basis.calls))
1768
2395
self.assertEqual([
1769
2396
("get_parent_map", set([key_left, key_right])),
1770
2397
("get_parent_map", set([key_left, key_right])),
1771
("get_parent_map", set([key_left, key_right])),
1774
last_call = basis.calls[3]
2400
last_call = basis.calls[-1]
1775
2401
self.assertEqual('get_record_stream', last_call[0])
1776
2402
self.assertEqual(set([key_left, key_right]), set(last_call[1]))
1777
2403
self.assertEqual('unordered', last_call[2])
1778
2404
self.assertEqual(True, last_call[3])
2407
class TestNetworkBehaviour(KnitTests):
2408
"""Tests for getting data out of/into knits over the network."""
2410
def test_include_delta_closure_generates_a_knit_delta_closure(self):
2411
vf = self.make_test_knit(name='test')
2412
# put in three texts, giving ft, delta, delta
2413
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2414
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2415
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2416
# But heuristics could interfere, so check what happened:
2417
self.assertEqual(['knit-ft-gz', 'knit-delta-gz', 'knit-delta-gz'],
2418
[record.storage_kind for record in
2419
vf.get_record_stream([('base',), ('d1',), ('d2',)],
2420
'topological', False)])
2421
# generate a stream of just the deltas include_delta_closure=True,
2422
# serialise to the network, and check that we get a delta closure on the wire.
2423
stream = vf.get_record_stream([('d1',), ('d2',)], 'topological', True)
2424
netb = [record.get_bytes_as(record.storage_kind) for record in stream]
2425
# The first bytes should be a memo from _ContentMapGenerator, and the
2426
# second bytes should be empty (because its a API proxy not something
2427
# for wire serialisation.
2428
self.assertEqual('', netb[1])
2430
kind, line_end = network_bytes_to_kind_and_offset(bytes)
2431
self.assertEqual('knit-delta-closure', kind)
2434
class TestContentMapGenerator(KnitTests):
2435
"""Tests for ContentMapGenerator"""
2437
def test_get_record_stream_gives_records(self):
2438
vf = self.make_test_knit(name='test')
2439
# put in three texts, giving ft, delta, delta
2440
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2441
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2442
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2443
keys = [('d1',), ('d2',)]
2444
generator = _VFContentMapGenerator(vf, keys,
2445
global_map=vf.get_parent_map(keys))
2446
for record in generator.get_record_stream():
2447
if record.key == ('d1',):
2448
self.assertEqual('d1\n', record.get_bytes_as('fulltext'))
2450
self.assertEqual('d2\n', record.get_bytes_as('fulltext'))
2452
def test_get_record_stream_kinds_are_raw(self):
2453
vf = self.make_test_knit(name='test')
2454
# put in three texts, giving ft, delta, delta
2455
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2456
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2457
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2458
keys = [('base',), ('d1',), ('d2',)]
2459
generator = _VFContentMapGenerator(vf, keys,
2460
global_map=vf.get_parent_map(keys))
2461
kinds = {('base',): 'knit-delta-closure',
2462
('d1',): 'knit-delta-closure-ref',
2463
('d2',): 'knit-delta-closure-ref',
2465
for record in generator.get_record_stream():
2466
self.assertEqual(kinds[record.key], record.storage_kind)