        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']


class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)
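
    # Illustrative only (it mirrors the loop above and adds no coverage): a
    # caller holding an annotated fulltext record who wants a plain fulltext
    # looks the adapter up by the (source_kind, target_kind) pair, e.g.:
    #   adapter_factory = versionedfile.adapter_registry.get(
    #       ('knit-annotated-ft-gz', 'fulltext'))
    #   adapter = adapter_factory(None)  # fulltext adapters need no basis vf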

    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the adapted texts the tests are interested in."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base)
        # merged is both a delta and has multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged)
        return ft_data, delta_data
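
    # Note: get_record_stream yields ContentFactory objects, and
    # entries.next() (Python 2 iterator protocol, used throughout bzrlib)
    # pulls the single requested record; the adapter's get_bytes then
    # re-serialises that record into the requested representation.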

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
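
    # The expected strings above follow the unannotated knit container
    # layout: a 'version <name> <line-count> <sha1>' header, then content
    # lines (or delta instructions such as '1,2,3' / '2,2,2'), then
    # 'end <name>'. The sha1s are those of the diamond fixture texts.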

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)
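
    # In both variants the delta adapter had to fetch the compression basis
    # ('left') from the backing versioned file exactly once, which is what
    # the logged_vf.calls assertions pin down; fulltext records need no
    # backing lookup, hence FTAnnotatedToFullText(None).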

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)


class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        #format5: plain
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        #format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        #knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
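
    # A reading of the expected values above: the uppercase 'E' in 'filE-Id'
    # appears to be escaped to '%45' with the '%' itself then escaped to
    # '%25', giving '%2545'; unmap reverses both steps, so mapping
    # round-trips losslessly.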


class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(self.cleanup, files)
        return files

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)
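
    # These tests run once per scenario: self.factory, self.cleanup,
    # self.graph and self.key_length are injected per implementation.
    # key_length 1 models a single keyspace; key_length 2 prefixes every
    # key with a file id such as 'FileA'.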

    def test_add_lines(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f.add_lines(key0, [], ['a\n', 'b\n'])
        if self.graph:
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
        else:
            f.add_lines(key1, [], ['b\n', 'c\n'])
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test__add_text(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f._add_text(key0, [], 'a\nb\n')
        if self.graph:
            f._add_text(key1, [key0], 'b\nc\n')
        else:
            f._add_text(key1, [], 'b\nc\n')
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_check_no_parameters(self):
        files = self.get_versionedfiles()

    def test_check_progressbar_parameter(self):
        """A progress bar can be supplied because check can be a generator."""
        pb = ui.ui_factory.nested_progress_bar()
        self.addCleanup(pb.finished)
        files = self.get_versionedfiles()
        files.check(progress_bar=pb)

    def test_check_with_keys_becomes_generator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys = files.keys()
        entries = files.check(keys=keys)
        seen = set()
        # Texts output should be fulltexts.
        self.capture_stream(files, entries, seen.add,
            files.get_parent_map(keys), require_fulltext=True)
        # All texts should be output.
        self.assertEqual(set(keys), seen)

    def test_clear_cache(self):
        files = self.get_versionedfiles()
        files.clear_cache()

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
        nokeys=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only, nokeys=nokeys)

    def _add_content_nostoresha(self, add_lines):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_versionedfiles()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            if add_lines:
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                         lines)
            else:
                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
                                         ''.join(lines))
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            new_key = self.get_simple_key(version + "2")
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, new_key, [], lines,
                nostore_sha=sha)
            self.assertRaises(errors.ExistingContent,
                vf._add_text, new_key, [], ''.join(lines),
                nostore_sha=sha)
            # and no new version should have been added.
            record = vf.get_record_stream([new_key], 'unordered', True).next()
            self.assertEqual('absent', record.storage_kind)

    def test_add_lines_nostoresha(self):
        self._add_content_nostoresha(add_lines=True)

    def test__add_text_nostoresha(self):
        self._add_content_nostoresha(add_lines=False)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                ]),
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ]),
                files.keys())
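
    # With nokeys=True the store picked the keys itself: each key is the
    # 'sha1:' digest of the stored text (a content-hash key), placed behind
    # the per-file prefix in the key_length 2 scenarios.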

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A
        # |\
        # | B
        # |/
        # C
        f.add_lines(key_a, [], ['\n'])
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_known_graph_with_fallbacks(self):
        f = self.get_versionedfiles('files')
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        if getattr(f, 'add_fallback_versioned_files', None) is None:
            raise TestNotApplicable("%s doesn't support fallbacks"
                                    % (f.__class__.__name__,))
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A     only in fallback
        # |\
        # | B
        # |/
        # C
        g = self.get_versionedfiles('fallback')
        g.add_lines(key_a, [], ['\n'])
        f.add_fallback_versioned_files(g)
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz',
             'knit-delta-closure', 'knit-delta-closure-ref',
             'groupcompress-block', 'groupcompress-block-ref'])

    def capture_stream(self, f, entries, on_seen, parents,
        require_fulltext=False):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
            if require_fulltext:
                factory.get_bytes_as('fulltext')

    def test_get_record_stream_interface(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)

    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

    def get_keys_and_groupcompress_sort_order(self):
        """Get diamond test keys list, and their groupcompress sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):2,
                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
                ('FileB', 'base'):5,
                }
        return keys, sort_order
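
    # Note the inversion relative to the topological ordering above:
    # 'groupcompress' ordering wants newest-first ('merged' sorts before
    # 'base'), and with key_length 2 it also groups all FileA keys ahead
    # of FileB rather than interleaving the prefixes.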

    def test_get_record_stream_interface_ordered(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """Each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1],
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # self.assertEqual(files.get_text(factory.key),
            ft_bytes = factory.get_bytes_as('fulltext')
            self.assertIsInstance(ft_bytes, str)
            chunked_bytes = factory.get_bytes_as('chunked')
            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))

        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_groupcompress(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_groupcompress_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'groupcompress', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertRecordHasContent(self, record, bytes):
        """Assert that record's content is the given bytes."""
        self.assertEqual(bytes, record.get_bytes_as('fulltext'))
        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

    def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
        files = self.get_versionedfiles()
        key = self.get_simple_key('foo')
        files.add_lines(key, (), ['my text\n', 'content'])
        stream = files.get_record_stream([key], 'unordered', False)
        record = stream.next()
        if record.storage_kind in ('chunked', 'fulltext'):
            # chunked and fulltext representations are for direct use, not
            # wire serialisation: check they are able to be used directly.
            # To send such records over the wire translation will be needed.
            self.assertRecordHasContent(record, "my text\ncontent")
        else:
            bytes = [record.get_bytes_as(record.storage_kind)]
            network_stream = versionedfile.NetworkRecordStream(bytes).read()
            source_record = record
            records = []
            for record in network_stream:
                records.append(record)
                self.assertEqual(source_record.storage_kind,
                    record.storage_kind)
                self.assertEqual(source_record.parents, record.parents)
                self.assertEqual(
                    source_record.get_bytes_as(source_record.storage_kind),
                    record.get_bytes_as(record.storage_kind))
            self.assertEqual(1, len(records))
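
    # A minimal sketch of the wire round-trip exercised above, assuming a
    # non-fulltext record (e.g. a gzipped knit record):
    #   bytes = [record.get_bytes_as(record.storage_kind)]
    #   for r in versionedfile.NetworkRecordStream(bytes).read():
    #       ...  # r reproduces key, parents, storage_kind and bytes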

    def assertStreamMetaEqual(self, records, expected, stream):
        """Assert that streams expected and stream have the same records.

        :param records: A list to collect the seen records.
        :return: A generator of the records in stream.
        """
        # We make assertions during copying to catch things early for
        # easier debugging.
        for record, ref_record in izip(stream, expected):
            records.append(record)
            self.assertEqual(ref_record.key, record.key)
            self.assertEqual(ref_record.storage_kind, record.storage_kind)
            self.assertEqual(ref_record.parents, record.parents)
            yield record

    def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
        stream):
        """Convert a stream to a bytes iterator.

        :param skipped_records: A list with one element to increment when a
            record is skipped.
        :param full_texts: A dict from key->fulltext representation, for
            checking chunked or fulltext stored records.
        :param stream: A record_stream.
        :return: An iterator over the bytes of each record.
        """
        for record in stream:
            if record.storage_kind in ('chunked', 'fulltext'):
                skipped_records[0] += 1
                # check the content is correct for direct use.
                self.assertRecordHasContent(record, full_texts[record.key])
            else:
                yield record.get_bytes_as(record.storage_kind)

    def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key, key_delta], 'unordered', False)
        ref = files.get_record_stream([key, key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key: "my text\ncontent",
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(2, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been copied.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_native_formats_are_wire_ready_delta(self):
        # copy a delta over the wire
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        # Copy the basis text across so we can reconstruct the delta during
        # insertion into target.
        target_files.insert_record_stream(files.get_record_stream([key],
            'unordered', False))
        local = files.get_record_stream([key_delta], 'unordered', False)
        ref = files.get_record_stream([key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been copied.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_wire_ready_delta_closure_included(self):
        # copy a delta over the wire with the ability to get its full text.
        files = self.get_versionedfiles()
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key_delta], 'unordered', True)
        ref = files.get_record_stream([key_delta], 'unordered', True)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        for record in self.assertStreamMetaEqual(records, ref, network_stream):
            # we have to be able to get the full text out:
            self.assertRecordHasContent(record, full_texts[record.key])
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                if factory.sha1 is not None:
                    sha1 = files.get_sha1s([factory.key])[factory.key]
                    self.assertEqual(sha1, factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't ask
        # for just absent keys to ensure that content before and after the
        # absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
            parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return None

    def test_get_annotator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        origin_key = self.get_simple_key('origin')
        base_key = self.get_simple_key('base')
        left_key = self.get_simple_key('left')
        right_key = self.get_simple_key('right')
        merged_key = self.get_simple_key('merged')
        # annotator = files.get_annotator()
        # introduced full text
        origins, lines = files.get_annotator().annotate(origin_key)
        self.assertEqual([(origin_key,)], origins)
        self.assertEqual(['origin\n'], lines)
        # a delta
        origins, lines = files.get_annotator().annotate(base_key)
        self.assertEqual([(base_key,)], origins)
        self.assertEqual(['base\n'], lines)
        origins, lines = files.get_annotator().annotate(merged_key)
        if self.graph:
            self.assertEqual([
                (base_key,),
                (left_key,),
                (right_key,),
                (merged_key,),
                ], origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (merged_key,),
                (merged_key,),
                (merged_key,),
                (merged_key,),
                ], origins)
        self.assertRaises(RevisionNotPresent,
            files.get_annotator().annotate, self.get_simple_key('missing-key'))

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = [
                (('r0',), self.get_parents(())),
                (('r1',), self.get_parents((('r0',),))),
                (('r2',), self.get_parents(())),
                (('r3',), self.get_parents(())),
                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
                ]
        else:
            parent_details = [
                (('FileA', 'r0'), self.get_parents(())),
                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
                (('FileA', 'r2'), self.get_parents(())),
                (('FileA', 'r3'), self.get_parents(())),
                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                ]
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key:parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))

    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
        self.assertEqual({
            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
            },
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)

    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some keys into f.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_long_parent_chain_out_of_order(self):
        """An out of order stream can either error or work."""
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        # Create a reasonably long chain of records based on each other, where
        # most will be deltas.
        source = self.get_versionedfiles('source')
        parents = ()
        keys = []
        content = [('same same %d\n' % n) for n in range(500)]
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            key = ('key-' + letter,)
            if self.key_length == 2:
                key = ('prefix',) + key
            content.append('content for ' + letter + '\n')
            source.add_lines(key, parents, content)
            keys.append(key)
            parents = (key,)
        # Create a stream of these records, excluding the first record that
        # the rest ultimately depend upon, and insert it into a new vf.
        streams = []
        for key in reversed(keys):
            streams.append(source.get_record_stream([key], 'unordered', False))
        deltas = chain(*streams[:-1])
        files = self.get_versionedfiles()
        try:
            files.insert_record_stream(deltas)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            # Must only report either just the first key as a missing parent,
            # or no key as missing (for nodelta scenarios).
            missing = set(files.get_missing_compression_parent_keys())
            missing.discard(keys[0])
            self.assertEqual(set(), missing)

    def get_knit_delta_source(self):
        """Get a source that can produce a stream with knit delta records,
        regardless of this test's scenario.
        """
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        get_diamond_files(source, self.key_length, trailing_eol=True,
            nograph=False, left_only=False)
        return source

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. In the meantime a record missing its basis is
        not added.
        """
        source = self.get_knit_delta_source()
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        if self.support_partial_insertion:
            self.assertEqual([],
                list(files.get_missing_compression_parent_keys()))
            files.insert_record_stream(entries)
            missing_bases = files.get_missing_compression_parent_keys()
            self.assertEqual(set([self.get_simple_key('left')]),
                set(missing_bases))
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
        else:
            self.assertRaises(
                errors.RevisionNotPresent, files.insert_record_stream, entries)
            files.check()

    def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. That basis can be added in a second
        insert_record_stream call that does not need to repeat records present
        in the previous stream. The record(s) that required that basis are
        fully inserted once their basis is no longer missing.
        """
        if not self.support_partial_insertion:
            raise TestNotApplicable(
                'versioned file scenario does not support partial insertion')
        source = self.get_knit_delta_source()
        entries = source.get_record_stream([self.get_simple_key('origin'),
            self.get_simple_key('merged')], 'unordered', False)
        files = self.get_versionedfiles()
        files.insert_record_stream(entries)
        missing_bases = files.get_missing_compression_parent_keys()
        self.assertEqual(set([self.get_simple_key('left')]),
            set(missing_bases))
        # 'merged' is inserted (although a commit of a write group involving
        # this versionedfiles would fail).
        merged_key = self.get_simple_key('merged')
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        # Add the full delta closure of the missing records
        missing_entries = source.get_record_stream(
            missing_bases, 'unordered', True)
        files.insert_record_stream(missing_entries)
        # Now 'merged' is fully inserted (and a commit would succeed).
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        files.check()
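
    # Together the two tests above pin down the partial-insertion contract:
    # a delta whose compression basis is absent is buffered rather than
    # discarded, get_missing_compression_parent_keys() names the gap, and
    # supplying the basis later completes the earlier records without the
    # stream having to be resent.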

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equalset of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.ProgressTask):

            def __init__(self):
                progress.ProgressTask.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])

        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content', 0, 5),
             ('Walking content', 1, 5),
             ('Walking content', 2, 5),
             ('Walking content', 3, 5),
             ('Walking content', 4, 5),
             ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 26 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical no-eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext')
                )

    def test_keys(self):
        # While use is discouraged, versions() is still needed by aspects of
        # bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))


class VirtualVersionedFilesTests(TestCase):
    """Basic tests for the VirtualVersionedFiles implementations."""

    def _get_parent_map(self, keys):
        ret = {}
        for k in keys:
            if k in self._parent_map:
                ret[k] = self._parent_map[k]
        return ret

    def setUp(self):
        TestCase.setUp(self)
        self._lines = {}
        self._parent_map = {}
        self.texts = VirtualVersionedFiles(self._get_parent_map,
                                           self._lines.get)

    def test_add_lines(self):
        self.assertRaises(NotImplementedError,
            self.texts.add_lines, "foo", [], [])

    def test_add_mpdiffs(self):
        self.assertRaises(NotImplementedError,
            self.texts.add_mpdiffs, [])

    def test_check_noerrors(self):
        self.texts.check()

    def test_insert_record_stream(self):
        self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
            [])

    def test_get_sha1s_nonexistent(self):
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines["key"] = ["dataline1", "dataline2"]
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
            self.texts.get_sha1s([("key",)]))

    def test_get_parent_map(self):
        self._parent_map = {"G": ("A", "B")}
        self.assertEquals({("G",): (("A",),("B",))},
            self.texts.get_parent_map([("G",), ("L",)]))

    def test_get_record_stream(self):
        self._lines["A"] = ["FOO", "BAR"]
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("chunked", record.storage_kind)
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines["A"] = ["FOO", "BAR"]
        self._lines["B"] = ["HEY"]
        self._lines["C"] = ["Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
            sorted(list(it)))


class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):

    def get_ordering_vf(self, key_priority):
        builder = self.make_branch_builder('test')
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'TREE_ROOT', 'directory', None))])
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['B'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        vf = b.repository.inventories
        return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)

    def test_get_empty(self):
        vf = self.get_ordering_vf({})
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
            'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
            False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
            'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
            False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
            'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
            False)], vf.calls)