        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']


class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)
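
    # Usage sketch (added commentary, not part of the original tests): once a
    # (source_kind, requested_kind) pair is looked up in the registry, the
    # conversion is a two-step call; 'record' here is a hypothetical entry
    # from get_record_stream():
    #
    #   adapter_factory = versionedfile.adapter_registry.get(
    #       ('knit-annotated-ft-gz', 'fulltext'))
    #   fulltext = adapter_factory(None).get_bytes(record)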

    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the adapted texts the tests are interested in."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base)
        # merged is both a delta and multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged)
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
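
    # Reading aid (added commentary): each knit hunk decompresses to a header
    # line 'version <name> <line-count> <sha1>', then the content, then an
    # 'end <name>' marker. Delta hunks carry 'start,end,count' line-range
    # instructions followed by the replacement lines, as asserted above.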

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)


class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        #format5: plain
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        #format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        #knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
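
    # Usage sketch (added commentary): a mapper turns a key tuple into a
    # relative path for storage, and unmap() inverts the prefix portion, as
    # the assertions above demonstrate:
    #
    #   mapper = versionedfile.HashPrefixMapper()
    #   path = mapper.map(('file-id', 'revision-id'))   # '9b/file-id'
    #   prefix = mapper.unmap('9b/file-id')             # ('file-id',)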


class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(self.cleanup, files)
        return files

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def test_add_lines(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f.add_lines(key0, [], ['a\n', 'b\n'])
        if self.graph:
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
        else:
            f.add_lines(key1, [], ['b\n', 'c\n'])
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test__add_text(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f._add_text(key0, [], 'a\nb\n')
        if self.graph:
            f._add_text(key1, [key0], 'b\nc\n')
        else:
            f._add_text(key1, [], 'b\nc\n')
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_check_no_parameters(self):
        files = self.get_versionedfiles()
        files.check()

    def test_check_progressbar_parameter(self):
        """A progress bar can be supplied because check can be a generator."""
        pb = ui.ui_factory.nested_progress_bar()
        self.addCleanup(pb.finished)
        files = self.get_versionedfiles()
        files.check(progress_bar=pb)

    def test_check_with_keys_becomes_generator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys = files.keys()
        entries = files.check(keys=keys)
        seen = set()
        # Texts output should be fulltexts.
        self.capture_stream(files, entries, seen.add,
            files.get_parent_map(keys), require_fulltext=True)
        # All texts should be output.
        self.assertEqual(set(keys), seen)

    def test_clear_cache(self):
        files = self.get_versionedfiles()
        files.clear_cache()

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
        nokeys=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only, nokeys=nokeys)
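
    # The diamond fixture used throughout these tests has this ancestry (a
    # sketch inferred from the assertions in this module): 'origin' is a
    # standalone fulltext, and 'merged' joins two branches off 'base'.
    #
    #        base
    #       /    \
    #    left    right
    #       \    /
    #       merged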

    def _add_content_nostoresha(self, add_lines):
        """Adding content with a nostore_sha that matches existing content raises."""
        vf = self.get_versionedfiles()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            if add_lines:
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                         lines)
            else:
                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
                                         ''.join(lines))
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            new_key = self.get_simple_key(version + "2")
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, new_key, [], lines,
                nostore_sha=sha)
            self.assertRaises(errors.ExistingContent,
                vf._add_text, new_key, [], ''.join(lines),
                nostore_sha=sha)
            # and no new version should have been added.
            record = vf.get_record_stream([new_key], 'unordered', True).next()
            self.assertEqual('absent', record.storage_kind)
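
    # nostore_sha sketch (added commentary): passing the sha1 of a text that
    # is already stored makes add_lines raise ExistingContent instead of
    # writing a duplicate, so callers can deduplicate cheaply:
    #
    #   try:
    #       vf.add_lines(new_key, [], lines, nostore_sha=existing_sha)
    #   except errors.ExistingContent:
    #       pass  # reuse the already-stored text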

    def test_add_lines_nostoresha(self):
        self._add_content_nostoresha(add_lines=True)

    def test__add_text_nostoresha(self):
        self._add_content_nostoresha(add_lines=False)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                ]),
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ]),
                files.keys())

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A
        # |\
        # | B
        # |/
        # C
        f.add_lines(key_a, [], ['\n'])
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_known_graph_with_fallbacks(self):
        f = self.get_versionedfiles('files')
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        if getattr(f, 'add_fallback_versioned_files', None) is None:
            raise TestNotApplicable("%s doesn't support fallbacks"
                                    % (f.__class__.__name__,))
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A     only in fallback
        # |\
        # | B
        # |/
        # C
        g = self.get_versionedfiles('fallback')
        g.add_lines(key_a, [], ['\n'])
        f.add_fallback_versioned_files(g)
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz',
             'knit-delta-closure', 'knit-delta-closure-ref',
             'groupcompress-block', 'groupcompress-block-ref'])

    def capture_stream(self, f, entries, on_seen, parents,
        require_fulltext=False):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
            if require_fulltext:
                factory.get_bytes_as('fulltext')

    def test_get_record_stream_interface(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)

    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

    def get_keys_and_groupcompress_sort_order(self):
        """Get diamond test keys list, and their groupcompress sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):2,
                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
                ('FileB', 'base'):5,
                }
        return keys, sort_order

    def test_get_record_stream_interface_ordered(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """Each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1],
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # self.assertEqual(files.get_text(factory.key),
            ft_bytes = factory.get_bytes_as('fulltext')
            self.assertIsInstance(ft_bytes, str)
            chunked_bytes = factory.get_bytes_as('chunked')
            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))

        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_groupcompress(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_groupcompress_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'groupcompress', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertRecordHasContent(self, record, bytes):
        """Assert that record has the given bytes."""
        self.assertEqual(bytes, record.get_bytes_as('fulltext'))
        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

    def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
        files = self.get_versionedfiles()
        key = self.get_simple_key('foo')
        files.add_lines(key, (), ['my text\n', 'content'])
        stream = files.get_record_stream([key], 'unordered', False)
        record = stream.next()
        if record.storage_kind in ('chunked', 'fulltext'):
            # chunked and fulltext representations are for direct use not wire
            # serialisation: check they are able to be used directly. To send
            # such records over the wire translation will be needed.
            self.assertRecordHasContent(record, "my text\ncontent")
        else:
            bytes = [record.get_bytes_as(record.storage_kind)]
            network_stream = versionedfile.NetworkRecordStream(bytes).read()
            source_record = record
            records = []
            for record in network_stream:
                records.append(record)
                self.assertEqual(source_record.storage_kind,
                    record.storage_kind)
                self.assertEqual(source_record.parents, record.parents)
                self.assertEqual(
                    source_record.get_bytes_as(source_record.storage_kind),
                    record.get_bytes_as(record.storage_kind))
            self.assertEqual(1, len(records))
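
    # Wire round-trip sketch (added commentary): native storage kinds are
    # already serialised, so a record can be shipped as raw bytes and rebuilt
    # on the far side with NetworkRecordStream, exactly as exercised above:
    #
    #   payload = [record.get_bytes_as(record.storage_kind)]
    #   for rebuilt in versionedfile.NetworkRecordStream(payload).read():
    #       ...  # same key, parents and bytes as the source record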

    def assertStreamMetaEqual(self, records, expected, stream):
        """Assert that streams expected and stream have the same records.

        :param records: A list to collect the seen records.
        :return: A generator of the records in stream.
        """
        # We make assertions during copying to catch things early for
        # easier debugging.
        for record, ref_record in izip(stream, expected):
            records.append(record)
            self.assertEqual(ref_record.key, record.key)
            self.assertEqual(ref_record.storage_kind, record.storage_kind)
            self.assertEqual(ref_record.parents, record.parents)
            yield record

    def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
        stream):
        """Convert a stream to a bytes iterator.

        :param skipped_records: A list with one element to increment when a
            record is skipped.
        :param full_texts: A dict from key->fulltext representation, for
            checking chunked or fulltext stored records.
        :param stream: A record_stream.
        :return: An iterator over the bytes of each record.
        """
        for record in stream:
            if record.storage_kind in ('chunked', 'fulltext'):
                skipped_records[0] += 1
                # check the content is correct for direct use.
                self.assertRecordHasContent(record, full_texts[record.key])
            else:
                yield record.get_bytes_as(record.storage_kind)

    def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key, key_delta], 'unordered', False)
        ref = files.get_record_stream([key, key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key: "my text\ncontent",
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(2, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_native_formats_are_wire_ready_delta(self):
        # copy a delta over the wire
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        # Copy the basis text across so we can reconstruct the delta during
        # insertion into target.
        target_files.insert_record_stream(files.get_record_stream([key],
            'unordered', False))
        local = files.get_record_stream([key_delta], 'unordered', False)
        ref = files.get_record_stream([key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_wire_ready_delta_closure_included(self):
        # copy a delta over the wire with the ability to get its full text.
        files = self.get_versionedfiles()
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key_delta], 'unordered', True)
        ref = files.get_record_stream([key_delta], 'unordered', True)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection during check_stream.
        for record in self.assertStreamMetaEqual(records, ref, network_stream):
            # we have to be able to get the full text out:
            self.assertRecordHasContent(record, full_texts[record.key])
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                if factory.sha1 is not None:
                    sha1 = files.get_sha1s([factory.key])[factory.key]
                    self.assertEqual(sha1, factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't ask
        # for just absent keys to ensure that content before and after the
        # absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
            parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return ()

    def test_get_annotator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        origin_key = self.get_simple_key('origin')
        base_key = self.get_simple_key('base')
        left_key = self.get_simple_key('left')
        right_key = self.get_simple_key('right')
        merged_key = self.get_simple_key('merged')
        # annotator = files.get_annotator()
        # introduced full text
        origins, lines = files.get_annotator().annotate(origin_key)
        self.assertEqual([(origin_key,)], origins)
        self.assertEqual(['origin\n'], lines)
        # a delta
        origins, lines = files.get_annotator().annotate(base_key)
        self.assertEqual([(base_key,)], origins)
        self.assertEqual(['base\n'], lines)
        # a merge
        origins, lines = files.get_annotator().annotate(merged_key)
        if self.graph:
            self.assertEqual([
                (base_key,),
                (left_key,),
                (right_key,),
                (merged_key,),
                ], origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (merged_key,),
                (merged_key,),
                (merged_key,),
                (merged_key,),
                ], origins)
        self.assertRaises(RevisionNotPresent,
            files.get_annotator().annotate, self.get_simple_key('missing-key'))

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = [
                (('r0',), self.get_parents(())),
                (('r1',), self.get_parents((('r0',),))),
                (('r2',), self.get_parents(())),
                (('r3',), self.get_parents(())),
                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
                ]
        else:
            parent_details = [
                (('FileA', 'r0'), self.get_parents(())),
                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
                (('FileA', 'r2'), self.get_parents(())),
                (('FileA', 'r3'), self.get_parents(())),
                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                ]
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key:parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))

    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
        self.assertEqual({
            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
            },
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)

    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some keys into f.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_long_parent_chain_out_of_order(self):
        """An out of order stream can either error or work."""
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        # Create a reasonably long chain of records based on each other, where
        # most will be deltas.
        source = self.get_versionedfiles('source')
        parents = ()
        keys = []
        content = [('same same %d\n' % n) for n in range(500)]
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            key = ('key-' + letter,)
            if self.key_length == 2:
                key = ('prefix',) + key
            content.append('content for ' + letter + '\n')
            source.add_lines(key, parents, content)
            keys.append(key)
            parents = (key,)
        # Create a stream of these records, excluding the first record that the
        # rest ultimately depend upon, and insert it into a new vf.
        streams = []
        for key in reversed(keys):
            streams.append(source.get_record_stream([key], 'unordered', False))
        deltas = chain(*streams[:-1])
        files = self.get_versionedfiles()
        try:
            files.insert_record_stream(deltas)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            # Must only report either just the first key as a missing parent,
            # or no key as missing (for nodelta scenarios).
            missing = set(files.get_missing_compression_parent_keys())
            missing.discard(keys[0])
            self.assertEqual(set(), missing)

    def get_knit_delta_source(self):
        """Get a source that can produce a stream with knit delta records,
        regardless of this test's scenario.
        """
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        get_diamond_files(source, self.key_length, trailing_eol=True,
            nograph=False, left_only=False)
        return source

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis.  In the meantime a record missing its basis is
        not added.
        """
        source = self.get_knit_delta_source()
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        if self.support_partial_insertion:
            self.assertEqual([],
                list(files.get_missing_compression_parent_keys()))
            files.insert_record_stream(entries)
            missing_bases = files.get_missing_compression_parent_keys()
            self.assertEqual(set([self.get_simple_key('left')]),
                set(missing_bases))
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
        else:
            self.assertRaises(
                errors.RevisionNotPresent, files.insert_record_stream, entries)
            files.check()

    def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis.  That basis can be added in a second
        insert_record_stream call that does not need to repeat records present
        in the previous stream.  The record(s) that required that basis are
        fully inserted once their basis is no longer missing.
        """
        if not self.support_partial_insertion:
            raise TestNotApplicable(
                'versioned file scenario does not support partial insertion')
        source = self.get_knit_delta_source()
        entries = source.get_record_stream([self.get_simple_key('origin'),
            self.get_simple_key('merged')], 'unordered', False)
        files = self.get_versionedfiles()
        files.insert_record_stream(entries)
        missing_bases = files.get_missing_compression_parent_keys()
        self.assertEqual(set([self.get_simple_key('left')]),
            set(missing_bases))
        # 'merged' is inserted (although a commit of a write group involving
        # this versionedfiles would fail).
        merged_key = self.get_simple_key('merged')
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        # Add the full delta closure of the missing records
        missing_entries = source.get_record_stream(
            missing_bases, 'unordered', True)
        files.insert_record_stream(missing_entries)
        # Now 'merged' is fully inserted (and a commit would succeed).
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
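
    # Partial-insertion protocol sketch (added commentary, for scenarios with
    # support_partial_insertion): a stream missing a compression basis can be
    # completed by a later stream supplying the missing keys:
    #
    #   files.insert_record_stream(stream_missing_a_basis)
    #   missing = files.get_missing_compression_parent_keys()
    #   files.insert_record_stream(
    #       source.get_record_stream(missing, 'unordered', True))
    #   # get_missing_compression_parent_keys() is now empty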

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equalset of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.ProgressTask):

            def __init__(self):
                progress.ProgressTask.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])
        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content', 0, 5),
             ('Walking content', 1, 5),
             ('Walking content', 2, 5),
             ('Walking content', 3, 5),
             ('Walking content', 4, 5),
             ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext')
                )

    def test_keys(self):
        # While use is discouraged, versions() is still needed by aspects of
        # bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))


class VirtualVersionedFilesTests(TestCase):
    """Basic tests for the VirtualVersionedFiles implementations."""

    def _get_parent_map(self, keys):
        ret = {}
        for k in keys:
            if k in self._parent_map:
                ret[k] = self._parent_map[k]
        return ret

    def setUp(self):
        TestCase.setUp(self)
        self._lines = {}
        self._parent_map = {}
        self.texts = VirtualVersionedFiles(self._get_parent_map,
                                           self._lines.get)

    def test_add_lines(self):
        self.assertRaises(NotImplementedError,
            self.texts.add_lines, "foo", [], [])

    def test_add_mpdiffs(self):
        self.assertRaises(NotImplementedError,
            self.texts.add_mpdiffs, [])

    def test_check_noerrors(self):
        self.texts.check()

    def test_insert_record_stream(self):
        self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
            None)

    def test_get_sha1s_nonexistent(self):
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines["key"] = ["dataline1", "dataline2"]
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
            self.texts.get_sha1s([("key",)]))

    def test_get_parent_map(self):
        self._parent_map = {"G": ("A", "B")}
        self.assertEquals({("G",): (("A",),("B",))},
            self.texts.get_parent_map([("G",), ("L",)]))

    def test_get_record_stream(self):
        self._lines["A"] = ["FOO", "BAR"]
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("chunked", record.storage_kind)
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines["A"] = ["FOO", "BAR"]
        self._lines["B"] = ["HEY"]
        self._lines["C"] = ["Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
            sorted(list(it)))


class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):

    def get_ordering_vf(self, key_priority):
        builder = self.make_branch_builder('test')
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'TREE_ROOT', 'directory', None))])
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['B'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        vf = b.repository.inventories
        return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)
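
    # Behaviour sketch (added commentary): the decorator reorders 'unordered'
    # reads by the supplied priority map, lowest value first; keys absent from
    # the map sort to the front, alphabetically (see the tests below).
    #
    #   vf = self.get_ordering_vf({('A',): 2, ('B',): 1})
    #   # an 'unordered' get_record_stream request yields ('B',) before ('A',)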

    def test_get_empty(self):
        vf = self.get_ordering_vf({})
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
                           False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)