        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

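    # Expected output for the overlapped-insert merge scenario below: the
    # conflicting region is wrapped in '<<<<<<< '/'======='/'>>>>>>> '
    # markers, mirroring the conflict hunks that weave merge emits.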
    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']


class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)

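    # A minimal usage sketch (as a comment; names as exercised above):
    # callers look an adapter factory up by (source_kind, target_kind) and
    # construct it with an optional backing VersionedFiles used for delta
    # reconstruction:
    #
    #   factory = versionedfile.adapter_registry.get(
    #       ('knit-annotated-delta-gz', 'fulltext'))
    #   adapter = factory(basis_vf)  # may be None for fulltext source kinds
    #   fulltext = adapter.get_bytes(record)
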
    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the adapted texts of interest for the tests."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base)
        # merged is both a delta and has multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged)
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

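    # The gzipped knit records asserted above follow the layout visible in
    # the expected strings: a 'version <name> <linecount> <sha1>' header,
    # then the content hunks (deltas carry 'start,end,count' range headers
    # such as '1,2,3'), and a closing 'end <name>' marker.
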
    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

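    # Note the logged call asserted above: rebuilding a fulltext from a delta
    # forces exactly one get_record_stream request for the compression parent
    # ('left'), which is why DeltaAnnotatedToFullText needs a backing
    # versioned file while FTAnnotatedToFullText can be given None.
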
    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)


class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        #format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        #knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))

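    # The two-hex-digit directories asserted above ('9b/', '45/', '88/',
    # 'ed/') appear to be a one-byte hash of the (escaped) key prefix
    # (e.g. adler32('file-id') & 0xff == 0x9b, adler32('%20') & 0xff == 0x88),
    # though the tests only rely on the literal values, not the derivation.
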

class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    # We want to be sure of behaviour for:
    # weaves prefix layout (weave texts)
    # individually named weaves (weave inventories)
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
    #                   as it is the most complex mapper.
    # individually named knits
    # individual no-graph knits in packs (signatures)
    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_scenarios = [
        ('weave-named', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-knit', {
            'cleanup':None,
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-nograph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': True,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('groupcompress-nograph', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    len_two_scenarios = [
        ('weave-prefix', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('annotated-knit-escape', {
            'cleanup':None,
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': True,
            }),
        ('groupcompress', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    scenarios = len_one_scenarios + len_two_scenarios

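    # These scenario lists parameterise every test in this class: the test
    # loader is expected to multiply TestVersionedFiles across them (one run
    # per scenario), injecting cleanup/factory/graph/key_length/
    # support_partial_insertion as instance attributes used below.
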
    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(self.cleanup, files)
        return files

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def test_add_lines(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f.add_lines(key0, [], ['a\n', 'b\n'])
        if self.graph:
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
        else:
            f.add_lines(key1, [], ['b\n', 'c\n'])
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test__add_text(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f._add_text(key0, [], 'a\nb\n')
        if self.graph:
            f._add_text(key1, [key0], 'b\nc\n')
        else:
            f._add_text(key1, [], 'b\nc\n')
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_check_no_parameters(self):
        files = self.get_versionedfiles()
        files.check()

    def test_check_progressbar_parameter(self):
        """A progress bar can be supplied because check can be a generator."""
        pb = ui.ui_factory.nested_progress_bar()
        self.addCleanup(pb.finished)
        files = self.get_versionedfiles()
        files.check(progress_bar=pb)

    def test_check_with_keys_becomes_generator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys = files.keys()
        entries = files.check(keys=keys)
        seen = set()
        # Texts output should be fulltexts.
        self.capture_stream(files, entries, seen.add,
            files.get_parent_map(keys), require_fulltext=True)
        # All texts should be output.
        self.assertEqual(set(keys), seen)

    def test_clear_cache(self):
        files = self.get_versionedfiles()
        files.clear_cache()

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
        nokeys=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only, nokeys=nokeys)

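    # The diamond fixture built by get_diamond_files() is used throughout:
    # 'origin' is a standalone fulltext, while 'base' is the common ancestor
    # of 'left' and 'right', which in turn both parent 'merged':
    #
    #      base
    #      /  \
    #   left  right
    #      \  /
    #     merged
    #
    # (In nograph scenarios the same texts are added without parents.)
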
    def _add_content_nostoresha(self, add_lines):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_versionedfiles()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            if add_lines:
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                         lines)
            else:
                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
                                         ''.join(lines))
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            new_key = self.get_simple_key(version + "2")
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, new_key, [], lines,
                nostore_sha=sha)
            self.assertRaises(errors.ExistingContent,
                vf._add_text, new_key, [], ''.join(lines),
                nostore_sha=sha)
            # and no new version should have been added.
            record = vf.get_record_stream([new_key], 'unordered', True).next()
            self.assertEqual('absent', record.storage_kind)

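    # The nostore_sha contract exercised above: passing the sha1 of an
    # already-stored text to add_lines/_add_text must raise ExistingContent
    # rather than storing a duplicate, leaving the requested new key absent.
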
    def test_add_lines_nostoresha(self):
        self._add_content_nostoresha(add_lines=True)

    def test__add_text_nostoresha(self):
        self._add_content_nostoresha(add_lines=False)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                ]),
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ]),
                files.keys())

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A
        # |\
        # | B
        # |/
        # C
        f.add_lines(key_a, [], ['\n'])
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_known_graph_with_fallbacks(self):
        f = self.get_versionedfiles('files')
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        if getattr(f, 'add_fallback_versioned_files', None) is None:
            raise TestNotApplicable("%s doesn't support fallbacks"
                                    % (f.__class__.__name__,))
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A     only in fallback
        # |\
        # | B
        # |/
        # C
        g = self.get_versionedfiles('fallback')
        g.add_lines(key_a, [], ['\n'])
        f.add_fallback_versioned_files(g)
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz',
             'knit-delta-closure', 'knit-delta-closure-ref',
             'groupcompress-block', 'groupcompress-block-ref'])

    def capture_stream(self, f, entries, on_seen, parents,
        require_fulltext=False):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
            if require_fulltext:
                factory.get_bytes_as('fulltext')

    def test_get_record_stream_interface(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)

    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

    def get_keys_and_groupcompress_sort_order(self):
        """Get diamond test keys list, and their groupcompress sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):2,
                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
                ('FileB', 'base'):5,
                }
        return keys, sort_order

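    # Contrast with the topological ordering above: 'groupcompress' order
    # wants newest-first within each file group ('merged' first, 'base'
    # last), and keeps the groups for different prefixes ('FileA', 'FileB')
    # contiguous, as the sort_order values encode.
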
    def test_get_record_stream_interface_ordered(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """Each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1],
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # self.assertEqual(files.get_text(factory.key),
            ft_bytes = factory.get_bytes_as('fulltext')
            self.assertIsInstance(ft_bytes, str)
            chunked_bytes = factory.get_bytes_as('chunked')
            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_groupcompress(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_groupcompress_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'groupcompress', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                                 factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertRecordHasContent(self, record, bytes):
        """Assert that record has the bytes bytes."""
        self.assertEqual(bytes, record.get_bytes_as('fulltext'))
        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

    def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
        files = self.get_versionedfiles()
        key = self.get_simple_key('foo')
        files.add_lines(key, (), ['my text\n', 'content'])
        stream = files.get_record_stream([key], 'unordered', False)
        record = stream.next()
        if record.storage_kind in ('chunked', 'fulltext'):
            # chunked and fulltext representations are for direct use not wire
            # serialisation: check they are able to be used directly. To send
            # such records over the wire translation will be needed.
            self.assertRecordHasContent(record, "my text\ncontent")
        else:
            bytes = [record.get_bytes_as(record.storage_kind)]
            network_stream = versionedfile.NetworkRecordStream(bytes).read()
            source_record = record
            records = []
            for record in network_stream:
                records.append(record)
                self.assertEqual(source_record.storage_kind,
                    record.storage_kind)
                self.assertEqual(source_record.parents, record.parents)
                self.assertEqual(
                    source_record.get_bytes_as(source_record.storage_kind),
                    record.get_bytes_as(record.storage_kind))
            self.assertEqual(1, len(records))

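    # The round-trip above is the wire-protocol invariant these tests lean
    # on: serialising a record via get_bytes_as(storage_kind) and replaying
    # it through NetworkRecordStream must preserve key, parents, storage
    # kind and byte content; only chunked/fulltext records need translation
    # before they can go over the wire.
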
    def assertStreamMetaEqual(self, records, expected, stream):
        """Assert that streams expected and stream have the same records.

        :param records: A list to collect the seen records.
        :return: A generator of the records in stream.
        """
        # We make assertions during copying to catch things early for
        # easier debugging.
        for record, ref_record in izip(stream, expected):
            records.append(record)
            self.assertEqual(ref_record.key, record.key)
            self.assertEqual(ref_record.storage_kind, record.storage_kind)
            self.assertEqual(ref_record.parents, record.parents)
            yield record

    def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
        stream):
        """Convert a stream to a bytes iterator.

        :param skipped_records: A list with one element to increment when a
            record is skipped.
        :param full_texts: A dict from key->fulltext representation, for
            checking chunked or fulltext stored records.
        :param stream: A record_stream.
        :return: An iterator over the bytes of each record.
        """
        for record in stream:
            if record.storage_kind in ('chunked', 'fulltext'):
                skipped_records[0] += 1
                # check the content is correct for direct use.
                self.assertRecordHasContent(record, full_texts[record.key])
            else:
                yield record.get_bytes_as(record.storage_kind)

    def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key, key_delta], 'unordered', False)
        ref = files.get_record_stream([key, key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key: "my text\ncontent",
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(2, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_native_formats_are_wire_ready_delta(self):
        # copy a delta over the wire
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        # Copy the basis text across so we can reconstruct the delta during
        # insertion into target.
        target_files.insert_record_stream(files.get_record_stream([key],
            'unordered', False))
        local = files.get_record_stream([key_delta], 'unordered', False)
        ref = files.get_record_stream([key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_wire_ready_delta_closure_included(self):
        # copy a delta over the wire with the ability to get its full text.
        files = self.get_versionedfiles()
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key_delta], 'unordered', True)
        ref = files.get_record_stream([key_delta], 'unordered', True)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection during check_stream.
        for record in self.assertStreamMetaEqual(records, ref, network_stream):
            # we have to be able to get the full text out:
            self.assertRecordHasContent(record, full_texts[record.key])
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                if factory.sha1 is not None:
                    sha1 = files.get_sha1s([factory.key])[factory.key]
                    self.assertEqual(sha1, factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't ask
        # for just absent keys to ensure that content before and after the
        # absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries),
            seen.add, parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return ()

    def test_get_annotator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        origin_key = self.get_simple_key('origin')
        base_key = self.get_simple_key('base')
        left_key = self.get_simple_key('left')
        right_key = self.get_simple_key('right')
        merged_key = self.get_simple_key('merged')
        # annotator = files.get_annotator()
        # introduced full text
        origins, lines = files.get_annotator().annotate(origin_key)
        self.assertEqual([(origin_key,)], origins)
        self.assertEqual(['origin\n'], lines)
        # a delta
        origins, lines = files.get_annotator().annotate(base_key)
        self.assertEqual([(base_key,)], origins)
        self.assertEqual(['base\n'], lines)
        # a merge
        origins, lines = files.get_annotator().annotate(merged_key)
        if self.graph:
            self.assertEqual([
                (base_key,),
                (left_key,),
                (right_key,),
                (merged_key,),
                ], origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (merged_key,),
                (merged_key,),
                (merged_key,),
                (merged_key,),
                ], origins)
        self.assertRaises(RevisionNotPresent,
            files.get_annotator().annotate, self.get_simple_key('missing-key'))

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = [
                (('r0',), self.get_parents(())),
                (('r1',), self.get_parents((('r0',),))),
                (('r2',), self.get_parents(())),
                (('r3',), self.get_parents(())),
                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
                ]
        else:
            parent_details = [
                (('FileA', 'r0'), self.get_parents(())),
                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
                (('FileA', 'r2'), self.get_parents(())),
                (('FileA', 'r3'), self.get_parents(())),
                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                ]
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key:parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))

    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
        self.assertEqual({
            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
            },
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents,
                expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)

    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some keys into f.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_long_parent_chain_out_of_order(self):
        """An out of order stream can either error or work."""
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        # Create a reasonably long chain of records based on each other, where
        # most will be deltas.
        source = self.get_versionedfiles('source')
        parents = ()
        keys = []
        content = [('same same %d\n' % n) for n in range(500)]
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            key = ('key-' + letter,)
            if self.key_length == 2:
                key = ('prefix',) + key
            content.append('content for ' + letter + '\n')
            source.add_lines(key, parents, content)
            keys.append(key)
            parents = (key,)
        # Create a stream of these records, excluding the first record that the
        # rest ultimately depend upon, and insert it into a new vf.
        streams = []
        for key in reversed(keys):
            streams.append(source.get_record_stream([key], 'unordered', False))
        deltas = chain(*streams[:-1])
        files = self.get_versionedfiles()
        try:
            files.insert_record_stream(deltas)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            # Must only report either just the first key as a missing parent,
            # or no key as missing (for nodelta scenarios).
            missing = set(files.get_missing_compression_parent_keys())
            missing.discard(keys[0])
            self.assertEqual(set(), missing)

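    # Implementations are allowed to either buffer records whose compression
    # parents have not arrived yet (reporting them via
    # get_missing_compression_parent_keys) or raise RevisionNotPresent; both
    # behaviours pass the test above, provided the store is left uncorrupted.
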
    def get_knit_delta_source(self):
        """Get a source that can produce a stream with knit delta records,
        regardless of this test's scenario.
        """
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        get_diamond_files(source, self.key_length, trailing_eol=True,
            nograph=False, left_only=False)
        return source

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. In the meantime a record missing its basis is
        not added.
        """
        source = self.get_knit_delta_source()
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        if self.support_partial_insertion:
            self.assertEqual([],
                list(files.get_missing_compression_parent_keys()))
            files.insert_record_stream(entries)
            missing_bases = files.get_missing_compression_parent_keys()
            self.assertEqual(set([self.get_simple_key('left')]),
                set(missing_bases))
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
        else:
            self.assertRaises(
                errors.RevisionNotPresent, files.insert_record_stream, entries)
            files.check()

    def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. That basis can be added in a second
        insert_record_stream call that does not need to repeat records present
        in the previous stream. The record(s) that required that basis are
        fully inserted once their basis is no longer missing.
        """
        if not self.support_partial_insertion:
            raise TestNotApplicable(
                'versioned file scenario does not support partial insertion')
        source = self.get_knit_delta_source()
        entries = source.get_record_stream([self.get_simple_key('origin'),
            self.get_simple_key('merged')], 'unordered', False)
        files = self.get_versionedfiles()
        files.insert_record_stream(entries)
        missing_bases = files.get_missing_compression_parent_keys()
        self.assertEqual(set([self.get_simple_key('left')]),
            set(missing_bases))
        # 'merged' is inserted (although a commit of a write group involving
        # this versionedfiles would fail).
        merged_key = self.get_simple_key('merged')
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        # Add the full delta closure of the missing records
        missing_entries = source.get_record_stream(
            missing_bases, 'unordered', True)
        files.insert_record_stream(missing_entries)
        # Now 'merged' is fully inserted (and a commit would succeed).
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        files.check()

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equalset of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.ProgressTask):

            def __init__(self):
                progress.ProgressTask.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])

        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                    pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines

        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content', 0, 5),
             ('Walking content', 1, 5),
             ('Walking content', 2, 5),
             ('Walking content', 3, 5),
             ('Walking content', 4, 5),
             ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0: 'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1: '45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2: 'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3: '26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4: 'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5: 'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6: '2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7: '95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8: '779e9a0b28f9f832528d4b21e17e168c67697272',
                 9: '1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10: '131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11: 'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12: '31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13: 'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14: '2c4b1736566b8ca6051e668de68650686a3922f2',
                 15: '5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16: 'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17: '8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18: '5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19: '1ebed371807ba5935958ad0884595126e8c4e823',
                 20: '2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21: '01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22: 'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23: 'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24: 'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25: 'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext')
                )

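    # The mpdiff round-trip above checks that make_mpdiffs/add_mpdiffs are
    # inverse operations: replaying each multi-parent diff into 'target' in
    # topological order must reproduce byte-identical fulltexts, including
    # all the tricky no-eol boundary cases set up at the start of the test.
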
    def test_keys(self):
        # While use is discouraged, versions() is still needed by aspects of
        # bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))


class VirtualVersionedFilesTests(TestCase):
    """Basic tests for the VirtualVersionedFiles implementations."""

    def _get_parent_map(self, keys):
        ret = {}
        for k in keys:
            if k in self._parent_map:
                ret[k] = self._parent_map[k]
        return ret

    def setUp(self):
        TestCase.setUp(self)
        self._lines = {}
        self._parent_map = {}
        self.texts = VirtualVersionedFiles(self._get_parent_map,
                                           self._lines.get)

    def test_add_lines(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_lines, "foo", [], [])

    def test_add_mpdiffs(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_mpdiffs, [])

    def test_check_noerrors(self):
        self.texts.check()

    def test_insert_record_stream(self):
        self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
                          None)

    def test_get_sha1s_nonexistent(self):
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines["key"] = ["dataline1", "dataline2"]
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
                          self.texts.get_sha1s([("key",)]))

    def test_get_parent_map(self):
        self._parent_map = {"G": ("A", "B")}
        self.assertEquals({("G",): (("A",), ("B",))},
                          self.texts.get_parent_map([("G",), ("L",)]))

    def test_get_record_stream(self):
        self._lines["A"] = ["FOO", "BAR"]
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("chunked", record.storage_kind)
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines["A"] = ["FOO", "BAR"]
        self._lines["B"] = ["HEY"]
        self._lines["C"] = ["Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
                          sorted(list(it)))


class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):

    def get_ordering_vf(self, key_priority):
        builder = self.make_branch_builder('test')
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'TREE_ROOT', 'directory', None))])
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['B'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        vf = b.repository.inventories
        return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)

    def test_get_empty(self):
        vf = self.get_ordering_vf({})
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
                           False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)