        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']

class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)
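
    # The registry is keyed by (source_kind, requested_kind) pairs, so a
    # caller holding a record in one storage kind can look up a converter
    # to the kind it needs.  A minimal usage sketch ('record' is a
    # hypothetical knit-ft-gz record, shown for illustration only):
    #
    #   adapter_factory = versionedfile.adapter_registry.get(
    #       ('knit-ft-gz', 'fulltext'))
    #   adapter = adapter_factory(None)  # fulltexts need no backing vf
    #   text = adapter.get_bytes(record)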

    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the adapted texts of interest for the tests."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base)
        # merged is both a delta and has multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged)
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
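
    # The gzipped payload checked above follows the plain knit record
    # layout: a 'version <name> <linecount> <sha1>' header, then the content
    # lines (for deltas, '<start>,<end>,<count>' hunk headers followed by
    # the replacement lines), then an 'end <name>' trailer.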

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)
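
    # In the four adaptor tests above, rebuilding the 'merged' delta pulls
    # exactly one fulltext ('left', its compression parent) from the backing
    # file, which is why logged_vf.calls records a single get_record_stream
    # request in each case.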


class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        #format5: plain
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        #format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        #knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
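
# Each mapper turns a key tuple into a relative storage path and (except
# ConstantMapper) back again.  The two-character directory seen in the
# hashed mappers appears to be derived from a hash of the file id, which
# spreads files across subdirectories; the %25-style escapes protect
# characters that are not safe in transport paths.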


class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    # We want to be sure of behaviour for:
    # weaves prefix layout (weave texts)
    # individually named weaves (weave inventories)
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
    #     as it is the most complex mapper.
    # individually named knits
    # individual no-graph knits in packs (signatures)
    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_scenarios = [
        ('weave-named', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-knit', {
            'cleanup':None,
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-nograph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': True,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('groupcompress-nograph', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(False, False, 1),
            'graph': False,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    len_two_scenarios = [
        ('weave-prefix', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('annotated-knit-escape', {
            'cleanup':None,
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': True,
            }),
        ('groupcompress', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(True, False, 1),
            'graph': True,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    scenarios = len_one_scenarios + len_two_scenarios
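
    # Each scenario dict is applied to this class by the test loader, so
    # every test method below runs once per storage implementation with
    # self.factory, self.graph, self.key_length, self.cleanup and
    # self.support_partial_insertion set accordingly.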

    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(self.cleanup, files)
        return files

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def test_add_fallback_implies_without_fallbacks(self):
        f = self.get_versionedfiles('files')
        if getattr(f, 'add_fallback_versioned_files', None) is None:
            raise TestNotApplicable("%s doesn't support fallbacks"
                                    % (f.__class__.__name__,))
        g = self.get_versionedfiles('fallback')
        key_a = self.get_simple_key('a')
        g.add_lines(key_a, [], ['\n'])
        f.add_fallback_versioned_files(g)
        self.assertTrue(key_a in f.get_parent_map([key_a]))
        self.assertFalse(key_a in f.without_fallbacks().get_parent_map([key_a]))

    def test_add_lines(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f.add_lines(key0, [], ['a\n', 'b\n'])
        if self.graph:
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
        else:
            f.add_lines(key1, [], ['b\n', 'c\n'])
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test__add_text(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f._add_text(key0, [], 'a\nb\n')
        if self.graph:
            f._add_text(key1, [key0], 'b\nc\n')
        else:
            f._add_text(key1, [], 'b\nc\n')
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))
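
    # The diamond fixture used throughout these tests is:
    #
    #   origin (standalone fulltext)
    #   base -> left, base -> right, (left, right) -> merged
    #
    # so with a graph, annotate attributes each line of 'merged' to the
    # revision that introduced it; without one, every line is credited to
    # 'merged' itself.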

    def test_check_no_parameters(self):
        files = self.get_versionedfiles()

    def test_check_progressbar_parameter(self):
        """A progress bar can be supplied because check can be a generator."""
        pb = ui.ui_factory.nested_progress_bar()
        self.addCleanup(pb.finished)
        files = self.get_versionedfiles()
        files.check(progress_bar=pb)

    def test_check_with_keys_becomes_generator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys = files.keys()
        entries = files.check(keys=keys)
        seen = set()
        # Texts output should be fulltexts.
        self.capture_stream(files, entries, seen.add,
            files.get_parent_map(keys), require_fulltext=True)
        # All texts should be output.
        self.assertEqual(set(keys), seen)

    def test_clear_cache(self):
        files = self.get_versionedfiles()
        files.clear_cache()

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
        nokeys=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only, nokeys=nokeys)

    def _add_content_nostoresha(self, add_lines):
        """When nostore_sha is supplied, adding old content raises."""
        vf = self.get_versionedfiles()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            if add_lines:
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                         lines)
            else:
                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
                                         ''.join(lines))
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            new_key = self.get_simple_key(version + "2")
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, new_key, [], lines,
                nostore_sha=sha)
            self.assertRaises(errors.ExistingContent,
                vf._add_text, new_key, [], ''.join(lines),
                nostore_sha=sha)
            # and no new version should have been added.
            record = vf.get_record_stream([new_key], 'unordered', True).next()
            self.assertEqual('absent', record.storage_kind)

    def test_add_lines_nostoresha(self):
        self._add_content_nostoresha(add_lines=True)

    def test__add_text_nostoresha(self):
        self._add_content_nostoresha(add_lines=False)
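
    # nostore_sha lets a caller refuse to store content that already exists:
    # if the text being added hashes to the supplied sha1, ExistingContent
    # is raised instead of a duplicate being stored.  A minimal sketch
    # (hypothetical key names, for illustration only):
    #
    #   sha, _, _ = vf.add_lines(key, [], lines)
    #   vf.add_lines(other_key, [], lines, nostore_sha=sha)  # raises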

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                ]),
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ]),
                files.keys())

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A
        # |\
        # | B
        # |/
        # C
        f.add_lines(key_a, [], ['\n'])
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_known_graph_with_fallbacks(self):
        f = self.get_versionedfiles('files')
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        if getattr(f, 'add_fallback_versioned_files', None) is None:
            raise TestNotApplicable("%s doesn't support fallbacks"
                                    % (f.__class__.__name__,))
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A     only in fallback
        # |\
        # | B
        # |/
        # C
        g = self.get_versionedfiles('fallback')
        g.add_lines(key_a, [], ['\n'])
        f.add_fallback_versioned_files(g)
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz',
             'knit-delta-closure', 'knit-delta-closure-ref',
             'groupcompress-block', 'groupcompress-block-ref'])

    def capture_stream(self, f, entries, on_seen, parents,
        require_fulltext=False):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
            if require_fulltext:
                factory.get_bytes_as('fulltext')
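
    # Every record factory in a stream exposes .key, .sha1 (which may be
    # None), .parents, .storage_kind, and get_bytes_as(kind); capture_stream
    # asserts that contract for each record it sees.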

    def test_get_record_stream_interface(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)

    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

    def get_keys_and_groupcompress_sort_order(self):
        """Get diamond test keys list, and their groupcompress sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):2,
                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
                ('FileB', 'base'):5,
                }
        return keys, sort_order
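
    # Note the inverted orderings: topological sort emits bases first
    # (position 0) with 'merged' last, while the groupcompress ordering
    # emits the newest text first (so later texts can delta against it)
    # and walks each prefix as a separate group.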

    def test_get_record_stream_interface_ordered(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """Each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1],
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # self.assertEqual(files.get_text(factory.key),
            ft_bytes = factory.get_bytes_as('fulltext')
            self.assertIsInstance(ft_bytes, str)
            chunked_bytes = factory.get_bytes_as('chunked')
            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))

        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_groupcompress(self):
        """Each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_groupcompress_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'groupcompress', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                                 factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertRecordHasContent(self, record, bytes):
        """Assert that record has the given bytes as its content."""
        self.assertEqual(bytes, record.get_bytes_as('fulltext'))
        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

    def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
        files = self.get_versionedfiles()
        key = self.get_simple_key('foo')
        files.add_lines(key, (), ['my text\n', 'content'])
        stream = files.get_record_stream([key], 'unordered', False)
        record = stream.next()
        if record.storage_kind in ('chunked', 'fulltext'):
            # chunked and fulltext representations are for direct use not wire
            # serialisation: check they are able to be used directly. To send
            # such records over the wire translation will be needed.
            self.assertRecordHasContent(record, "my text\ncontent")
        else:
            bytes = [record.get_bytes_as(record.storage_kind)]
            network_stream = versionedfile.NetworkRecordStream(bytes).read()
            source_record = record
            records = []
            for record in network_stream:
                records.append(record)
                self.assertEqual(source_record.storage_kind,
                    record.storage_kind)
                self.assertEqual(source_record.parents, record.parents)
                self.assertEqual(
                    source_record.get_bytes_as(source_record.storage_kind),
                    record.get_bytes_as(record.storage_kind))
            self.assertEqual(1, len(records))

    def assertStreamMetaEqual(self, records, expected, stream):
        """Assert that streams expected and stream have the same records.

        :param records: A list to collect the seen records.
        :return: A generator of the records in stream.
        """
        # We make assertions during copying to catch things early for
        # easier debugging.
        for record, ref_record in izip(stream, expected):
            records.append(record)
            self.assertEqual(ref_record.key, record.key)
            self.assertEqual(ref_record.storage_kind, record.storage_kind)
            self.assertEqual(ref_record.parents, record.parents)
            yield record
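
    # The wire round-trip pattern used by the following tests, as a sketch:
    #
    #   payload = [r.get_bytes_as(r.storage_kind) for r in local_stream]
    #   for record in versionedfile.NetworkRecordStream(payload).read():
    #       ...  # metadata and bytes must match the source records
    #
    # (local_stream is a stand-in name; the tests build it per scenario.)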

    def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
        stream):
        """Convert a stream to a bytes iterator.

        :param skipped_records: A list with one element to increment when a
            record is skipped.
        :param full_texts: A dict from key->fulltext representation, for
            checking chunked or fulltext stored records.
        :param stream: A record_stream.
        :return: An iterator over the bytes of each record.
        """
        for record in stream:
            if record.storage_kind in ('chunked', 'fulltext'):
                skipped_records[0] += 1
                # check the content is correct for direct use.
                self.assertRecordHasContent(record, full_texts[record.key])
            else:
                yield record.get_bytes_as(record.storage_kind)

    def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key, key_delta], 'unordered', False)
        ref = files.get_record_stream([key, key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key: "my text\ncontent",
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(2, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been copied.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_native_formats_are_wire_ready_delta(self):
        # copy a delta over the wire
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        # Copy the basis text across so we can reconstruct the delta during
        # insertion into target.
        target_files.insert_record_stream(files.get_record_stream([key],
            'unordered', False))
        local = files.get_record_stream([key_delta], 'unordered', False)
        ref = files.get_record_stream([key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been copied.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_wire_ready_delta_closure_included(self):
        # copy a delta over the wire with the ability to get its full text.
        files = self.get_versionedfiles()
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key_delta], 'unordered', True)
        ref = files.get_record_stream([key_delta], 'unordered', True)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so we can
        # check the content was carried across correctly without doing delta
        # inspection during check_stream.
        for record in self.assertStreamMetaEqual(records, ref, network_stream):
            # we have to be able to get the full text out:
            self.assertRecordHasContent(record, full_texts[record.key])
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                if factory.sha1 is not None:
                    sha1 = files.get_sha1s([factory.key])[factory.key]
                    self.assertEqual(sha1, factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't ask
        # for just absent keys to ensure that content before and after the
        # absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
            parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return None

    def test_get_annotator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        origin_key = self.get_simple_key('origin')
        base_key = self.get_simple_key('base')
        left_key = self.get_simple_key('left')
        right_key = self.get_simple_key('right')
        merged_key = self.get_simple_key('merged')
        # annotator = files.get_annotator()
        # introduced full text
        origins, lines = files.get_annotator().annotate(origin_key)
        self.assertEqual([(origin_key,)], origins)
        self.assertEqual(['origin\n'], lines)
        # a delta
        origins, lines = files.get_annotator().annotate(base_key)
        self.assertEqual([(base_key,)], origins)
        self.assertEqual(['base\n'], lines)
        origins, lines = files.get_annotator().annotate(merged_key)
        if self.graph:
            self.assertEqual([
                (base_key,),
                (left_key,),
                (right_key,),
                (merged_key,),
                ], origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (merged_key,),
                (merged_key,),
                (merged_key,),
                (merged_key,),
                ], origins)
        self.assertRaises(RevisionNotPresent,
            files.get_annotator().annotate, self.get_simple_key('missing-key'))

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = [
                (('r0',), self.get_parents(())),
                (('r1',), self.get_parents((('r0',),))),
                (('r2',), self.get_parents(())),
                (('r3',), self.get_parents(())),
                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
                ]
        else:
            parent_details = [
                (('FileA', 'r0'), self.get_parents(())),
                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
                (('FileA', 'r2'), self.get_parents(())),
                (('FileA', 'r3'), self.get_parents(())),
                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                ]
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key:parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))

    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
        self.assertEqual({
            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
            },
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)

    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some keys into f.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_long_parent_chain_out_of_order(self):
        """An out of order stream can either error or work."""
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        # Create a reasonably long chain of records based on each other, where
        # most will be deltas.
        source = self.get_versionedfiles('source')
        parents = ()
        keys = []
        content = [('same same %d\n' % n) for n in range(500)]
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            key = ('key-' + letter,)
            if self.key_length == 2:
                key = ('prefix',) + key
            content.append('content for ' + letter + '\n')
            source.add_lines(key, parents, content)
            keys.append(key)
            parents = (key,)
        # Create a stream of these records, excluding the first record that the
        # rest ultimately depend upon, and insert it into a new vf.
        streams = []
        for key in reversed(keys):
            streams.append(source.get_record_stream([key], 'unordered', False))
        deltas = chain(*streams[:-1])
        files = self.get_versionedfiles()
        try:
            files.insert_record_stream(deltas)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            # Must only report either just the first key as a missing parent,
            # or no key as missing (for nodelta scenarios).
            missing = set(files.get_missing_compression_parent_keys())
            missing.discard(keys[0])
            self.assertEqual(set(), missing)

    def get_knit_delta_source(self):
        """Get a source that can produce a stream with knit delta records,
        regardless of this test's scenario.
        """
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        get_diamond_files(source, self.key_length, trailing_eol=True,
            nograph=False, left_only=False)
        return source

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. In the meantime a record missing its basis is
        not added.
        """
        source = self.get_knit_delta_source()
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        if self.support_partial_insertion:
            self.assertEqual([],
                list(files.get_missing_compression_parent_keys()))
            files.insert_record_stream(entries)
            missing_bases = files.get_missing_compression_parent_keys()
            self.assertEqual(set([self.get_simple_key('left')]),
                set(missing_bases))
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
        else:
            self.assertRaises(
                errors.RevisionNotPresent, files.insert_record_stream, entries)
            files.check()

    def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. That basis can be added in a second
        insert_record_stream call that does not need to repeat records present
        in the previous stream. The record(s) that required that basis are
        fully inserted once their basis is no longer missing.
        """
        if not self.support_partial_insertion:
            raise TestNotApplicable(
                'versioned file scenario does not support partial insertion')
        source = self.get_knit_delta_source()
        entries = source.get_record_stream([self.get_simple_key('origin'),
            self.get_simple_key('merged')], 'unordered', False)
        files = self.get_versionedfiles()
        files.insert_record_stream(entries)
        missing_bases = files.get_missing_compression_parent_keys()
        self.assertEqual(set([self.get_simple_key('left')]),
            set(missing_bases))
        # 'merged' is inserted (although a commit of a write group involving
        # this versionedfiles would fail).
        merged_key = self.get_simple_key('merged')
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        # Add the full delta closure of the missing records
        missing_entries = source.get_record_stream(
            missing_bases, 'unordered', True)
        files.insert_record_stream(missing_entries)
        # Now 'merged' is fully inserted (and a commit would succeed).
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
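
    # The partial-insertion protocol demonstrated above: insert what you
    # have, ask get_missing_compression_parent_keys() for the bases still
    # required, fetch just those (with delta closure) and insert them; the
    # suspended records become fully available without resending anything.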

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equal set of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.ProgressTask):

            def __init__(self):
                progress.ProgressTask.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])
        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content', 0, 5),
             ('Walking content', 1, 5),
             ('Walking content', 2, 5),
             ('Walking content', 3, 5),
             ('Walking content', 4, 5),
             ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext')
                )
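
    # make_mpdiffs/add_mpdiffs round-trip: a multi-parent diff plus the
    # parent keys and the expected sha1 is enough for the target to
    # reconstruct the identical fulltext, which assertEqualDiff verifies
    # key by key in topological order.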

    def test_keys(self):
        # While use is discouraged, versions() is still needed by aspects of
        # bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))


class VirtualVersionedFilesTests(TestCase):
    """Basic tests for the VirtualVersionedFiles implementations."""

    def _get_parent_map(self, keys):
        ret = {}
        for k in keys:
            if k in self._parent_map:
                ret[k] = self._parent_map[k]
        return ret

    def setUp(self):
        TestCase.setUp(self)
        self._lines = {}
        self._parent_map = {}
        self.texts = VirtualVersionedFiles(self._get_parent_map,
                                           self._lines.get)

    def test_add_lines(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_lines, "foo", [], [])

    def test_add_mpdiffs(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_mpdiffs, [])

    def test_check_noerrors(self):
        self.texts.check()

    def test_insert_record_stream(self):
        self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
                          [])

    def test_get_sha1s_nonexistent(self):
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines["key"] = ["dataline1", "dataline2"]
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
                          self.texts.get_sha1s([("key",)]))

    def test_get_parent_map(self):
        self._parent_map = {"G": ("A", "B")}
        self.assertEquals({("G",): (("A",),("B",))},
                          self.texts.get_parent_map([("G",), ("L",)]))

    def test_get_record_stream(self):
        self._lines["A"] = ["FOO", "BAR"]
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("chunked", record.storage_kind)
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines["A"] = ["FOO", "BAR"]
        self._lines["B"] = ["HEY"]
        self._lines["C"] = ["Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
            sorted(list(it)))
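
# VirtualVersionedFiles is a read-only adapter: it answers get_parent_map,
# get_sha1s, get_record_stream and line iteration from the two callables
# passed to its constructor, and raises NotImplementedError for every
# mutating operation, as the tests above demonstrate.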


class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):

    def get_ordering_vf(self, key_priority):
        builder = self.make_branch_builder('test')
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'TREE_ROOT', 'directory', None))])
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['B'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        vf = b.repository.inventories
        return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)

    def test_get_empty(self):
        vf = self.get_ordering_vf({})
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
                           False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)
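
# As the three tests above show, OrderingVersionedFilesDecorator rewrites
# 'unordered' requests into the supplied key_priority order (lowest value
# first, with unknown keys sorted to the front alphabetically), passes
# 'topological' requests through, and logs every get_record_stream call in
# .calls for later assertions.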