    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_scenarios = [
        ('weave-named', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-knit', {
            'cleanup':None,
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-nograph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': True,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            'support_partial_insertion': False,
            }),
        ('groupcompress-nograph', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(False, False, 1),
            'graph': False,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    len_two_scenarios = [
        ('weave-prefix', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('annotated-knit-escape', {
            'cleanup':None,
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': False,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'graph':True,
            'key_length':2,
            'support_partial_insertion': True,
            }),
        ('groupcompress', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(True, False, 1),
            'graph': True,
            'key_length':1,
            'support_partial_insertion':False,
            }),
        ]
    scenarios = len_one_scenarios + len_two_scenarios
    return multiply_tests(to_adapt, scenarios, result)
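
# A sketch of what the scenario multiplication above does (the test ids shown
# are illustrative, not taken from this file): multiply_tests clones every
# test in `to_adapt` once per scenario, so a single method such as
# test_add_lines runs once per entry, e.g.
#
#   test_add_lines(...,weave-named)
#   test_add_lines(...,groupcompress)
#
# and each clone gets the scenario dict's entries ('cleanup', 'factory',
# 'graph', 'key_length', 'support_partial_insertion') set as attributes on
# the test instance before it runs.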

def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges.

    :param trailing_eol: If True, end the last line with \n.
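    :param left_only: If True, do not add the right and merged nodes.

    The ancestry built is the classic diamond (drawn here for orientation;
    each node's text is named after its key)::

        origin
           |
         base
         /  \
      left  right
         \  /
        merged
    """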

def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
    nograph=False, nokeys=False):
    """Get a diamond graph to exercise deltas and merges.

    This creates a 5-node graph in files. If files supports 2-length keys two
    graphs are created to exercise the support for multiple ids.
    """
    if key_length == 1:
        prefixes = [()]
    else:
        prefixes = [('FileA',), ('FileB',)]
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    result = []
    def get_parents(suffix_list):
        if nograph:
            return ()
        else:
            result = [prefix + suffix for suffix in suffix_list]
            return result
    def get_key(suffix):
        if nokeys:
            return (None, )
        else:
            return (suffix,)
    # we loop over each key because that spreads the inserts across prefixes,
    # which is how commit operates.
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('origin'), (),
            ['origin' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('base'),
            get_parents([('origin',)]), ['base' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('left'),
            get_parents([('base',)]),
            ['base\n', 'left' + last_char]))
    if not left_only:
        for prefix in prefixes:
            result.append(files.add_lines(prefix + get_key('right'),
                get_parents([('base',)]),
                ['base\n', 'right' + last_char]))
        for prefix in prefixes:
            result.append(files.add_lines(prefix + get_key('merged'),
                get_parents([('left',), ('right',)]),
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
    return result
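
# Note: each element appended to `result` above is the tuple returned by
# add_lines(); the tests below only rely on its first two elements being
# (sha1_of_text, stored_byte_count) -- see the assertions that slice each
# returned tuple with add[:2].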

            self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions(['child', 'otherchild'],
                                   [('Walking content', 0, 2),
                                    ('Walking content', 1, 2),
                                    ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_versions(None, [('Walking content', 0, 5),
                                          ('Walking content', 1, 5),
                                          ('Walking content', 2, 5),
                                          ('Walking content', 3, 5),
                                          ('Walking content', 4, 5),
                                          ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', 'base')] > 0)
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)

        self.addCleanup(lambda:self.cleanup(files))

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def test_add_lines(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f.add_lines(key0, [], ['a\n', 'b\n'])
        if self.graph:
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
        else:
            f.add_lines(key1, [], ['b\n', 'c\n'])
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test__add_text(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f._add_text(key0, [], 'a\nb\n')
        if self.graph:
            f._add_text(key1, [key0], 'b\nc\n')
        else:
            f._add_text(key1, [], 'b\nc\n')
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)

        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_check_no_parameters(self):
        files = self.get_versionedfiles()

    def test_check_progressbar_parameter(self):
        """A progress bar can be supplied because check can be a generator."""
        pb = ui.ui_factory.nested_progress_bar()
        self.addCleanup(pb.finished)
        files = self.get_versionedfiles()
        files.check(progress_bar=pb)

    def test_check_with_keys_becomes_generator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys = files.keys()
        entries = files.check(keys=keys)
        seen = set()
        # Texts output should be fulltexts.
        self.capture_stream(files, entries, seen.add,
            files.get_parent_map(keys), require_fulltext=True)
        # All texts should be output.
        self.assertEqual(set(keys), seen)

    def test_clear_cache(self):
        files = self.get_versionedfiles()
        files.clear_cache()

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
        nokeys=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only, nokeys=nokeys)

    def _add_content_nostoresha(self, add_lines):
        """When nostore_sha is supplied, adding old content raises."""
1600
vf = self.get_versionedfiles()
1601
empty_text = ('a', [])
1602
sample_text_nl = ('b', ["foo\n", "bar\n"])
1603
sample_text_no_nl = ('c', ["foo\n", "bar"])
1605
for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
1607
sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
1610
sha, _, _ = vf._add_text(self.get_simple_key(version), [],
1613
# we now have a copy of all the lines in the vf.
1614
for sha, (version, lines) in zip(
1615
shas, (empty_text, sample_text_nl, sample_text_no_nl)):
1616
new_key = self.get_simple_key(version + "2")
1617
self.assertRaises(errors.ExistingContent,
1618
vf.add_lines, new_key, [], lines,
1620
self.assertRaises(errors.ExistingContent,
1621
vf._add_text, new_key, [], ''.join(lines),
1623
# and no new version should have been added.
1624
record = vf.get_record_stream([new_key], 'unordered', True).next()
1625
self.assertEqual('absent', record.storage_kind)
1627
def test_add_lines_nostoresha(self):
1628
self._add_content_nostoresha(add_lines=True)
1630
def test__add_text_nostoresha(self):
1631
self._add_content_nostoresha(add_lines=False)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()

            ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                ]),
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ]),
                files.keys())
1718
def test_empty_lines(self):
1525
1719
"""Empty files can be stored."""
1526
1720
f = self.get_versionedfiles()
1548
1742
f.get_record_stream([key_b], 'unordered', True
1549
1743
).next().get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A
        # |\
        # | B
        # |/
        # C
        f.add_lines(key_a, [], ['\n'])
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
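        # topo_sort must yield parents before children; for this graph there
        # is exactly one valid order: a, b, c.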

    def test_known_graph_with_fallbacks(self):
        f = self.get_versionedfiles('files')
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        if getattr(f, 'add_fallback_versioned_files', None) is None:
            raise TestNotApplicable("%s doesn't support fallbacks"
                                    % (f.__class__.__name__,))
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A     only in fallback
        # |\
        # | B
        # |/
        # C
        g = self.get_versionedfiles('fallback')
        g.add_lines(key_a, [], ['\n'])
        f.add_fallback_versioned_files(g)
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
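        # key_a lives only in the fallback store g, yet it appears in f's
        # ancestry: fallbacks are searched transparently when building the
        # known graph.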

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz',
             'knit-delta-closure', 'knit-delta-closure-ref',
             'groupcompress-block', 'groupcompress-block-ref'])
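        # Roughly: 'fulltext' and 'chunked' are directly usable
        # representations, while the 'knit-*' and 'groupcompress-*' kinds are
        # serialised storage/wire formats, some of which need their
        # compression basis (or an included delta closure) before a fulltext
        # can be extracted.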

    def capture_stream(self, f, entries, on_seen, parents,
        require_fulltext=False):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
            if require_fulltext:
                factory.get_bytes_as('fulltext')

    def test_get_record_stream_interface(self):
        """Each item in a stream has to provide a regular interface."""

        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertRecordHasContent(self, record, bytes):
        """Assert that the record's content is exactly ``bytes``."""
        self.assertEqual(bytes, record.get_bytes_as('fulltext'))
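        # 'chunked' yields an iterable of byte strings whose concatenation is
        # the fulltext, so joining it must reproduce the same bytes: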
        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

    def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
        files = self.get_versionedfiles()
        key = self.get_simple_key('foo')
        files.add_lines(key, (), ['my text\n', 'content'])
        stream = files.get_record_stream([key], 'unordered', False)
        record = stream.next()
        if record.storage_kind in ('chunked', 'fulltext'):
            # chunked and fulltext representations are for direct use not wire
            # serialisation: check they are able to be used directly. To send
            # such records over the wire translation will be needed.
            self.assertRecordHasContent(record, "my text\ncontent")
        else:
            bytes = [record.get_bytes_as(record.storage_kind)]
            network_stream = versionedfile.NetworkRecordStream(bytes).read()
            source_record = record
            records = []
            for record in network_stream:
                records.append(record)
                self.assertEqual(source_record.storage_kind,
                    record.storage_kind)
                self.assertEqual(source_record.parents, record.parents)
                self.assertEqual(
                    source_record.get_bytes_as(source_record.storage_kind),
                    record.get_bytes_as(record.storage_kind))
            self.assertEqual(1, len(records))
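            # The round trip above is the wire protocol in miniature: the
            # record's native bytes are framed by NetworkRecordStream and
            # parsed back into an equivalent record, so nothing should change
            # in transit.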

    def assertStreamMetaEqual(self, records, expected, stream):
        """Assert that streams expected and stream have the same records.

        :param records: A list to collect the seen records.
        :return: A generator of the records in stream.
        """
        # We make assertions during copying to catch things early for
        # easier debugging.
        for record, ref_record in izip(stream, expected):
            records.append(record)
            self.assertEqual(ref_record.key, record.key)
            self.assertEqual(ref_record.storage_kind, record.storage_kind)
            self.assertEqual(ref_record.parents, record.parents)
            yield record

    def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
        stream):
        """Convert a stream to a bytes iterator.

        :param skipped_records: A list with one element to increment when a
            record is skipped.
        :param full_texts: A dict from key->fulltext representation, for
            checking chunked or fulltext stored records.
        :param stream: A record_stream.
        :return: An iterator over the bytes of each record.
        """
        for record in stream:
            if record.storage_kind in ('chunked', 'fulltext'):
                skipped_records[0] += 1
                # check the content is correct for direct use.
                self.assertRecordHasContent(record, full_texts[record.key])
            else:
                yield record.get_bytes_as(record.storage_kind)

    def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key, key_delta], 'unordered', False)
        ref = files.get_record_stream([key, key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key: "my text\ncontent",
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(2, len(records) + skipped_records[0])
        # if any content was copied it must all have been.
        self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_native_formats_are_wire_ready_delta(self):
        # copy a delta over the wire
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        # Copy the basis text across so we can reconstruct the delta during
        # insertion into target.
        target_files.insert_record_stream(files.get_record_stream([key],
            'unordered', False))
        local = files.get_record_stream([key_delta], 'unordered', False)
        ref = files.get_record_stream([key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])
        # if any content was copied it must all have been.
        self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_wire_ready_delta_closure_included(self):
        # copy a delta over the wire with the ability to get its full text.
        files = self.get_versionedfiles()
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key_delta], 'unordered', True)
        ref = files.get_record_stream([key_delta], 'unordered', True)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        for record in self.assertStreamMetaEqual(records, ref, network_stream):
            # we have to be able to get the full text out:
            self.assertRecordHasContent(record, full_texts[record.key])
        # No duplicates on the wire thank you!
        self.assertEqual(1, len(records) + skipped_records[0])

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""

        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_long_parent_chain_out_of_order(self):
        """An out of order stream can either error or work."""
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        # Create a reasonably long chain of records based on each other, where
        # most will be deltas.
        source = self.get_versionedfiles('source')
        parents = ()
        keys = []
        content = [('same same %d\n' % n) for n in range(500)]
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            key = ('key-' + letter,)
            if self.key_length == 2:
                key = ('prefix',) + key
            content.append('content for ' + letter + '\n')
            source.add_lines(key, parents, content)
            keys.append(key)
            parents = (key,)
        # Create a stream of these records, excluding the first record that
        # the rest ultimately depend upon, and insert it into a new vf.
        streams = []
        for key in reversed(keys):
            streams.append(source.get_record_stream([key], 'unordered', False))
        deltas = chain(*streams[:-1])
        files = self.get_versionedfiles()
        try:
            files.insert_record_stream(deltas)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            # Must only report either just the first key as a missing parent,
            # or no key as missing (for nodelta scenarios).
            missing = set(files.get_missing_compression_parent_keys())
            missing.discard(keys[0])
            self.assertEqual(set(), missing)

    def get_knit_delta_source(self):
        """Get a source that can produce a stream with knit delta records,
        regardless of this test's scenario.
        """
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        get_diamond_files(source, self.key_length, trailing_eol=True,
            nograph=False, left_only=False)
        return source
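
    # Background for the tests below: a knit delta record stores a diff
    # against a basis text (its "compression parent"), so its fulltext can
    # only be reconstructed once that basis is present too; until then the
    # store reports it via get_missing_compression_parent_keys().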

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis.  In the meantime a record missing its basis is
        not added.
        """
        source = self.get_knit_delta_source()
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        if self.support_partial_insertion:
            self.assertEqual([],
                list(files.get_missing_compression_parent_keys()))
            files.insert_record_stream(entries)
            missing_bases = files.get_missing_compression_parent_keys()
            self.assertEqual(set([self.get_simple_key('left')]),
                set(missing_bases))
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
        else:
            self.assertRaises(
                errors.RevisionNotPresent, files.insert_record_stream, entries)
            files.check()

    def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis.  That basis can be added in a second
        insert_record_stream call that does not need to repeat records present
        in the previous stream.  The record(s) that required that basis are
        fully inserted once their basis is no longer missing.
        """
        if not self.support_partial_insertion:
            raise TestNotApplicable(
                'versioned file scenario does not support partial insertion')
        source = self.get_knit_delta_source()
        entries = source.get_record_stream([self.get_simple_key('origin'),
            self.get_simple_key('merged')], 'unordered', False)
        files = self.get_versionedfiles()
        files.insert_record_stream(entries)
        missing_bases = files.get_missing_compression_parent_keys()
        self.assertEqual(set([self.get_simple_key('left')]),
            set(missing_bases))
        # 'merged' is inserted (although a commit of a write group involving
        # this versionedfiles would fail).
        merged_key = self.get_simple_key('merged')
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        # Add the full delta closure of the missing records
        missing_entries = source.get_record_stream(
            missing_bases, 'unordered', True)
        files.insert_record_stream(missing_entries)
        # Now 'merged' is fully inserted (and a commit would succeed).
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())

        self.assertEqual({}, files.get_parent_map([]))

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equal set of the lines added by
        # versions in the store.

        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
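        # Each expected tuple above is one progress update of the form
        # (message, current_item, total_items), captured from the progress
        # bar while the iteration walks the two requested keys.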
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content', 0, 5),
             ('Walking content', 1, 5),
             ('Walking content', 2, 5),
             ('Walking content', 3, 5),
             ('Walking content', 4, 5),
             ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)