# Copyright (C) 2005 Canonical Ltd
# Johan Rydberg <jrydberg@gnu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA


# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.

from itertools import chain
from StringIO import StringIO

from bzrlib import (
    errors,
    osutils,
    progress,
    )
from bzrlib.errors import (
    RevisionNotPresent,
    RevisionAlreadyPresent,
    )
from bzrlib import knit as _mod_knit
from bzrlib.knit import (
    cleanup_pack_knit,
    make_file_factory,
    make_pack_factory,
    )
from bzrlib.symbol_versioning import one_four, one_five
from bzrlib.tests import (
    TestCaseWithMemoryTransport,
    TestScenarioApplier,
    TestSkipped,
    condition_isinstance,
    iter_suite_tests,
    split_suite_by_condition,
    )
from bzrlib.tests.http_utils import TestCaseWithWebserver
from bzrlib.trace import mutter
from bzrlib.transport import get_transport
from bzrlib.transport.memory import MemoryTransport
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile
import bzrlib.versionedfile as versionedfile
from bzrlib.versionedfile import (
    ConstantMapper,
    HashEscapedPrefixMapper,
    PrefixMapper,
    VirtualVersionedFiles,
    make_versioned_files_factory,
    )
from bzrlib.weave import WeaveFile
from bzrlib.weavefile import read_weave, write_weave


def load_tests(standard_tests, module, loader):
    """Parameterize VersionedFiles tests for different implementations."""
    to_adapt, result = split_suite_by_condition(
        standard_tests, condition_isinstance(TestVersionedFiles))
    len_one_adapter = TestScenarioApplier()
    len_two_adapter = TestScenarioApplier()
    # We want to be sure of behaviour for:
    # weaves prefix layout (weave texts)
    # individually named weaves (weave inventories)
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
    #        as it is the most complex mapper.
    # individually named knits
    # individual no-graph knits in packs (signatures)
    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_adapter.scenarios = [
        ('weave-named', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'graph':True,
            'key_length':1,
            }),
        ('named-knit', {
            'cleanup':None,
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'graph':True,
            'key_length':1,
            }),
        ('named-nograph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'graph':False,
            'key_length':1,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'graph':True,
            'key_length':1,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'graph':True,
            'key_length':1,
            }),
        ]
    len_two_adapter.scenarios = [
        ('weave-prefix', {
            'cleanup':None,
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'graph':True,
            'key_length':2,
            }),
        ('annotated-knit-escape', {
            'cleanup':None,
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'graph':True,
            'key_length':2,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'graph':True,
            'key_length':2,
            }),
        ]
    for test in iter_suite_tests(to_adapt):
        result.addTests(len_one_adapter.adapt(test))
        result.addTests(len_two_adapter.adapt(test))
    return result
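
# The helpers below all insert the same "diamond" ancestry graph; a sketch for
# orientation (each node's text shares its leading lines with its parents):
#
#            origin
#              |
#             base
#             /  \
#          left  right
#             \  /
#            merged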


def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges.

    :param trailing_eol: If True end the last line with \n.
    :param left_only: If True do not add the right and merged nodes.
    :return: The versioned file and a dict of parents for each version.
    """
    parents = {
        'origin': (),
        'base': (('origin',),),
        'left': (('base',),),
        'right': (('base',),),
        'merged': (('left',), ('right',)),
        }
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    f.add_lines('origin', [], ['origin' + last_char])
    f.add_lines('base', ['origin'], ['base' + last_char])
    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
    if not left_only:
        f.add_lines('right', ['base'],
            ['base\n', 'right' + last_char])
        f.add_lines('merged', ['left', 'right'],
            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
    return f, parents
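
# A minimal usage sketch for get_diamond_vf (illustrative only; 'f' is any
# VersionedFile instance a test has created):
#
#   f, parents = get_diamond_vf(f)
#   f.get_ancestry(['merged'])  # -> origin, base, left, right, merged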


def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
    nograph=False):
    """Get a diamond graph to exercise deltas and merges.

    This creates a 5-node graph in files. If files supports 2-length keys two
    graphs are made to exercise the support for multiple ids.

    :param trailing_eol: If True end the last line with \n.
    :param key_length: The length of keys in files. Currently supports length 1
        and 2 keys.
    :param left_only: If True do not add the right and merged nodes.
    :param nograph: If True, do not provide parents to the add_lines calls;
        this is useful for tests that need inserted data but have graphless
        stores.
    :return: The results of the add_lines calls.
    """
    if key_length == 1:
        prefixes = [()]
    else:
        prefixes = [('FileA',), ('FileB',)]
    # insert a diamond graph to exercise deltas and merges.
    if trailing_eol:
        last_char = '\n'
    else:
        last_char = ''
    result = []
    def get_parents(suffix_list):
        if nograph:
            return ()
        else:
            result = [prefix + suffix for suffix in suffix_list]
            return result
    # we loop over each key because that spreads the inserts across prefixes,
    # which is how commit operates.
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('origin',), (),
            ['origin' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('base',),
            get_parents([('origin',)]), ['base' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + ('left',),
            get_parents([('base',)]),
            ['base\n', 'left' + last_char]))
    if not left_only:
        for prefix in prefixes:
            result.append(files.add_lines(prefix + ('right',),
                get_parents([('base',)]),
                ['base\n', 'right' + last_char]))
        for prefix in prefixes:
            result.append(files.add_lines(prefix + ('merged',),
                get_parents([('left',), ('right',)]),
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
    return result
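
# Each element of the list returned by get_diamond_files is the tuple returned
# by the corresponding add_lines call; as TestVersionedFiles.test_add_lines_return
# below verifies, the first two elements of that tuple are the sha1 and text size.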


class VersionedFileTestMixIn(object):
    """A mixin test class for testing VersionedFiles.

    This is not an adaptor-style test at this point because
    there's no dynamic substitution of versioned file implementations;
    they are strictly controlled by their owning repositories.
    """

    def get_transaction(self):
        if not hasattr(self, '_transaction'):
            self._transaction = None
        return self._transaction

    def test_add(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        def verify_file(f):
            versions = f.versions()
            self.assertTrue('r0' in versions)
            self.assertTrue('r1' in versions)
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
            self.assertEquals(f.get_text('r0'), 'a\nb\n')
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
            self.assertEqual(2, len(f))
            self.assertEqual(2, f.num_versions())
            self.assertRaises(RevisionNotPresent,
                f.add_lines, 'r2', ['foo'], [])
            self.assertRaises(RevisionAlreadyPresent,
                f.add_lines, 'r1', [], [])
        verify_file(f)
        # this checks that reopen with create=True does not break anything.
        f = self.reopen_file(create=True)
        verify_file(f)

    def test_adds_with_parent_texts(self):
        f = self.get_file()
        parent_texts = {}
        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
        try:
            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
        except NotImplementedError:
            # if the format doesn't support ghosts, just add normally.
            _, _, parent_texts['r1'] = f.add_lines('r1',
                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
        self.assertNotEqual(None, parent_texts['r0'])
        self.assertNotEqual(None, parent_texts['r1'])
        def verify_file(f):
            versions = f.versions()
            self.assertTrue('r0' in versions)
            self.assertTrue('r1' in versions)
            self.assertTrue('r2' in versions)
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
            self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
            self.assertEqual(3, f.num_versions())
            origins = f.annotate('r1')
            self.assertEquals(origins[0][0], 'r0')
            self.assertEquals(origins[1][0], 'r1')
            origins = f.annotate('r2')
            self.assertEquals(origins[0][0], 'r1')
            self.assertEquals(origins[1][0], 'r2')
        verify_file(f)
        f = self.reopen_file()
        verify_file(f)

    def test_add_unicode_content(self):
        # unicode content is not permitted in versioned files.
        # versioned files version sequences of bytes only.
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterUnicode,
            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
        self.assertRaises(
            (errors.BzrBadParameterUnicode, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])

    def test_add_follows_left_matching_blocks(self):
        """If we change left_matching_blocks, deltas change.

        Note: There are multiple correct deltas in this case, because
        we start with 1 "a" and we get 3.
        """
        vf = self.get_file()
        if isinstance(vf, WeaveFile):
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))
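
    # A note on left_matching_blocks, assuming difflib-style semantics for the
    # triples: each (i, j, n) declares that lines i:i+n of the left parent
    # match lines j:j+n of the new text, so (0, 2, 1) above maps parent line 0
    # to new line 2 and produces a different (but equally valid) delta than
    # (0, 0, 1).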

    def test_inline_newline_throws(self):
        # \n characters are not permitted inside lines being added
        vf = self.get_file()
        self.assertRaises(errors.BzrBadParameterContainsNewline,
            vf.add_lines, 'a', [], ['a\n\n'])
        self.assertRaises(
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
        # but inline CR's are allowed
        vf.add_lines('a', [], ['a\r\n'])
        try:
            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
        except NotImplementedError:
            pass

    def test_add_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId,
            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])

    def test_add_lines_nostoresha(self):
        """When nostore_sha is supplied, re-adding existing content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_with_ghosts_nostoresha(self):
        """When nostore_sha is supplied, re-adding existing content raises."""
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            sha, _, _ = vf.add_lines(version, [], lines)
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        # is the test applicable to this vf implementation?
        try:
            vf.add_lines_with_ghosts('d', [], [])
        except NotImplementedError:
            raise TestSkipped("add_lines_with_ghosts is optional")
        for sha, (version, lines) in zip(
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            self.assertRaises(errors.ExistingContent,
                vf.add_lines_with_ghosts, version + "2", [], lines,
                nostore_sha=sha)
            # and no new version should have been added.
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
                version + "2")

    def test_add_lines_return_value(self):
        # add_lines should return the sha1 and the text size.
        vf = self.get_file()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        # check results for the three cases:
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            # the first two elements are the same for all versioned files:
            # - the digest and the size of the text. For some versioned files
            #   additional data is returned in additional tuple elements.
            result = vf.add_lines(version, [], lines)
            self.assertEqual(3, len(result))
            self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
                result[0:2])
        # parents should not affect the result:
        lines = sample_text_nl[1]
        self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
            vf.add_lines('d', ['b', 'c'], lines)[0:2])

    def test_get_reserved(self):
        vf = self.get_file()
        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')

    def test_add_unchanged_last_line_noeol_snapshot(self):
        """Adding a text with an unchanged last line and no eol should work."""
        # Test adding this in a number of chain lengths; because the interface
        # for VersionedFile does not allow forcing a specific chain length, we
        # just use a small base to get the first snapshot, then a much longer
        # first line for the next add (which will make the third add snapshot)
        # and so on. 20 has been chosen as an arbitrary figure - knits use 200
        # as a capped delta length, but ideally we would have some way of
        # tuning the test to the store (e.g. keep going until a snapshot
        # happens).
        for length in range(20):
            version_lines = {}
            vf = self.get_file('case-%d' % length)
            prefix = 'step-%d'
            parents = []
            for step in range(length):
                version = prefix % step
                lines = (['prelude \n'] * step) + ['line']
                vf.add_lines(version, parents, lines)
                version_lines[version] = lines
                parents = [version]
            vf.add_lines('no-eol', parents, ['line'])
            vf.get_texts(version_lines.keys())
            self.assertEqualDiff('line', vf.get_text('no-eol'))

    def test_get_texts_eol_variation(self):
        # similar to the failure in <http://bugs.launchpad.net/234748>
        vf = self.get_file()
        sample_text_nl = ["line\n"]
        sample_text_no_nl = ["line"]
        versions = []
        version_lines = {}
        parents = []
        for i in range(4):
            version = 'v%d' % i
            if i % 2:
                lines = sample_text_nl
            else:
                lines = sample_text_no_nl
            # left_matching_blocks is an internal api; it operates on the
            # *internal* representation for a knit, which is with *all* lines
            # being normalised to end with \n - even the final line in a no_nl
            # file. Using it here ensures that a broken internal implementation
            # (which is what this test tests) will generate a correct line
            # delta (which is to say, an empty delta).
            vf.add_lines(version, parents, lines,
                left_matching_blocks=[(0, 0, 1)])
            parents = [version]
            versions.append(version)
            version_lines[version] = lines
        vf.check()
        vf.get_texts(versions)
        vf.get_texts(reversed(versions))

    def test_add_lines_with_matching_blocks_noeol_last_line(self):
        """Adding a text with an unchanged last line and no eol should work."""
        from bzrlib import multiparent
        # Hand verified sha1 of the text we're adding.
        sha1 = '6a1d115ec7b60afb664dc14890b5af5ce3c827a4'
        # Create a mpdiff which adds a new line before the trailing line, and
        # reuse the last line unaltered (which can cause annotation reuse).
        # Test adding this in two situations:
        # On top of a new insertion
        vf = self.get_file('fulltext')
        vf.add_lines('noeol', [], ['line'])
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
            left_matching_blocks=[(0, 1, 1)])
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
        # On top of a delta
        vf = self.get_file('delta')
        vf.add_lines('base', [], ['line'])
        vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
            left_matching_blocks=[(1, 1, 1)])
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))

    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        vf = self.get_file('foo')
        sha1s = self._setup_for_deltas(vf)
        new_vf = self.get_file('bar')
        for version in multiparent.topo_iter(vf):
            mpdiff = vf.make_mpdiffs([version])[0]
            new_vf.add_mpdiffs([(version, vf.get_parent_map([version])[version],
                                 vf.get_sha1s([version])[version], mpdiff)])
            self.assertEqualDiff(vf.get_text(version),
                                 new_vf.get_text(version))
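
    # The round-trip contract exercised above: make_mpdiffs returns one
    # multi-parent diff per requested version, and add_mpdiffs re-inserts each
    # given a (version, parents, expected sha1, mpdiff) tuple, reproducing the
    # original text exactly.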

    def test_make_mpdiffs_with_ghosts(self):
        vf = self.get_file('foo')
        try:
            vf.add_lines_with_ghosts('text', ['ghost'], ['line\n'])
        except NotImplementedError:
            # old Weave formats do not allow ghosts
            return
        self.assertRaises(errors.RevisionNotPresent, vf.make_mpdiffs, ['ghost'])

    def _setup_for_deltas(self, f):
        self.assertFalse(f.has_version('base'))
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        f.add_lines('base', [], ['line\n'])
        f.add_lines('noeol', ['base'], ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
        # differing last line with parent, both no-eol
        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        f.add_lines('eol', ['noeol'], ['phone\n'])
        # add eol following a noneol parent, no change content
        f.add_lines('eolline', ['noeol'], ['line\n'])
        # noeol with no parents:
        f.add_lines('noeolbase', [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol, with the
        # later-inserted parent the leftmost.
        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
        # two identical no-eol texts
        f.add_lines('noeoldup', ['noeol'], ['line'])
        next_parent = 'base'
        text_name = 'chain1-'
        text = ['line\n']
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        next_parent = 'base'
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = text_name + '%s' % depth
            text = text + ['line\n']
            f.add_lines(new_version, [next_parent], text)
            next_parent = new_version
        return sha1s

    def test_ancestry(self):
        f = self.get_file()
        self.assertEqual([], f.get_ancestry([]))
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
        self.assertEqual([], f.get_ancestry([]))
        versions = f.get_ancestry(['rM'])
        # there are several valid topological orderings here,
        # so we check indexes rather than the exact list
        r0 = versions.index('r0')
        r1 = versions.index('r1')
        r2 = versions.index('r2')
        self.assertFalse('r3' in versions)
        rM = versions.index('rM')
        self.assertTrue(r0 < r1)
        self.assertTrue(r0 < r2)
        self.assertTrue(r1 < rM)
        self.assertTrue(r2 < rM)

        self.assertRaises(RevisionNotPresent,
            f.get_ancestry, ['rM', 'rX'])

        self.assertEqual(set(f.get_ancestry('rM')),
            set(f.get_ancestry('rM', topo_sorted=False)))
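
    # get_ancestry returns the named version plus all of its ancestors; with
    # topo_sorted=False only membership is guaranteed, which is why the final
    # assertion above compares sets rather than lists.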

    def test_mutate_after_finish(self):
        self._transaction = 'before'
        f = self.get_file()
        self._transaction = 'after'
        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
        self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])

    def test_copy_to(self):
        f = self.get_file()
        f.add_lines('0', [], ['a\n'])
        t = MemoryTransport()
        f.copy_to('foo', t)
        for suffix in self.get_factory().get_suffixes():
            self.assertTrue(t.has('foo' + suffix))

    def test_get_suffixes(self):
        f = self.get_file()
        # the suffixes used on disk should be a list
        self.assertTrue(isinstance(self.get_factory().get_suffixes(), list))

    def test_get_parent_map(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        self.assertEqual(
            {'r0':()}, f.get_parent_map(['r0']))
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
        self.assertEqual(
            {'r1':('r0',)}, f.get_parent_map(['r1']))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'r1']))
        f.add_lines('r2', [], ['a\n', 'b\n'])
        f.add_lines('r3', [], ['a\n', 'b\n'])
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
        self.assertEqual(
            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
        self.assertEqual({}, f.get_parent_map('y'))
        self.assertEqual(
            {'r0':(),
             'r1':('r0',)},
            f.get_parent_map(['r0', 'y', 'r1']))

    def test_annotate(self):
        f = self.get_file()
        f.add_lines('r0', [], ['a\n', 'b\n'])
        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
        origins = f.annotate('r1')
        self.assertEquals(origins[0][0], 'r1')
        self.assertEquals(origins[1][0], 'r0')

        self.assertRaises(RevisionNotPresent,
            f.annotate, 'foo')

    def test_detection(self):
        # Test weaves detect corruption.
        #
        # Weaves contain a checksum of their texts.
        # When a text is extracted, this checksum should be
        # verified.

        w = self.get_file_corrupted_text()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

        w = self.get_file_corrupted_checksum()

        self.assertEqual('hello\n', w.get_text('v1'))
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)

    def get_file_corrupted_text(self):
        """Return a versioned file with corrupt text but valid metadata."""
        raise NotImplementedError(self.get_file_corrupted_text)

    def reopen_file(self, name='foo'):
        """Open the versioned file from disk again."""
        raise NotImplementedError(self.reopen_file)

    def test_iter_lines_added_or_present_in_versions(self):
        # test that we get at least an equal set of the lines added by
        # versions in the weave
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        vf = self.get_file()
        # add a base to get included
        vf.add_lines('base', [], ['base\n'])
        # add an ancestor to be included on one side
        vf.add_lines('lancestor', [], ['lancestor\n'])
        # add an ancestor to be included on the other side
        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        vf.add_lines('otherchild',
                     ['lancestor', 'base'],
                     ['base\n', 'lancestor\n', 'otherchild\n'])
        def iter_with_versions(versions, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in vf.iter_lines_added_or_present_in_versions(versions,
                pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions(['child', 'otherchild'],
                                   [('Walking content.', 0, 2),
                                    ('Walking content.', 1, 2),
                                    ('Walking content.', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_versions(None, [('Walking content.', 0, 5),
                                          ('Walking content.', 1, 5),
                                          ('Walking content.', 2, 5),
                                          ('Walking content.', 3, 5),
                                          ('Walking content.', 4, 5),
                                          ('Walking content.', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', 'base')] > 0)
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
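
    # Note: the keys counted above are (line, version-id) pairs; the iterator
    # yields each line together with the version that introduced it, and may
    # report a line more than once.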

    def test_add_lines_with_ghosts(self):
        # some versioned file formats allow lines to be added with parent
        # information that references versions not present in the file.
        # Formats that do not support this need to raise NotImplementedError
        # on the add_lines_with_ghosts api.
        vf = self.get_file()
        # add a revision with ghost parents
        # The preferred form is utf8, but we should translate when needed
        parent_id_unicode = u'b\xbfse'
        parent_id_utf8 = parent_id_unicode.encode('utf8')
        try:
            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
        except NotImplementedError:
            # check the other ghost apis are also not implemented
            self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
            self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
            return
        vf = self.reopen_file()
        # test key graph related apis: get_ancestry, _graph, get_parents,
        # has_version
        # - these are ghost unaware and must not reflect ghosts
        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
        self.assertFalse(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
        # if we add something that is a ghost of another, it should correct the
        # results of the prior apis
        vf.add_lines(parent_id_utf8, [], [])
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
            vf.get_parent_map(['notbxbfse']))
        self.assertTrue(vf.has_version(parent_id_utf8))
        # we have _with_ghost apis to give us ghost information.
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
            vf.get_ancestry_with_ghosts(['notbxbfse']))
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))

    def test_add_lines_with_ghosts_after_normal_revs(self):
        # some versioned file formats allow lines to be added with parent
        # information that references versions not present in the file.
        # Formats that do not support this need to raise NotImplementedError
        # on the add_lines_with_ghosts api.
        vf = self.get_file()
        # probe for ghost support
        try:
            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
        except NotImplementedError:
            return
        vf.add_lines_with_ghosts('references_ghost',
                                 ['base', 'a_ghost'],
                                 ['line\n', 'line_b\n', 'line_c\n'])
        origins = vf.annotate('references_ghost')
        self.assertEquals(('base', 'line\n'), origins[0])
        self.assertEquals(('base', 'line_b\n'), origins[1])
        self.assertEquals(('references_ghost', 'line_c\n'), origins[2])

    def test_readonly_mode(self):
        transport = get_transport(self.get_url('.'))
        factory = self.get_factory()
        vf = factory('id', transport, 0777, create=True, access_mode='w')
        vf = factory('id', transport, access_mode='r')
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
        self.assertRaises(errors.ReadOnlyError,
                          vf.add_lines_with_ghosts,
                          'base',
                          [],
                          [])

    def test_get_sha1s(self):
        # check the sha1 data is available
        vf = self.get_file()
        vf.add_lines('a', [], ['a\n'])
        # the same file, different metadata
        vf.add_lines('b', ['a'], ['a\n'])
        # a file differing only in last newline.
        vf.add_lines('c', [], ['a'])
        self.assertEqual({
            'a': '3f786850e387550fdab836ed7e6dc881de23001b',
            'c': '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
            'b': '3f786850e387550fdab836ed7e6dc881de23001b',
            },
            vf.get_sha1s(['a', 'c', 'b']))


class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_file_corrupted_text(self):
        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)
        w.add_lines('v1', [], ['hello\n'])
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])

        # We are going to invasively corrupt the text
        # Make sure the internals of weave are the same
        self.assertEqual([('{', 0)
                        , 'hello\n'
                        , ('}', None)
                        , ('{', 1)
                        , 'there\n'
                        , ('}', None)
                        ], w._weave)

        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
                        , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
                        ], w._sha1s)
        w.check()

        # Corrupted
        w._weave[4] = 'There\n'
        return w

    def get_file_corrupted_checksum(self):
        w = self.get_file_corrupted_text()
        # Corrected
        w._weave[4] = 'there\n'
        self.assertEqual('hello\nthere\n', w.get_text('v2'))

        # Invalid checksum, first digit changed
        w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
        return w

    def reopen_file(self, name='foo', create=False):
        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
            get_scope=self.get_transaction)

    def test_no_implicit_create(self):
        self.assertRaises(errors.NoSuchFile,
                          WeaveFile,
                          'foo',
                          get_transport(self.get_url('.')),
                          get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):

    def setUp(self):
        TestCaseWithMemoryTransport.setUp(self)
        mapper = PrefixMapper()
        factory = make_file_factory(True, mapper)
        self.vf1 = factory(self.get_transport('root-1'))
        self.vf2 = factory(self.get_transport('root-2'))
        self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root')
        self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])

    def test_add_lines(self):
        self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a'), [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a:'), None, [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
            ('root', 'a:'), [], None)

    def setup_abcde(self):
        self.vf1.add_lines(('root', 'A'), [], ['a'])
        self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
        self.vf2.add_lines(('root', 'C'), [], ['c'])
        self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
        self.plan_merge_vf.add_lines(('root', 'E:'),
            [('root', 'B'), ('root', 'D')], ['e'])

    def test_get_parents(self):
        self.setup_abcde()
        self.assertEqual({('root', 'B'):(('root', 'A'),)},
            self.plan_merge_vf.get_parent_map([('root', 'B')]))
        self.assertEqual({('root', 'D'):(('root', 'C'),)},
            self.plan_merge_vf.get_parent_map([('root', 'D')]))
        self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
            self.plan_merge_vf.get_parent_map([('root', 'E:')]))
        self.assertEqual({},
            self.plan_merge_vf.get_parent_map([('root', 'F')]))
        self.assertEqual({
                ('root', 'B'):(('root', 'A'),),
                ('root', 'D'):(('root', 'C'),),
                ('root', 'E:'):(('root', 'B'),('root', 'D')),
                },
            self.plan_merge_vf.get_parent_map(
                [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))

    def test_get_record_stream(self):
        self.setup_abcde()
        def get_record(suffix):
            return self.plan_merge_vf.get_record_stream(
                [('root', suffix)], 'unordered', True).next()
        self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
        self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
        self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
        self.assertEqual('absent', get_record('F').storage_kind)
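
    # The get_record_stream contract exercised above: it takes an iterable of
    # keys, an ordering hint ('unordered' or 'topological') and a flag saying
    # whether fulltexts must be reconstructable; keys that cannot be found are
    # still yielded, as records whose storage_kind is 'absent'.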


class TestReadonlyHttpMixin(object):

    def get_transaction(self):
        return 1

    def test_readonly_http_works(self):
        # we should be able to read from http with a versioned file.
        vf = self.get_file()
        # try an empty file access
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual([], readonly_vf.versions())
        # now with some content
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual(['1', '2'], vf.versions())
        for version in readonly_vf.versions():
            readonly_vf.get_lines(version)


class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile


class MergeCasesMixin(object):

    def doMerge(self, base, a, b, mp):
        from cStringIO import StringIO
        from textwrap import dedent

        def addcrlf(x):
            return x + '\n'

        w = self.get_file()
        w.add_lines('text0', [], map(addcrlf, base))
        w.add_lines('text1', ['text0'], map(addcrlf, a))
        w.add_lines('text2', ['text0'], map(addcrlf, b))

        self.log_contents(w)

        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))

        self.log('merge result:')
        mt = StringIO()
        mt.writelines(w.weave_merge(p))
        mt.seek(0)
        self.log(mt.getvalue())

        mp = map(addcrlf, mp)
        self.assertEqual(mt.readlines(), mp)

    def testOneInsert(self):
        self.doMerge([],
                     ['aa'],
                     [],
                     ['aa'])

    def testSeparateInserts(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    def testSameInsert(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']

    def testOverlappedInsert(self):
        self.doMerge(['aaa', 'bbb'],
                     ['aaa', 'xxx', 'yyy', 'bbb'],
                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)

        # really it ought to reduce this to
        # ['aaa', 'xxx', 'yyy', 'bbb']

    def testClashReplace(self):
        self.doMerge(['aaa'],
                     ['xxx'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert1(self):
        self.doMerge(['aaa'],
                     ['xxx', 'aaa'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert2(self):
        self.doMerge(['aaa'],
                     ['aaa'],
                     ['yyy', 'zzz'],
                     ['yyy', 'zzz'])

    def testDeleteAndModify(self):
        """Clashing delete and modification.

        If one side modifies a region and the other deletes it then
        there should be a conflict with one side blank.
        """

        #######################################
        # skipped, not working yet
        return

        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'ddd', 'ccc'],
                     ['aaa', 'ccc'],
                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])

    def _test_merge_from_strings(self, base, a, b, expected):
        w = self.get_file()
        w.add_lines('text0', [], base.splitlines(True))
        w.add_lines('text1', ['text0'], a.splitlines(True))
        w.add_lines('text2', ['text0'], b.splitlines(True))
        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))
        self.log('merge result:')
        result_text = ''.join(w.weave_merge(p))
        self.log(result_text)
        self.assertEqualDiff(result_text, expected)

    def test_weave_merge_conflicts(self):
        # does weave merge properly handle plans that end with unchanged?
        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
        self.assertEqual(result, 'hello\n')

    def test_deletion_extended(self):
        """One side deletes, the other deletes more.
        """
        base = """\
            line 1
            line 2
            line 3
            """
        a = """\
            line 1
            line 2
            """
        b = """\
            line 1
            """
        result = """\
            line 1
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_deletion_overlap(self):
        """Delete overlapping regions with no other conflict.

        Arguably it'd be better to treat these as agreement, rather than
        conflict, but for now conflict is safer.
        """
        base = """\
            start context
            int a() {}
            int b() {}
            int c() {}
            end context
            """
        a = """\
            start context
            int a() {}
            end context
            """
        b = """\
            start context
            int c() {}
            end context
            """
        result = """\
            start context
<<<<<<< 
            int a() {}
=======
            int c() {}
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_agreement_deletion(self):
        """Agree to delete some lines, without conflicts."""
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            end context
            """
        b = """\
            start context
            base line 1
            end context
            """
        result = """\
            start context
            base line 1
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_sync_on_deletion(self):
        """Specific case of merge where we can synchronize incorrectly.

        A previous version of the weave merge concluded that the two versions
        agreed on deleting line 2, and this could be a synchronization point.
        Line 1 was then considered in isolation, and thought to be deleted on
        both sides.

        It's better to consider the whole thing as a disagreement region.
        """
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            a's replacement line 2
            end context
            """
        b = """\
            start context
            b replaces
            both lines
            end context
            """
        result = """\
            start context
<<<<<<< 
            base line 1
            a's replacement line 2
=======
            b replaces
            both lines
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)


class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)

    def log_contents(self, w):
        self.log('weave is:')
        tmpf = StringIO()
        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']


class TestContentFactoryAdaption(TestCaseWithMemoryTransport):

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)

    def get_knit(self, annotated=True):
        mapper = ConstantMapper('knit')
        transport = self.get_transport()
        return make_file_factory(annotated, mapper)(transport)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the adapted texts the tests are interested in."""
        # origin is a fulltext
        entries = f.get_record_stream([('origin',)], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base, base.get_bytes_as(base.storage_kind))
        # merged is both a delta and has multiple parents.
        entries = f.get_record_stream([('merged',)], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged,
            merged.get_bytes_as(merged.storage_kind))
        return ft_data, delta_data
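
    # The adapter protocol assumed by this helper: get_bytes takes a record
    # and that record's serialised bytes, and returns the bytes re-serialised
    # in the adapter's target representation. Adapters that must reconstruct
    # fulltexts from deltas are built with a backing versioned file (see the
    # logged_vf uses below) from which they fetch base texts.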

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f = self.get_knit()
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f = self.get_knit(annotated=False)
        get_diamond_files(f, 1, trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
            True)], logged_vf.calls)


class TestKeyMapper(TestCaseWithMemoryTransport):
    """Tests for various key mapping logic."""

    def test_identity_mapper(self):
        mapper = versionedfile.ConstantMapper("inventory")
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
        self.assertEqual("inventory", mapper.map(('quux',)))

    def test_prefix_mapper(self):
        # format5: plain
        mapper = versionedfile.PrefixMapper()
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
        self.assertEqual(('new-id',), mapper.unmap("new-id"))

    def test_hash_prefix_mapper(self):
        # format6: hash + plain
        mapper = versionedfile.HashPrefixMapper()
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))

    def test_hash_escaped_mapper(self):
        # knit1: hash + escaped
        mapper = versionedfile.HashEscapedPrefixMapper()
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
            "revision-id")))
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
            "revision-id")))
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
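
    # A reading of the fixtures above (inferred from the expected values, not
    # from the mapper's documentation): characters that are unsafe in file
    # names, such as uppercase letters, are quoted ('E' -> '%45') and the '%'
    # itself is quoted again for the transport, giving '%2545'; unmap reverses
    # both layers.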


class TestVersionedFiles(TestCaseWithMemoryTransport):
    """Tests for the multiple-file variant of VersionedFile."""

    def get_versionedfiles(self, relpath='files'):
        transport = self.get_transport(relpath)
        if relpath != '.':
            transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(lambda:self.cleanup(files))
        return files

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            prefix = ()
        else:
            prefix = ('FileA',)
        # introduced full text
        origins = files.annotate(prefix + ('origin',))
        self.assertEqual([
            (prefix + ('origin',), 'origin\n')],
            origins)
        # a delta
        origins = files.annotate(prefix + ('base',))
        self.assertEqual([
            (prefix + ('base',), 'base\n')],
            origins)
        # a merge
        origins = files.annotate(prefix + ('merged',))
        if self.graph:
            self.assertEqual([
                (prefix + ('base',), 'base\n'),
                (prefix + ('left',), 'left\n'),
                (prefix + ('right',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        else:
            # Without a graph everything is new.
            self.assertEqual([
                (prefix + ('merged',), 'base\n'),
                (prefix + ('merged',), 'left\n'),
                (prefix + ('merged',), 'right\n'),
                (prefix + ('merged',), 'merged\n')
                ],
                origins)
        self.assertRaises(RevisionNotPresent,
            files.annotate, prefix + ('missing-key',))

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], [])
        self.assertEqual('',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), [])
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_newline_only(self):
        f = self.get_versionedfiles()
        key_a = self.get_simple_key('a')
        f.add_lines(key_a, [], ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_a], 'unordered', True
                ).next().get_bytes_as('fulltext'))
        key_b = self.get_simple_key('b')
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
        self.assertEqual('\n',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz'])

    def capture_stream(self, f, entries, on_seen, parents):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)

    def test_get_record_stream_interface(self):
        """each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, entries, seen.add, parent_map)
        self.assertEqual(set(keys), seen)

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def get_keys_and_sort_order(self):
        """Get diamond test keys list, and their sort ordering."""
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
            sort_order = {
                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
                ('FileA', 'base'):0,
                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
                ('FileB', 'base'):0,
                }
        return keys, sort_order

    def test_get_record_stream_interface_ordered(self):
        """each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_ordered_with_delta_closure(self):
        """each item must be accessible as a fulltext."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'topological', True)
        seen = []
        for factory in entries:
            seen.append(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertSubset([factory.sha1],
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # each item must be retrievable both as a fulltext and in chunks:
            ft_bytes = factory.get_bytes_as('fulltext')
            self.assertIsInstance(ft_bytes, str)
            chunked_bytes = factory.get_bytes_as('chunked')
            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))

        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        self.assertEqual(len(set(seen)), len(keys))
        if self.key_length == 1:
            lows = {():0}
        else:
            lows = {('FileA',):0, ('FileB',):0}
        if not self.graph:
            self.assertEqual(set(keys), set(seen))
        else:
            for key in seen:
                sort_pos = sort_order[key]
                self.assertTrue(sort_pos >= lows[key[:-1]],
                    "Out of order in sorted stream: %r, %r" % (key, seen))
                lows[key[:-1]] = sort_pos

    def test_get_record_stream_unknown_storage_kind_raises(self):
        """Asking for a storage kind that the stream cannot supply raises."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'base'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        # We track the contents because we should be able to try, fail a
        # particular kind and then ask for one that works and continue.
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                factory.sha1)
            self.assertEqual(parent_map[factory.key], factory.parents)
            # currently no stream emits mpdiff
            self.assertRaises(errors.UnavailableRepresentation,
                factory.get_bytes_as, 'mpdiff')
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
        self.assertEqual(set(keys), seen)

    def test_get_record_stream_missing_records_are_absent(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
        else:
            keys = [
                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
                ('FileA', 'absent'), ('FileA', 'base'),
                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
                ('FileB', 'absent'), ('FileB', 'base'),
                ('absent', 'absent'),
                ]
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'unordered', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        seen = set()
        for factory in entries:
            seen.add(factory.key)
            if factory.key[-1] == 'absent':
                self.assertEqual('absent', factory.storage_kind)
                self.assertEqual(None, factory.sha1)
                self.assertEqual(None, factory.parents)
            else:
                self.assertValidStorageKind(factory.storage_kind)
                self.assertEqual(files.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
                self.assertEqual(parents[factory.key], factory.parents)
                self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                    str)
        self.assertEqual(set(keys), seen)

    def test_filter_absent_records(self):
        """Requested missing records can be filtered out trivially."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, _ = self.get_keys_and_sort_order()
        parent_map = files.get_parent_map(keys)
        # Add an absent record in the middle of the present keys. (We don't ask
        # for just absent keys to ensure that content before and after the
        # absent keys is still delivered).
        present_keys = list(keys)
        if self.key_length == 1:
            keys.insert(2, ('extra',))
        else:
            keys.insert(2, ('extra', 'extra'))
        entries = files.get_record_stream(keys, 'unordered', False)
        seen = set()
        self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
            parent_map)
        self.assertEqual(set(present_keys), seen)

    def get_mapper(self):
        """Get a mapper suitable for the key length of the test interface."""
        if self.key_length == 1:
            return ConstantMapper('source')
        else:
            return HashEscapedPrefixMapper()

    def get_parents(self, parents):
        """Get parents, taking self.graph into consideration."""
        if self.graph:
            return parents
        else:
            return ()

    def test_get_parent_map(self):
        files = self.get_versionedfiles()
        if self.key_length == 1:
            parent_details = [
                (('r0',), self.get_parents(())),
                (('r1',), self.get_parents((('r0',),))),
                (('r2',), self.get_parents(())),
                (('r3',), self.get_parents(())),
                (('m',), self.get_parents((('r0',), ('r1',), ('r2',), ('r3',)))),
                ]
        else:
            parent_details = [
                (('FileA', 'r0'), self.get_parents(())),
                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
                (('FileA', 'r2'), self.get_parents(())),
                (('FileA', 'r3'), self.get_parents(())),
                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                ]
        for key, parents in parent_details:
            files.add_lines(key, parents, [])
            # immediately after adding it should be queryable.
            self.assertEqual({key: parents}, files.get_parent_map([key]))
        # We can ask for an empty set
        self.assertEqual({}, files.get_parent_map([]))
        # We can ask for many keys
        all_parents = dict(parent_details)
        self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
        # Absent keys are just not included in the result.
        keys = all_parents.keys()
        if self.key_length == 1:
            keys.insert(1, ('missing',))
        else:
            keys.insert(1, ('missing', 'missing'))
        # Absent keys are just ignored
        self.assertEqual(all_parents, files.get_parent_map(keys))
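
    # get_sha1s() maps each requested key to the hex SHA-1 of its stored
    # fulltext; the digests asserted below are those of the diamond
    # fixture's texts.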
    def test_get_sha1s(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        if self.key_length == 1:
            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
        else:
            # ask for shas from different prefixes.
            keys = [
                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
                ('FileA', 'merged'), ('FileB', 'right'),
                ]
        self.assertEqual({
            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
            },
            files.get_sha1s(keys))

    def test_insert_record_stream_empty(self):
        """Inserting an empty record stream should work."""
        files = self.get_versionedfiles()
        files.insert_record_stream([])

    def assertIdenticalVersionedFile(self, expected, actual):
        """Assert that left and right have the same contents."""
        self.assertEqual(set(actual.keys()), set(expected.keys()))
        actual_parents = actual.get_parent_map(actual.keys())
        if self.graph:
            self.assertEqual(actual_parents,
                expected.get_parent_map(expected.keys()))
        else:
            for key, parents in actual_parents.items():
                self.assertEqual(None, parents)
        for key in actual.keys():
            actual_text = actual.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            expected_text = expected.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext')
            self.assertEqual(actual_text, expected_text)
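
    # The insert_record_stream tests below all follow one pattern: build a
    # source VersionedFiles of a particular storage kind, stream every
    # record out of it in topological order, insert that stream into the
    # file under test, and check the result is identical.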
    def test_insert_record_stream_fulltexts(self):
        """Any file should accept a stream of fulltexts."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_fulltexts_noeol(self):
        """Any file should accept a stream of fulltexts without trailing EOLs."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        # weaves always output fulltexts.
        source = make_versioned_files_factory(WeaveFile, mapper)(
            source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits(self):
        """Any file should accept a stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_annotated_knits_noeol(self):
        """Any file should accept a no-eol stream from annotated knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(True, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits(self):
        """Any file should accept a stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_plain_knits_noeol(self):
        """Any file should accept a no-eol stream from plain knits."""
        files = self.get_versionedfiles()
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source, trailing_eol=False)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_existing_keys(self):
        """Inserting keys already in a file should not error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        # insert some keys into files.
        self.get_diamond_files(files, left_only=True)
        stream = source.get_record_stream(source.keys(), 'topological',
            False)
        files.insert_record_stream(stream)
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_missing_keys(self):
        """Inserting a stream with absent keys should raise an error."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        stream = source.get_record_stream([('missing',) * self.key_length],
            'topological', False)
        self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
            stream)

    def test_insert_record_stream_out_of_order(self):
        """An out of order stream can either error or work."""
        files = self.get_versionedfiles()
        source = self.get_versionedfiles('source')
        self.get_diamond_files(source)
        if self.key_length == 1:
            origin_keys = [('origin',)]
            end_keys = [('merged',), ('left',)]
            start_keys = [('right',), ('base',)]
        else:
            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
                ('FileB', 'merged',), ('FileB', 'left',)]
            start_keys = [('FileA', 'right',), ('FileA', 'base',),
                ('FileB', 'right',), ('FileB', 'base',)]
        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
        end_entries = source.get_record_stream(end_keys, 'topological', False)
        start_entries = source.get_record_stream(start_keys, 'topological', False)
        entries = chain(origin_entries, end_entries, start_entries)
        try:
            files.insert_record_stream(entries)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included aborts safely."""
        # We use a knit always here to be sure we are getting a binary delta.
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        self.get_diamond_files(source)
        entries = source.get_record_stream(['origin', 'merged'], 'unordered', False)
        files = self.get_versionedfiles()
        self.assertRaises(RevisionNotPresent, files.insert_record_stream,
            entries)
        files.check()
        self.assertEqual({}, files.get_parent_map([]))
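
    # InstrumentedProgress (defined below) records every update() call so
    # the test can assert how iter_lines_added_or_present_in_keys reports
    # progress while walking content.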
    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equal set of the lines added by
        # versions in the store.
        # the ordering here is to make a tree so that dumb searches have
        # more changes to muck up.

        class InstrumentedProgress(progress.DummyProgress):

            def __init__(self):
                progress.DummyProgress.__init__(self)
                self.updates = []

            def update(self, msg=None, current=None, total=None):
                self.updates.append((msg, current, total))

        files = self.get_versionedfiles()
        # add a base to get included
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
        # add an ancestor to be included on one side
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
        # add an ancestor to be included on the other side
        files.add_lines(self.get_simple_key('rancestor'),
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
        # add a child of rancestor with no eofile-nl
        files.add_lines(self.get_simple_key('child'),
            self.get_parents([self.get_simple_key('rancestor')]),
            ['base\n', 'child\n'])
        # add a child of lancestor and base to join the two roots
        files.add_lines(self.get_simple_key('otherchild'),
            self.get_parents([self.get_simple_key('lancestor'),
                self.get_simple_key('base')]),
            ['base\n', 'lancestor\n', 'otherchild\n'])

        def iter_with_keys(keys, expected):
            # now we need to see what lines are returned, and how often.
            lines = {}
            progress = InstrumentedProgress()
            # iterate over the lines
            for line in files.iter_lines_added_or_present_in_keys(keys,
                    pb=progress):
                lines.setdefault(line, 0)
                lines[line] += 1
            if [] != progress.updates:
                self.assertEqual(expected, progress.updates)
            return lines

        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content.', 0, 2),
             ('Walking content.', 1, 2),
             ('Walking content.', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content.', 0, 5),
             ('Walking content.', 1, 5),
             ('Walking content.', 2, 5),
             ('Walking content.', 3, 5),
             ('Walking content.', 4, 5),
             ('Walking content.', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
        self.assertTrue(
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
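
    # make_mpdiffs() produces multi-parent diffs; the test below round-trips
    # every key through add_mpdiffs() into a fresh store and compares the
    # reconstructed fulltexts with the originals.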
    def test_make_mpdiffs(self):
        from bzrlib import multiparent
        files = self.get_versionedfiles('source')
        # add texts that should trip the knit maximum delta chain threshold
        # as well as doing parallel chains of data in knits.
        # this is done by two chains of 25 insertions
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
        files.add_lines(self.get_simple_key('noeol'),
            self.get_parents([self.get_simple_key('base')]), ['line'])
        # detailed eol tests:
        # shared last line with parent no-eol
        files.add_lines(self.get_simple_key('noeolsecond'),
            self.get_parents([self.get_simple_key('noeol')]),
            ['line\n', 'line'])
        # differing last line with parent, both no-eol
        files.add_lines(self.get_simple_key('noeolnotshared'),
            self.get_parents([self.get_simple_key('noeolsecond')]),
            ['line\n', 'phone'])
        # add eol following a noneol parent, change content
        files.add_lines(self.get_simple_key('eol'),
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
        # add eol following a noneol parent, no change content
        files.add_lines(self.get_simple_key('eolline'),
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
        # noeol with no parents:
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
        # noeol preceding its leftmost parent in the output:
        # this is done by making it a merge of two parents with no common
        # ancestry: noeolbase and noeol with the
        # later-inserted parent the leftmost.
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
            self.get_parents([self.get_simple_key('noeolbase'),
                self.get_simple_key('noeol')]),
            ['line'])
        # two identical no-eol texts
        files.add_lines(self.get_simple_key('noeoldup'),
            self.get_parents([self.get_simple_key('noeol')]), ['line'])
        next_parent = self.get_simple_key('base')
        text_name = 'chain1-'
        text = ['line\n']
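        # Expected SHA-1s of the cumulative chain1 texts, retained from the
        # source; nothing later in this test appears to consult them.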
        sha1s = {0: 'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
                 1: '45e21ea146a81ea44a821737acdb4f9791c8abe7',
                 2: 'e1f11570edf3e2a070052366c582837a4fe4e9fa',
                 3: '26b4b8626da827088c514b8f9bbe4ebf181edda1',
                 4: 'e28a5510be25ba84d31121cff00956f9970ae6f6',
                 5: 'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
                 6: '2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
                 7: '95c14da9cafbf828e3e74a6f016d87926ba234ab',
                 8: '779e9a0b28f9f832528d4b21e17e168c67697272',
                 9: '1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
                 10: '131a2ae712cf51ed62f143e3fbac3d4206c25a05',
                 11: 'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
                 12: '31a2286267f24d8bedaa43355f8ad7129509ea85',
                 13: 'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
                 14: '2c4b1736566b8ca6051e668de68650686a3922f2',
                 15: '5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
                 16: 'b0d2e18d3559a00580f6b49804c23fea500feab3',
                 17: '8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
                 18: '5cf64a3459ae28efa60239e44b20312d25b253f3',
                 19: '1ebed371807ba5935958ad0884595126e8c4e823',
                 20: '2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
                 21: '01edc447978004f6e4e962b417a4ae1955b6fe5d',
                 22: 'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
                 23: 'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
                 24: 'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
                 25: 'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                 }
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        next_parent = self.get_simple_key('base')
        text_name = 'chain2-'
        text = ['line\n']
        for depth in range(26):
            new_version = self.get_simple_key(text_name + '%s' % depth)
            text = text + ['line\n']
            files.add_lines(new_version, self.get_parents([next_parent]), text)
            next_parent = new_version
        target = self.get_versionedfiles('target')
        for key in multiparent.topo_iter_keys(files, files.keys()):
            mpdiff = files.make_mpdiffs([key])[0]
            parents = files.get_parent_map([key])[key] or []
            target.add_mpdiffs(
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
            self.assertEqualDiff(
                files.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'),
                target.get_record_stream([key], 'unordered',
                    True).next().get_bytes_as('fulltext'))

    def test_keys(self):
        # While use is discouraged, versions() is still needed by aspects of
        # bzr.
        files = self.get_versionedfiles()
        self.assertEqual(set(), set(files.keys()))
        if self.key_length == 1:
            key = ('foo',)
        else:
            key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))
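

# VirtualVersionedFiles presents texts and parents supplied by callbacks;
# it is read-only, so its mutating methods are expected to raise
# NotImplementedError, as the tests below verify.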
class VirtualVersionedFilesTests(TestCase):
    """Basic tests for the VirtualVersionedFiles implementations."""

    def _get_parent_map(self, keys):
        ret = {}
        for k in keys:
            if k in self._parent_map:
                ret[k] = self._parent_map[k]
        return ret

    def setUp(self):
        TestCase.setUp(self)
        self._lines = {}
        self._parent_map = {}
        self.texts = VirtualVersionedFiles(self._get_parent_map,
                                           self._lines.get)

    def test_add_lines(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_lines, "foo", [], [])

    def test_add_mpdiffs(self):
        self.assertRaises(NotImplementedError,
                self.texts.add_mpdiffs, [])

    def test_check(self):
        self.assertTrue(self.texts.check())

    def test_insert_record_stream(self):
        self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
                          [])

    def test_get_sha1s_nonexistent(self):
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines["key"] = ["dataline1", "dataline2"]
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
                          self.texts.get_sha1s([("key",)]))

    def test_get_parent_map(self):
        self._parent_map = {"G": ("A", "B")}
        self.assertEquals({("G",): (("A",), ("B",))},
                          self.texts.get_parent_map([("G",), ("L",)]))
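
    # 'chunked' records expose their content as a list of byte strings;
    # get_bytes_as('fulltext') returns the concatenation of those chunks.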
    def test_get_record_stream(self):
        self._lines["A"] = ["FOO", "BAR"]
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("chunked", record.storage_kind)
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines["A"] = ["FOO", "BAR"]
        self._lines["B"] = ["HEY"]
        self._lines["C"] = ["Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
                          sorted(list(it)))


class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):

    def get_ordering_vf(self, key_priority):
        builder = self.make_branch_builder('test')
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'TREE_ROOT', 'directory', None))])
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['B'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        vf = b.repository.inventories
        return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)
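
    # The decorator records each request in vf.calls and, for 'unordered'
    # requests, yields records sorted by the key_priority map (lowest value
    # first, with keys missing from the map sorted to the front).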
    def test_get_empty(self):
        vf = self.get_ordering_vf({})
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
                           False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
                                                    'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
                           False)], vf.calls)