# Copyright (C) 2005 Canonical Ltd
# Johan Rydberg <jrydberg@gnu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.
24
from StringIO import StringIO
32
from bzrlib.errors import (
34
RevisionAlreadyPresent,
37
from bzrlib.knit import KnitVersionedFile, \
39
from bzrlib.tests import TestCaseWithTransport
40
from bzrlib.tests.HTTPTestUtil import TestCaseWithWebserver
41
from bzrlib.trace import mutter
42
from bzrlib.transport import get_transport
43
from bzrlib.transport.memory import MemoryTransport
44
from bzrlib.tsort import topo_sort
45
import bzrlib.versionedfile as versionedfile
46
from bzrlib.weave import WeaveFile
47
from bzrlib.weavefile import read_weave, write_weave
50
class VersionedFileTestMixIn(object):
51
"""A mixin test class for testing VersionedFiles.
53
This is not an adaptor-style test at this point because
54
theres no dynamic substitution of versioned file implementations,
55
they are strictly controlled by their owning repositories.
60
f.add_lines('r0', [], ['a\n', 'b\n'])
61
f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
63
versions = f.versions()
64
self.assertTrue('r0' in versions)
65
self.assertTrue('r1' in versions)
66
self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
67
self.assertEquals(f.get_text('r0'), 'a\nb\n')
68
self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
69
self.assertEqual(2, len(f))
70
self.assertEqual(2, f.num_versions())
72
self.assertRaises(RevisionNotPresent,
73
f.add_lines, 'r2', ['foo'], [])
74
self.assertRaises(RevisionAlreadyPresent,
75
f.add_lines, 'r1', [], [])
77
# this checks that reopen with create=True does not break anything.
78
f = self.reopen_file(create=True)
81
def test_adds_with_parent_texts(self):
84
parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
86
parent_texts['r1'] = f.add_lines_with_ghosts('r1',
89
parent_texts=parent_texts)
90
except NotImplementedError:
91
# if the format doesn't support ghosts, just add normally.
92
parent_texts['r1'] = f.add_lines('r1',
95
parent_texts=parent_texts)
96
f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
97
self.assertNotEqual(None, parent_texts['r0'])
98
self.assertNotEqual(None, parent_texts['r1'])
100
versions = f.versions()
101
self.assertTrue('r0' in versions)
102
self.assertTrue('r1' in versions)
103
self.assertTrue('r2' in versions)
104
self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
105
self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
106
self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
107
self.assertEqual(3, f.num_versions())
108
origins = f.annotate('r1')
109
self.assertEquals(origins[0][0], 'r0')
110
self.assertEquals(origins[1][0], 'r1')
111
origins = f.annotate('r2')
112
self.assertEquals(origins[0][0], 'r1')
113
self.assertEquals(origins[1][0], 'r2')
116
f = self.reopen_file()
119
def test_add_unicode_content(self):
120
# unicode content is not permitted in versioned files.
121
# versioned files version sequences of bytes only.
123
self.assertRaises(errors.BzrBadParameterUnicode,
124
vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
126
(errors.BzrBadParameterUnicode, NotImplementedError),
127
vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])
129
def test_inline_newline_throws(self):
130
# \r characters are not permitted in lines being added
132
self.assertRaises(errors.BzrBadParameterContainsNewline,
133
vf.add_lines, 'a', [], ['a\n\n'])
135
(errors.BzrBadParameterContainsNewline, NotImplementedError),
136
vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
137
# but inline CR's are allowed
138
vf.add_lines('a', [], ['a\r\n'])
140
vf.add_lines_with_ghosts('b', [], ['a\r\n'])
141
except NotImplementedError:
144
def test_add_reserved(self):
146
self.assertRaises(errors.ReservedId,
147
vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])
149
self.assertRaises(errors.ReservedId,
150
vf.add_delta, 'a:', [], None, 'sha1', False, ((0, 0, 0, []),))
152
def test_get_reserved(self):
154
self.assertRaises(errors.ReservedId, vf.get_delta, 'b:')
155
self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
156
self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
157
self.assertRaises(errors.ReservedId, vf.get_text, 'b:')
159
def test_get_delta(self):
161
sha1s = self._setup_for_deltas(f)
162
expected_delta = (None, '6bfa09d82ce3e898ad4641ae13dd4fdb9cf0d76b', False,
163
[(0, 0, 1, [('base', 'line\n')])])
164
self.assertEqual(expected_delta, f.get_delta('base'))
166
text_name = 'chain1-'
167
for depth in range(26):
168
new_version = text_name + '%s' % depth
169
expected_delta = (next_parent, sha1s[depth],
171
[(depth + 1, depth + 1, 1, [(new_version, 'line\n')])])
172
self.assertEqual(expected_delta, f.get_delta(new_version))
173
next_parent = new_version
175
text_name = 'chain2-'
176
for depth in range(26):
177
new_version = text_name + '%s' % depth
178
expected_delta = (next_parent, sha1s[depth], False,
179
[(depth + 1, depth + 1, 1, [(new_version, 'line\n')])])
180
self.assertEqual(expected_delta, f.get_delta(new_version))
181
next_parent = new_version
182
# smoke test for eol support
183
expected_delta = ('base', '264f39cab871e4cfd65b3a002f7255888bb5ed97', True, [])
184
self.assertEqual(['line'], f.get_lines('noeol'))
185
self.assertEqual(expected_delta, f.get_delta('noeol'))
187
def test_get_deltas(self):
189
sha1s = self._setup_for_deltas(f)
190
deltas = f.get_deltas(f.versions())
191
expected_delta = (None, '6bfa09d82ce3e898ad4641ae13dd4fdb9cf0d76b', False,
192
[(0, 0, 1, [('base', 'line\n')])])
193
self.assertEqual(expected_delta, deltas['base'])
195
text_name = 'chain1-'
196
for depth in range(26):
197
new_version = text_name + '%s' % depth
198
expected_delta = (next_parent, sha1s[depth],
200
[(depth + 1, depth + 1, 1, [(new_version, 'line\n')])])
201
self.assertEqual(expected_delta, deltas[new_version])
202
next_parent = new_version
204
text_name = 'chain2-'
205
for depth in range(26):
206
new_version = text_name + '%s' % depth
207
expected_delta = (next_parent, sha1s[depth], False,
208
[(depth + 1, depth + 1, 1, [(new_version, 'line\n')])])
209
self.assertEqual(expected_delta, deltas[new_version])
210
next_parent = new_version
211
# smoke tests for eol support
212
expected_delta = ('base', '264f39cab871e4cfd65b3a002f7255888bb5ed97', True, [])
213
self.assertEqual(['line'], f.get_lines('noeol'))
214
self.assertEqual(expected_delta, deltas['noeol'])
215
# smoke tests for eol support - two noeol in a row same content
216
expected_deltas = (('noeol', '3ad7ee82dbd8f29ecba073f96e43e414b3f70a4d', True,
217
[(0, 1, 2, [('noeolsecond', 'line\n'), ('noeolsecond', 'line\n')])]),
218
('noeol', '3ad7ee82dbd8f29ecba073f96e43e414b3f70a4d', True,
219
[(0, 0, 1, [('noeolsecond', 'line\n')]), (1, 1, 0, [])]))
220
self.assertEqual(['line\n', 'line'], f.get_lines('noeolsecond'))
221
self.assertTrue(deltas['noeolsecond'] in expected_deltas)
222
# two no-eol in a row, different content
223
expected_delta = ('noeolsecond', '8bb553a84e019ef1149db082d65f3133b195223b', True,
224
[(1, 2, 1, [('noeolnotshared', 'phone\n')])])
225
self.assertEqual(['line\n', 'phone'], f.get_lines('noeolnotshared'))
226
self.assertEqual(expected_delta, deltas['noeolnotshared'])
227
# eol folling a no-eol with content change
228
expected_delta = ('noeol', 'a61f6fb6cfc4596e8d88c34a308d1e724caf8977', False,
229
[(0, 1, 1, [('eol', 'phone\n')])])
230
self.assertEqual(['phone\n'], f.get_lines('eol'))
231
self.assertEqual(expected_delta, deltas['eol'])
232
# eol folling a no-eol with content change
233
expected_delta = ('noeol', '6bfa09d82ce3e898ad4641ae13dd4fdb9cf0d76b', False,
234
[(0, 1, 1, [('eolline', 'line\n')])])
235
self.assertEqual(['line\n'], f.get_lines('eolline'))
236
self.assertEqual(expected_delta, deltas['eolline'])
237
# eol with no parents
238
expected_delta = (None, '264f39cab871e4cfd65b3a002f7255888bb5ed97', True,
239
[(0, 0, 1, [('noeolbase', 'line\n')])])
240
self.assertEqual(['line'], f.get_lines('noeolbase'))
241
self.assertEqual(expected_delta, deltas['noeolbase'])
242
# eol with two parents, in inverse insertion order
243
expected_deltas = (('noeolbase', '264f39cab871e4cfd65b3a002f7255888bb5ed97', True,
244
[(0, 1, 1, [('eolbeforefirstparent', 'line\n')])]),
245
('noeolbase', '264f39cab871e4cfd65b3a002f7255888bb5ed97', True,
246
[(0, 1, 1, [('eolbeforefirstparent', 'line\n')])]))
247
self.assertEqual(['line'], f.get_lines('eolbeforefirstparent'))
248
#self.assertTrue(deltas['eolbeforefirstparent'] in expected_deltas)
250
def test_make_mpdiffs(self):
251
from bzrlib import multiparent
252
vf = self.get_file('foo')
253
sha1s = self._setup_for_deltas(vf)
254
new_vf = self.get_file('bar')
255
for version in multiparent.topo_iter(vf):
256
mpdiff = vf.make_mpdiffs([version])[0]
257
new_vf.add_mpdiffs([(version, vf.get_parents(version),
258
vf.get_sha1(version), mpdiff)])
259
self.assertEqualDiff(vf.get_text(version),
260
new_vf.get_text(version))
262
def _setup_for_deltas(self, f):
263
self.assertRaises(errors.RevisionNotPresent, f.get_delta, 'base')
264
# add texts that should trip the knit maximum delta chain threshold
265
# as well as doing parallel chains of data in knits.
266
# this is done by two chains of 25 insertions
267
f.add_lines('base', [], ['line\n'])
268
f.add_lines('noeol', ['base'], ['line'])
269
# detailed eol tests:
270
# shared last line with parent no-eol
271
f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
272
# differing last line with parent, both no-eol
273
f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
274
# add eol following a noneol parent, change content
275
f.add_lines('eol', ['noeol'], ['phone\n'])
276
# add eol following a noneol parent, no change content
277
f.add_lines('eolline', ['noeol'], ['line\n'])
278
# noeol with no parents:
279
f.add_lines('noeolbase', [], ['line'])
280
# noeol preceeding its leftmost parent in the output:
281
# this is done by making it a merge of two parents with no common
282
# anestry: noeolbase and noeol with the
283
# later-inserted parent the leftmost.
284
f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
285
# two identical eol texts
286
f.add_lines('noeoldup', ['noeol'], ['line'])
288
text_name = 'chain1-'
290
sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
291
1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
292
2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
293
3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
294
4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
295
5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
296
6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
297
7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
298
8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
299
9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
300
10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
301
11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
302
12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
303
13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
304
14:'2c4b1736566b8ca6051e668de68650686a3922f2',
305
15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
306
16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
307
17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
308
18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
309
19:'1ebed371807ba5935958ad0884595126e8c4e823',
310
20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
311
21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
312
22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
313
23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
314
24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
315
25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
317
for depth in range(26):
318
new_version = text_name + '%s' % depth
319
text = text + ['line\n']
320
f.add_lines(new_version, [next_parent], text)
321
next_parent = new_version
323
text_name = 'chain2-'
325
for depth in range(26):
326
new_version = text_name + '%s' % depth
327
text = text + ['line\n']
328
f.add_lines(new_version, [next_parent], text)
329
next_parent = new_version
332
def test_add_delta(self):
333
# tests for the add-delta facility.
334
# at this point, optimising for speed, we assume no checks when deltas are inserted.
335
# this may need to be revisited.
336
source = self.get_file('source')
337
source.add_lines('base', [], ['line\n'])
339
text_name = 'chain1-'
341
for depth in range(26):
342
new_version = text_name + '%s' % depth
343
text = text + ['line\n']
344
source.add_lines(new_version, [next_parent], text)
345
next_parent = new_version
347
text_name = 'chain2-'
349
for depth in range(26):
350
new_version = text_name + '%s' % depth
351
text = text + ['line\n']
352
source.add_lines(new_version, [next_parent], text)
353
next_parent = new_version
354
source.add_lines('noeol', ['base'], ['line'])
356
target = self.get_file('target')
357
for version in source.versions():
358
parent, sha1, noeol, delta = source.get_delta(version)
359
target.add_delta(version,
360
source.get_parents(version),
365
self.assertRaises(RevisionAlreadyPresent,
366
target.add_delta, 'base', [], None, '', False, [])
367
for version in source.versions():
368
self.assertEqual(source.get_lines(version),
369
target.get_lines(version))
371
def test_ancestry(self):
373
self.assertEqual([], f.get_ancestry([]))
374
f.add_lines('r0', [], ['a\n', 'b\n'])
375
f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
376
f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
377
f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
378
f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
379
self.assertEqual([], f.get_ancestry([]))
380
versions = f.get_ancestry(['rM'])
381
# there are some possibilities:
385
# so we check indexes
386
r0 = versions.index('r0')
387
r1 = versions.index('r1')
388
r2 = versions.index('r2')
389
self.assertFalse('r3' in versions)
390
rM = versions.index('rM')
391
self.assertTrue(r0 < r1)
392
self.assertTrue(r0 < r2)
393
self.assertTrue(r1 < rM)
394
self.assertTrue(r2 < rM)
396
self.assertRaises(RevisionNotPresent,
397
f.get_ancestry, ['rM', 'rX'])
399
self.assertEqual(set(f.get_ancestry('rM')),
400
set(f.get_ancestry('rM', topo_sorted=False)))
402
def test_mutate_after_finish(self):
404
f.transaction_finished()
405
self.assertRaises(errors.OutSideTransaction, f.add_delta, '', [], '', '', False, [])
406
self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
407
self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])
408
self.assertRaises(errors.OutSideTransaction, f.fix_parents, '', [])
409
self.assertRaises(errors.OutSideTransaction, f.join, '')
410
self.assertRaises(errors.OutSideTransaction, f.clone_text, 'base', 'bar', ['foo'])
412
def test_clear_cache(self):
414
# on a new file it should not error
416
# and after adding content, doing a clear_cache and a get should work.
417
f.add_lines('0', [], ['a'])
419
self.assertEqual(['a'], f.get_lines('0'))
421
def test_clone_text(self):
423
f.add_lines('r0', [], ['a\n', 'b\n'])
424
f.clone_text('r1', 'r0', ['r0'])
426
self.assertEquals(f.get_lines('r1'), f.get_lines('r0'))
427
self.assertEquals(f.get_lines('r1'), ['a\n', 'b\n'])
428
self.assertEquals(f.get_parents('r1'), ['r0'])
430
self.assertRaises(RevisionNotPresent,
431
f.clone_text, 'r2', 'rX', [])
432
self.assertRaises(RevisionAlreadyPresent,
433
f.clone_text, 'r1', 'r0', [])
435
verify_file(self.reopen_file())
437
def test_create_empty(self):
439
f.add_lines('0', [], ['a\n'])
440
new_f = f.create_empty('t', MemoryTransport())
441
# smoke test, specific types should check it is honoured correctly for
442
# non type attributes
443
self.assertEqual([], new_f.versions())
444
self.assertTrue(isinstance(new_f, f.__class__))
446
def test_copy_to(self):
448
f.add_lines('0', [], ['a\n'])
449
t = MemoryTransport()
451
for suffix in f.__class__.get_suffixes():
452
self.assertTrue(t.has('foo' + suffix))
454
def test_get_suffixes(self):
457
self.assertEqual(f.__class__.get_suffixes(), f.__class__.get_suffixes())
458
# and should be a list
459
self.assertTrue(isinstance(f.__class__.get_suffixes(), list))
461
def build_graph(self, file, graph):
    """Populate ``file`` with the history described by ``graph``.

    :param file: a VersionedFile to add versions to.
    :param graph: dict mapping version-id -> sequence of parent
        version-ids.  Every version is added with an empty line list,
        in topologically sorted order so each parent exists before any
        of its children is inserted.
    """
    for node in topo_sort(graph.items()):
        file.add_lines(node, graph[node], [])
465
def test_get_graph(self):
471
self.build_graph(f, graph)
472
self.assertEqual(graph, f.get_graph())
474
def test_get_graph_partial(self):
482
complex_graph.update(simple_a)
487
complex_graph.update(simple_b)
494
complex_graph.update(simple_gam)
496
simple_b_gam.update(simple_gam)
497
simple_b_gam.update(simple_b)
498
self.build_graph(f, complex_graph)
499
self.assertEqual(simple_a, f.get_graph(['a']))
500
self.assertEqual(simple_b, f.get_graph(['b']))
501
self.assertEqual(simple_gam, f.get_graph(['gam']))
502
self.assertEqual(simple_b_gam, f.get_graph(['b', 'gam']))
504
def test_get_parents(self):
506
f.add_lines('r0', [], ['a\n', 'b\n'])
507
f.add_lines('r1', [], ['a\n', 'b\n'])
508
f.add_lines('r2', [], ['a\n', 'b\n'])
509
f.add_lines('r3', [], ['a\n', 'b\n'])
510
f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
511
self.assertEquals(f.get_parents('m'), ['r0', 'r1', 'r2', 'r3'])
513
self.assertRaises(RevisionNotPresent,
516
def test_annotate(self):
518
f.add_lines('r0', [], ['a\n', 'b\n'])
519
f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
520
origins = f.annotate('r1')
521
self.assertEquals(origins[0][0], 'r1')
522
self.assertEquals(origins[1][0], 'r0')
524
self.assertRaises(RevisionNotPresent,
528
# tests that walk returns all the inclusions for the requested
529
# revisions as well as the revisions changes themselves.
530
f = self.get_file('1')
531
f.add_lines('r0', [], ['a\n', 'b\n'])
532
f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
533
f.add_lines('rX', ['r1'], ['d\n', 'b\n'])
534
f.add_lines('rY', ['r1'], ['c\n', 'e\n'])
537
for lineno, insert, dset, text in f.walk(['rX', 'rY']):
538
lines[text] = (insert, dset)
540
self.assertTrue(lines['a\n'], ('r0', set(['r1'])))
541
self.assertTrue(lines['b\n'], ('r0', set(['rY'])))
542
self.assertTrue(lines['c\n'], ('r1', set(['rX'])))
543
self.assertTrue(lines['d\n'], ('rX', set([])))
544
self.assertTrue(lines['e\n'], ('rY', set([])))
546
def test_detection(self):
547
# Test weaves detect corruption.
549
# Weaves contain a checksum of their texts.
550
# When a text is extracted, this checksum should be
553
w = self.get_file_corrupted_text()
555
self.assertEqual('hello\n', w.get_text('v1'))
556
self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
557
self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
558
self.assertRaises(errors.WeaveInvalidChecksum, w.check)
560
w = self.get_file_corrupted_checksum()
562
self.assertEqual('hello\n', w.get_text('v1'))
563
self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
564
self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
565
self.assertRaises(errors.WeaveInvalidChecksum, w.check)
567
def get_file_corrupted_text(self):
    """Return a versioned file with corrupt text but valid metadata.

    Subclasses of the mixin must override this; the base implementation
    only signals that the hook was not provided.
    """
    raise NotImplementedError(self.get_file_corrupted_text)
571
def reopen_file(self, name='foo'):
    """Open the versioned file from disk again.

    Subclasses of the mixin must override this; the base implementation
    only signals that the hook was not provided.
    """
    raise NotImplementedError(self.reopen_file)
575
def test_iter_parents(self):
576
"""iter_parents returns the parents for many nodes."""
580
f.add_lines('r0', [], ['a\n', 'b\n'])
582
f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
584
f.add_lines('r2', ['r1', 'r0'], ['a\n', 'b\n'])
586
# cases: each sample data individually:
587
self.assertEqual(set([('r0', ())]),
588
set(f.iter_parents(['r0'])))
589
self.assertEqual(set([('r1', ('r0', ))]),
590
set(f.iter_parents(['r1'])))
591
self.assertEqual(set([('r2', ('r1', 'r0'))]),
592
set(f.iter_parents(['r2'])))
593
# no nodes returned for a missing node
594
self.assertEqual(set(),
595
set(f.iter_parents(['missing'])))
596
# 1 node returned with missing nodes skipped
597
self.assertEqual(set([('r1', ('r0', ))]),
598
set(f.iter_parents(['ghost1', 'r1', 'ghost'])))
600
self.assertEqual(set([('r0', ()), ('r1', ('r0', ))]),
601
set(f.iter_parents(['r0', 'r1'])))
602
# 2 nodes returned, missing skipped
603
self.assertEqual(set([('r0', ()), ('r1', ('r0', ))]),
604
set(f.iter_parents(['a', 'r0', 'b', 'r1', 'c'])))
606
def test_iter_lines_added_or_present_in_versions(self):
607
# test that we get at least an equalset of the lines added by
608
# versions in the weave
609
# the ordering here is to make a tree so that dumb searches have
610
# more changes to muck up.
612
class InstrumentedProgress(progress.DummyProgress):
616
progress.DummyProgress.__init__(self)
619
def update(self, msg=None, current=None, total=None):
620
self.updates.append((msg, current, total))
623
# add a base to get included
624
vf.add_lines('base', [], ['base\n'])
625
# add a ancestor to be included on one side
626
vf.add_lines('lancestor', [], ['lancestor\n'])
627
# add a ancestor to be included on the other side
628
vf.add_lines('rancestor', ['base'], ['rancestor\n'])
629
# add a child of rancestor with no eofile-nl
630
vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
631
# add a child of lancestor and base to join the two roots
632
vf.add_lines('otherchild',
633
['lancestor', 'base'],
634
['base\n', 'lancestor\n', 'otherchild\n'])
635
def iter_with_versions(versions, expected):
636
# now we need to see what lines are returned, and how often.
643
progress = InstrumentedProgress()
644
# iterate over the lines
645
for line in vf.iter_lines_added_or_present_in_versions(versions,
648
if []!= progress.updates:
649
self.assertEqual(expected, progress.updates)
651
lines = iter_with_versions(['child', 'otherchild'],
652
[('Walking content.', 0, 2),
653
('Walking content.', 1, 2),
654
('Walking content.', 2, 2)])
655
# we must see child and otherchild
656
self.assertTrue(lines['child\n'] > 0)
657
self.assertTrue(lines['otherchild\n'] > 0)
658
# we dont care if we got more than that.
661
lines = iter_with_versions(None, [('Walking content.', 0, 5),
662
('Walking content.', 1, 5),
663
('Walking content.', 2, 5),
664
('Walking content.', 3, 5),
665
('Walking content.', 4, 5),
666
('Walking content.', 5, 5)])
667
# all lines must be seen at least once
668
self.assertTrue(lines['base\n'] > 0)
669
self.assertTrue(lines['lancestor\n'] > 0)
670
self.assertTrue(lines['rancestor\n'] > 0)
671
self.assertTrue(lines['child\n'] > 0)
672
self.assertTrue(lines['otherchild\n'] > 0)
674
def test_fix_parents(self):
675
# some versioned files allow incorrect parents to be corrected after
676
# insertion - this may not fix ancestry..
677
# if they do not supported, they just do not implement it.
678
# we test this as an interface test to ensure that those that *do*
679
# implementent it get it right.
681
vf.add_lines('notbase', [], [])
682
vf.add_lines('base', [], [])
684
vf.fix_parents('notbase', ['base'])
685
except NotImplementedError:
687
self.assertEqual(['base'], vf.get_parents('notbase'))
688
# open again, check it stuck.
690
self.assertEqual(['base'], vf.get_parents('notbase'))
692
def test_fix_parents_with_ghosts(self):
693
# when fixing parents, ghosts that are listed should not be ghosts
698
vf.add_lines_with_ghosts('notbase', ['base', 'stillghost'], [])
699
except NotImplementedError:
701
vf.add_lines('base', [], [])
702
vf.fix_parents('notbase', ['base', 'stillghost'])
703
self.assertEqual(['base'], vf.get_parents('notbase'))
704
# open again, check it stuck.
706
self.assertEqual(['base'], vf.get_parents('notbase'))
707
# and check the ghosts
708
self.assertEqual(['base', 'stillghost'],
709
vf.get_parents_with_ghosts('notbase'))
711
def test_add_lines_with_ghosts(self):
712
# some versioned file formats allow lines to be added with parent
713
# information that is > than that in the format. Formats that do
714
# not support this need to raise NotImplementedError on the
715
# add_lines_with_ghosts api.
717
# add a revision with ghost parents
718
# The preferred form is utf8, but we should translate when needed
719
parent_id_unicode = u'b\xbfse'
720
parent_id_utf8 = parent_id_unicode.encode('utf8')
722
vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
723
except NotImplementedError:
724
# check the other ghost apis are also not implemented
725
self.assertRaises(NotImplementedError, vf.has_ghost, 'foo')
726
self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
727
self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
728
self.assertRaises(NotImplementedError, vf.get_graph_with_ghosts)
730
vf = self.reopen_file()
731
# test key graph related apis: getncestry, _graph, get_parents
733
# - these are ghost unaware and must not be reflect ghosts
734
self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
735
self.assertEqual([], vf.get_parents('notbxbfse'))
736
self.assertEqual({'notbxbfse':()}, vf.get_graph())
737
self.assertFalse(self.callDeprecated([osutils._revision_id_warning],
738
vf.has_version, parent_id_unicode))
739
self.assertFalse(vf.has_version(parent_id_utf8))
740
# we have _with_ghost apis to give us ghost information.
741
self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
742
self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
743
self.assertEqual({'notbxbfse':[parent_id_utf8]}, vf.get_graph_with_ghosts())
744
self.assertTrue(self.callDeprecated([osutils._revision_id_warning],
745
vf.has_ghost, parent_id_unicode))
746
self.assertTrue(vf.has_ghost(parent_id_utf8))
747
# if we add something that is a ghost of another, it should correct the
748
# results of the prior apis
749
self.callDeprecated([osutils._revision_id_warning],
750
vf.add_lines, parent_id_unicode, [], [])
751
self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
752
self.assertEqual([parent_id_utf8], vf.get_parents('notbxbfse'))
753
self.assertEqual({parent_id_utf8:(),
754
'notbxbfse':(parent_id_utf8, ),
757
self.assertTrue(self.callDeprecated([osutils._revision_id_warning],
758
vf.has_version, parent_id_unicode))
759
self.assertTrue(vf.has_version(parent_id_utf8))
760
# we have _with_ghost apis to give us ghost information.
761
self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
762
self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
763
self.assertEqual({parent_id_utf8:[],
764
'notbxbfse':[parent_id_utf8],
766
vf.get_graph_with_ghosts())
767
self.assertFalse(self.callDeprecated([osutils._revision_id_warning],
768
vf.has_ghost, parent_id_unicode))
769
self.assertFalse(vf.has_ghost(parent_id_utf8))
771
def test_add_lines_with_ghosts_after_normal_revs(self):
772
# some versioned file formats allow lines to be added with parent
773
# information that is > than that in the format. Formats that do
774
# not support this need to raise NotImplementedError on the
775
# add_lines_with_ghosts api.
777
# probe for ghost support
780
except NotImplementedError:
782
vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
783
vf.add_lines_with_ghosts('references_ghost',
785
['line\n', 'line_b\n', 'line_c\n'])
786
origins = vf.annotate('references_ghost')
787
self.assertEquals(('base', 'line\n'), origins[0])
788
self.assertEquals(('base', 'line_b\n'), origins[1])
789
self.assertEquals(('references_ghost', 'line_c\n'), origins[2])
791
def test_readonly_mode(self):
792
transport = get_transport(self.get_url('.'))
793
factory = self.get_factory()
794
vf = factory('id', transport, 0777, create=True, access_mode='w')
795
vf = factory('id', transport, access_mode='r')
796
self.assertRaises(errors.ReadOnlyError, vf.add_delta, '', [], '', '', False, [])
797
self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
798
self.assertRaises(errors.ReadOnlyError,
799
vf.add_lines_with_ghosts,
803
self.assertRaises(errors.ReadOnlyError, vf.fix_parents, 'base', [])
804
self.assertRaises(errors.ReadOnlyError, vf.join, 'base')
805
self.assertRaises(errors.ReadOnlyError, vf.clone_text, 'base', 'bar', ['foo'])
807
def test_get_sha1(self):
808
# check the sha1 data is available
811
vf.add_lines('a', [], ['a\n'])
812
# the same file, different metadata
813
vf.add_lines('b', ['a'], ['a\n'])
814
# a file differing only in last newline.
815
vf.add_lines('c', [], ['a'])
817
'3f786850e387550fdab836ed7e6dc881de23001b', vf.get_sha1('a'))
819
'3f786850e387550fdab836ed7e6dc881de23001b', vf.get_sha1('b'))
821
'86f7e437faa5a7fce15d1ddcb9eaeaea377667b8', vf.get_sha1('c'))
823
self.assertEqual(['3f786850e387550fdab836ed7e6dc881de23001b',
824
'86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
825
'3f786850e387550fdab836ed7e6dc881de23001b'],
826
vf.get_sha1s(['a', 'c', 'b']))
829
class TestWeave(TestCaseWithTransport, VersionedFileTestMixIn):
831
def get_file(self, name='foo'):
    """Create and return a fresh WeaveFile on a local transport."""
    return WeaveFile(name, get_transport(self.get_url('.')), create=True)
834
def get_file_corrupted_text(self):
835
w = WeaveFile('foo', get_transport(self.get_url('.')), create=True)
836
w.add_lines('v1', [], ['hello\n'])
837
w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
839
# We are going to invasively corrupt the text
840
# Make sure the internals of weave are the same
841
self.assertEqual([('{', 0)
849
self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
850
, '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
855
w._weave[4] = 'There\n'
858
def get_file_corrupted_checksum(self):
859
w = self.get_file_corrupted_text()
861
w._weave[4] = 'there\n'
862
self.assertEqual('hello\nthere\n', w.get_text('v2'))
864
#Invalid checksum, first digit changed
865
w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
868
def reopen_file(self, name='foo', create=False):
    """Re-open the named WeaveFile from disk.

    :param create: passed through to WeaveFile; when False the file
        must already exist on disk.
    """
    return WeaveFile(name, get_transport(self.get_url('.')), create=create)
871
def test_no_implicit_create(self):
872
self.assertRaises(errors.NoSuchFile,
875
get_transport(self.get_url('.')))
877
def get_factory(self):
881
class TestKnit(TestCaseWithTransport, VersionedFileTestMixIn):
883
def get_file(self, name='foo'):
    """Create and return a fresh delta-compressed KnitVersionedFile."""
    return KnitVersionedFile(name, get_transport(self.get_url('.')),
        delta=True, create=True)
887
def get_factory(self):
    """Return the versioned-file factory class exercised by this test case."""
    return KnitVersionedFile
890
def get_file_corrupted_text(self):
891
knit = self.get_file()
892
knit.add_lines('v1', [], ['hello\n'])
893
knit.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
896
def reopen_file(self, name='foo', create=False):
897
return KnitVersionedFile(name, get_transport(self.get_url('.')),
901
def test_detection(self):
902
knit = self.get_file()
905
def test_no_implicit_create(self):
906
self.assertRaises(errors.NoSuchFile,
909
get_transport(self.get_url('.')))
912
class InterString(versionedfile.InterVersionedFile):
913
"""An inter-versionedfile optimised code path for strings.
915
This is for use during testing where we use strings as versionedfiles
916
so that none of the default regsitered interversionedfile classes will
917
match - which lets us test the match logic.
921
@staticmethod
def is_compatible(source, target):
    """InterString is compatible with strings-as-versionedfiles.

    Returns True only when both endpoints are plain strings, which the
    test suite uses as stand-in versionedfiles to exercise the
    InterVersionedFile matching logic.
    """
    return isinstance(source, str) and isinstance(target, str)
926
# TODO this and the InterRepository core logic should be consolidatable
927
# if we make the registry a separate class though we still need to
928
# test the behaviour in the active registry to catch failure-to-handle-
930
class TestInterVersionedFile(TestCaseWithTransport):
    """Tests for the InterVersionedFile optimiser registry and lookup."""

    def test_get_default_inter_versionedfile(self):
        # test that the InterVersionedFile.get(a, b) probes
        # for a class where is_compatible(a, b) returns
        # true and returns a default interversionedfile otherwise.
        # This also tests that the default registered optimised
        # interversionedfile classes do not barf inappropriately when a
        # surprising versionedfile type is handed in.
        dummy_a = "VersionedFile 1."
        dummy_b = "VersionedFile 2."
        self.assertGetsDefaultInterVersionedFile(dummy_a, dummy_b)

    def assertGetsDefaultInterVersionedFile(self, a, b):
        """Asserts that InterVersionedFile.get(a, b) -> the default."""
        inter = versionedfile.InterVersionedFile.get(a, b)
        self.assertEqual(versionedfile.InterVersionedFile,
                         inter.__class__)
        self.assertEqual(a, inter.source)
        self.assertEqual(b, inter.target)

    def test_register_inter_versionedfile_class(self):
        # test that an optimised code path provider - an
        # InterVersionedFile subclass - can be registered and unregistered
        # and that it is correctly selected when given a versionedfile
        # pair that it returns true on for the is_compatible static method
        # check
        dummy_a = "VersionedFile 1."
        dummy_b = "VersionedFile 2."
        versionedfile.InterVersionedFile.register_optimiser(InterString)
        try:
            # we should get the default for something InterString returns
            # False to
            self.assertFalse(InterString.is_compatible(dummy_a, None))
            self.assertGetsDefaultInterVersionedFile(dummy_a, None)
            # and we should get an InterString for a pair it 'likes'
            self.assertTrue(InterString.is_compatible(dummy_a, dummy_b))
            inter = versionedfile.InterVersionedFile.get(dummy_a, dummy_b)
            self.assertEqual(InterString, inter.__class__)
            self.assertEqual(dummy_a, inter.source)
            self.assertEqual(dummy_b, inter.target)
        finally:
            # always restore the registry so other tests see the default set
            versionedfile.InterVersionedFile.unregister_optimiser(InterString)
        # now we should get the default InterVersionedFile object again.
        self.assertGetsDefaultInterVersionedFile(dummy_a, dummy_b)
class TestReadonlyHttpMixin(object):
    """Mixin exercising a versioned file over a readonly HTTP transport.

    Subclasses provide get_file() and get_factory().
    """

    def test_readonly_http_works(self):
        # we should be able to read from http with a versioned file.
        vf = self.get_file()
        # try an empty file access
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual([], readonly_vf.versions())
        # now with some content
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
        self.assertEqual(['1', '2'], vf.versions())
        # every version should be readable over the readonly transport
        for version in readonly_vf.versions():
            readonly_vf.get_lines(version)
class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):
    """Run the readonly-HTTP tests against WeaveFile."""

    def get_file(self):
        """Create a weave on the writable test transport."""
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True)

    def get_factory(self):
        """Return the versioned-file class under test."""
        return WeaveFile
class TestKnitHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):
    """Run the readonly-HTTP tests against KnitVersionedFile."""

    def get_file(self):
        """Create a delta-compressed knit on the writable test transport."""
        return KnitVersionedFile('foo', get_transport(self.get_url('.')),
                                 delta=True, create=True)

    def get_factory(self):
        """Return the versioned-file class under test."""
        return KnitVersionedFile
class MergeCasesMixin(object):
    """Mixin of weave-merge scenario tests.

    Subclasses provide get_file() (the versioned file implementation under
    test) and log_contents() (implementation-specific debug logging).

    NOTE(review): several lines of this class (the addcrlf helper, StringIO
    setup, and the multi-line string fixtures) were lost in extraction and
    have been reconstructed; confirm against the original file.
    """

    def doMerge(self, base, a, b, mp):
        """Merge line-lists a and b (both derived from base) and assert the
        merged output equals mp.  Trailing newlines are appended here."""
        from cStringIO import StringIO
        from textwrap import dedent

        def addcrlf(x):
            return x + '\n'

        w = self.get_file()
        w.add_lines('text0', [], map(addcrlf, base))
        w.add_lines('text1', ['text0'], map(addcrlf, a))
        w.add_lines('text2', ['text0'], map(addcrlf, b))

        self.log_contents(w)

        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                # strip the trailing newline for readable log output
                self.log('%12s | %s' % (state, line[:-1]))

        self.log('merge:')
        mt = StringIO()
        mt.writelines(w.weave_merge(p))
        # rewind so readlines() below sees the whole merged text
        mt.seek(0)
        self.log(mt.getvalue())

        mp = map(addcrlf, mp)
        self.assertEqual(mt.readlines(), mp)

    def testOneInsert(self):
        self.doMerge([],
                     ['aa'],
                     [],
                     ['aa'])

    def testSeparateInserts(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    def testSameInsert(self):
        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])

    # Expected result of testOverlappedInsert; overridden by TestWeaveMerge,
    # which reports a conflict for this case.
    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']

    def testOverlappedInsert(self):
        self.doMerge(['aaa', 'bbb'],
                     ['aaa', 'xxx', 'yyy', 'bbb'],
                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)

        # really it ought to reduce this to
        # ['aaa', 'xxx', 'yyy', 'bbb']

    def testClashReplace(self):
        self.doMerge(['aaa'],
                     ['xxx'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert1(self):
        self.doMerge(['aaa'],
                     ['xxx', 'aaa'],
                     ['yyy', 'zzz'],
                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
                      '>>>>>>> '])

    def testNonClashInsert2(self):
        self.doMerge(['aaa'],
                     ['aaa'],
                     ['yyy', 'zzz'],
                     ['yyy', 'zzz'])

    def testDeleteAndModify(self):
        """Clashing delete and modification.

        If one side modifies a region and the other deletes it then
        there should be a conflict with one side blank.
        """

        #######################################
        # skipped, not working yet
        return

        self.doMerge(['aaa', 'bbb', 'ccc'],
                     ['aaa', 'ddd', 'ccc'],
                     ['aaa', 'ccc'],
                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])

    def _test_merge_from_strings(self, base, a, b, expected):
        """Merge whole-string texts a and b (both derived from base) and
        assert the merged text equals expected."""
        w = self.get_file()
        w.add_lines('text0', [], base.splitlines(True))
        w.add_lines('text1', ['text0'], a.splitlines(True))
        w.add_lines('text2', ['text0'], b.splitlines(True))
        self.log('merge plan:')
        p = list(w.plan_merge('text1', 'text2'))
        for state, line in p:
            if line:
                self.log('%12s | %s' % (state, line[:-1]))
        self.log('merge result:')
        result_text = ''.join(w.weave_merge(p))
        self.log(result_text)
        self.assertEqualDiff(result_text, expected)

    def test_weave_merge_conflicts(self):
        # does weave merge properly handle plans that end with unchanged?
        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
        self.assertEqual(result, 'hello\n')

    def test_deletion_extended(self):
        """One side deletes, the other deletes more.
        """
        base = """\
            line 1
            line 2
            line 3
            """
        a = """\
            line 1
            line 2
            """
        b = """\
            line 1
            """
        result = """\
            line 1
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_deletion_overlap(self):
        """Delete overlapping regions with no other conflict.

        Arguably it'd be better to treat these as agreement, rather than
        conflict, but for now conflict is safer.
        """
        base = """\
            start context
            int a() {}
            int b() {}
            int c() {}
            end context
            """
        a = """\
            start context
            int a() {}
            end context
            """
        b = """\
            start context
            int c() {}
            end context
            """
        result = """\
            start context
<<<<<<< 
            int a() {}
=======
            int c() {}
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_agreement_deletion(self):
        """Agree to delete some lines, without conflicts."""
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            end context
            """
        b = """\
            start context
            base line 1
            end context
            """
        result = """\
            start context
            base line 1
            end context
            """
        self._test_merge_from_strings(base, a, b, result)

    def test_sync_on_deletion(self):
        """Specific case of merge where we can synchronize incorrectly.

        A previous version of the weave merge concluded that the two versions
        agreed on deleting line 2, and this could be a synchronization point.
        Line 1 was then considered in isolation, and thought to be deleted on
        both sides.

        It's better to consider the whole thing as a disagreement region.
        """
        base = """\
            start context
            base line 1
            base line 2
            end context
            """
        a = """\
            start context
            base line 1
            a's replacement line 2
            end context
            """
        b = """\
            start context
            b replaces
            both lines
            end context
            """
        result = """\
            start context
<<<<<<< 
            base line 1
            a's replacement line 2
=======
            b replaces
            both lines
>>>>>>> 
            end context
            """
        self._test_merge_from_strings(base, a, b, result)
class TestKnitMerge(TestCaseWithTransport, MergeCasesMixin):
    """Run the merge scenario tests against KnitVersionedFile."""

    def get_file(self, name='foo'):
        """Create a delta-compressed knit on the test transport."""
        return KnitVersionedFile(name, get_transport(self.get_url('.')),
                                 delta=True, create=True)

    def log_contents(self, w):
        # knits have no cheap human-readable dump; nothing to log
        pass
class TestWeaveMerge(TestCaseWithTransport, MergeCasesMixin):
    """Run the merge scenario tests against WeaveFile."""

    def get_file(self, name='foo'):
        """Create a weave on the test transport."""
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)

    def log_contents(self, w):
        # dump the weave's serialized form into the test log for debugging
        self.log('weave is:')
        tmpf = StringIO()
        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    # Weave merge treats the overlapped insert as a conflict, unlike the
    # knit-based merge, so override the shared expectation.
    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']