"""Tests for Knit data structure"""

from cStringIO import StringIO
import difflib
import gzip
import sys

from bzrlib import (
    errors,
    knit,
    osutils,
    pack,
    )
from bzrlib.errors import (
    KnitError,
    KnitHeaderError,
    NoSuchFile,
    RevisionAlreadyPresent,
    )
from bzrlib.index import *
from bzrlib.knit import (
    AnnotatedKnitContent,
    KnitAnnotateFactory,
    KnitContent,
    KnitPlainFactory,
    KnitSequenceMatcher,
    KnitVersionedFile,
    KnitVersionedFiles,
    PlainKnitContent,
    WeaveToKnit,
    _DirectPackAccess,
    _KndxIndex,
    _KnitGraphIndex,
    _KnitKeyAccess,
    make_file_factory,
    )
from bzrlib.osutils import split_lines
from bzrlib.tests import (
    Feature,
    TestCase,
    TestCaseWithMemoryTransport,
    TestCaseWithTransport,
    )
from bzrlib.transport import TransportLogger, get_transport
from bzrlib.transport.memory import MemoryTransport
from bzrlib.weave import Weave


class KnitTests(TestCaseWithTransport):
    """Class containing knit test helper routines."""

    def make_test_knit(self, annotate=False):
        if not annotate:
            factory = KnitPlainFactory()
        else:
            factory = None
        return KnitVersionedFile('test', get_transport('.'), access_mode='w', factory=factory, create=True)


class BasicKnitTests(KnitTests):

    def add_stock_one_and_one_a(self, k):
        k.add_lines('text-1', [], split_lines(TEXT_1))
        k.add_lines('text-1a', ['text-1'], split_lines(TEXT_1A))

    def test_knit_constructor(self):
        """Construct empty k"""
        self.make_test_knit()

    def test_knit_add(self):
        """Store one text in knit and retrieve"""
        k = self.make_test_knit()
        k.add_lines('text-1', [], split_lines(TEXT_1))
        self.assertTrue(k.has_version('text-1'))
        self.assertEqualDiff(''.join(k.get_lines('text-1')), TEXT_1)

    def test_knit_reload(self):
        # test that the content in a reloaded knit is correct
        k = self.make_test_knit()
        k.add_lines('text-1', [], split_lines(TEXT_1))
        del k
        k2 = KnitVersionedFile('test', get_transport('.'), access_mode='r', factory=KnitPlainFactory(), create=True)
        self.assertTrue(k2.has_version('text-1'))
        self.assertEqualDiff(''.join(k2.get_lines('text-1')), TEXT_1)

    def test_knit_several(self):
        """Store several texts in a knit"""
        k = self.make_test_knit()
        k.add_lines('text-1', [], split_lines(TEXT_1))
        k.add_lines('text-2', [], split_lines(TEXT_2))
        self.assertEqualDiff(''.join(k.get_lines('text-1')), TEXT_1)
        self.assertEqualDiff(''.join(k.get_lines('text-2')), TEXT_2)

    def test_repeated_add(self):
        """Knit traps attempt to replace existing version"""
        k = self.make_test_knit()
        k.add_lines('text-1', [], split_lines(TEXT_1))
        self.assertRaises(RevisionAlreadyPresent,
                          k.add_lines,
                          'text-1', [], split_lines(TEXT_1))

    def test_empty(self):
        k = self.make_test_knit(True)
        k.add_lines('text-1', [], [])
        self.assertEquals(k.get_lines('text-1'), [])

    def test_incomplete(self):
        """Test if texts without a ending line-end can be inserted and
        extracted."""
        k = KnitVersionedFile('test', get_transport('.'), delta=False, create=True)
        k.add_lines('text-1', [], ['a\n', 'b' ])
        k.add_lines('text-2', ['text-1'], ['a\rb\n', 'b\n'])
        # reopening ensures maximum room for confusion
        k = KnitVersionedFile('test', get_transport('.'), delta=False, create=True)
        self.assertEquals(k.get_lines('text-1'), ['a\n', 'b' ])
        self.assertEquals(k.get_lines('text-2'), ['a\rb\n', 'b\n'])

    def test_delta(self):
        """Expression of knit delta as lines"""
        k = self.make_test_knit()
        td = list(line_delta(TEXT_1.splitlines(True),
                             TEXT_1A.splitlines(True)))
        self.assertEqualDiff(''.join(td), delta_1_1a)
        out = apply_line_delta(TEXT_1.splitlines(True), td)
        self.assertEqualDiff(''.join(out), TEXT_1A)

    def test_add_with_parents(self):
        """Store in knit with parents"""
        k = self.make_test_knit()
        self.add_stock_one_and_one_a(k)
        self.assertEquals(k.get_parents('text-1'), [])
        self.assertEquals(k.get_parents('text-1a'), ['text-1'])

    def test_ancestry(self):
        """Store in knit with parents"""
        k = self.make_test_knit()
        self.add_stock_one_and_one_a(k)
        self.assertEquals(set(k.get_ancestry(['text-1a'])), set(['text-1a', 'text-1']))

    def test_add_delta(self):
        """Store in knit with parents"""
        k = KnitVersionedFile('test', get_transport('.'), factory=KnitPlainFactory(),
                              delta=True, create=True)
        self.add_stock_one_and_one_a(k)
        self.assertEqualDiff(''.join(k.get_lines('text-1a')), TEXT_1A)

    def test_annotate(self):
        k = KnitVersionedFile('knit', get_transport('.'), factory=KnitAnnotateFactory(),
                              delta=True, create=True)
        self.insert_and_test_small_annotate(k)

    def insert_and_test_small_annotate(self, k):
        """test annotation with k works correctly."""
        k.add_lines('text-1', [], ['a\n', 'b\n'])
        k.add_lines('text-2', ['text-1'], ['a\n', 'c\n'])

        origins = k.annotate('text-2')
        self.assertEquals(origins[0], ('text-1', 'a\n'))
        self.assertEquals(origins[1], ('text-2', 'c\n'))

    def test_annotate_fulltext(self):
        k = KnitVersionedFile('knit', get_transport('.'), factory=KnitAnnotateFactory(),
                              delta=False, create=True)
        self.insert_and_test_small_annotate(k)

    def test_annotate_merge_1(self):
        k = self.make_test_knit(True)
        k.add_lines('text-a1', [], ['a\n', 'b\n'])
        k.add_lines('text-a2', [], ['d\n', 'c\n'])
        k.add_lines('text-am', ['text-a1', 'text-a2'], ['d\n', 'b\n'])
        origins = k.annotate('text-am')
        self.assertEquals(origins[0], ('text-a2', 'd\n'))
        self.assertEquals(origins[1], ('text-a1', 'b\n'))

    def test_annotate_merge_2(self):
        k = self.make_test_knit(True)
        k.add_lines('text-a1', [], ['a\n', 'b\n', 'c\n'])
        k.add_lines('text-a2', [], ['x\n', 'y\n', 'z\n'])
        k.add_lines('text-am', ['text-a1', 'text-a2'], ['a\n', 'y\n', 'c\n'])
        origins = k.annotate('text-am')
        self.assertEquals(origins[0], ('text-a1', 'a\n'))
        self.assertEquals(origins[1], ('text-a2', 'y\n'))
        self.assertEquals(origins[2], ('text-a1', 'c\n'))

    def test_annotate_merge_9(self):
        k = self.make_test_knit(True)
        k.add_lines('text-a1', [], ['a\n', 'b\n', 'c\n'])
        k.add_lines('text-a2', [], ['x\n', 'y\n', 'z\n'])
        k.add_lines('text-am', ['text-a1', 'text-a2'], ['k\n', 'y\n', 'c\n'])
        origins = k.annotate('text-am')
        self.assertEquals(origins[0], ('text-am', 'k\n'))
        self.assertEquals(origins[1], ('text-a2', 'y\n'))
        self.assertEquals(origins[2], ('text-a1', 'c\n'))

    def test_annotate_merge_3(self):
        k = self.make_test_knit(True)
        k.add_lines('text-a1', [], ['a\n', 'b\n', 'c\n'])
        k.add_lines('text-a2', [], ['x\n', 'y\n', 'z\n'])
        k.add_lines('text-am', ['text-a1', 'text-a2'], ['k\n', 'y\n', 'z\n'])
        origins = k.annotate('text-am')
        self.assertEquals(origins[0], ('text-am', 'k\n'))
        self.assertEquals(origins[1], ('text-a2', 'y\n'))
        self.assertEquals(origins[2], ('text-a2', 'z\n'))

    def test_annotate_merge_4(self):
        k = self.make_test_knit(True)
        k.add_lines('text-a1', [], ['a\n', 'b\n', 'c\n'])
        k.add_lines('text-a2', [], ['x\n', 'y\n', 'z\n'])
        k.add_lines('text-a3', ['text-a1'], ['a\n', 'b\n', 'p\n'])
        k.add_lines('text-am', ['text-a2', 'text-a3'], ['a\n', 'b\n', 'z\n'])
        origins = k.annotate('text-am')
        self.assertEquals(origins[0], ('text-a1', 'a\n'))
        self.assertEquals(origins[1], ('text-a1', 'b\n'))
        self.assertEquals(origins[2], ('text-a2', 'z\n'))

    def test_annotate_merge_5(self):
        k = self.make_test_knit(True)
        k.add_lines('text-a1', [], ['a\n', 'b\n', 'c\n'])
        k.add_lines('text-a2', [], ['d\n', 'e\n', 'f\n'])
        k.add_lines('text-a3', [], ['x\n', 'y\n', 'z\n'])
        k.add_lines('text-am',
                    ['text-a1', 'text-a2', 'text-a3'],
                    ['a\n', 'e\n', 'z\n'])
        origins = k.annotate('text-am')
        self.assertEquals(origins[0], ('text-a1', 'a\n'))
        self.assertEquals(origins[1], ('text-a2', 'e\n'))
        self.assertEquals(origins[2], ('text-a3', 'z\n'))

    def test_annotate_file_cherry_pick(self):
        k = self.make_test_knit(True)
        k.add_lines('text-1', [], ['a\n', 'b\n', 'c\n'])
        k.add_lines('text-2', ['text-1'], ['d\n', 'e\n', 'f\n'])
        k.add_lines('text-3', ['text-2', 'text-1'], ['a\n', 'b\n', 'c\n'])
        origins = k.annotate('text-3')
        self.assertEquals(origins[0], ('text-1', 'a\n'))
        self.assertEquals(origins[1], ('text-1', 'b\n'))
        self.assertEquals(origins[2], ('text-1', 'c\n'))

    def test_knit_join(self):
        """Store in knit with parents"""
        k1 = KnitVersionedFile('test1', get_transport('.'), factory=KnitPlainFactory(), create=True)
        k1.add_lines('text-a', [], split_lines(TEXT_1))
        k1.add_lines('text-b', ['text-a'], split_lines(TEXT_1))

        k1.add_lines('text-c', [], split_lines(TEXT_1))
        k1.add_lines('text-d', ['text-c'], split_lines(TEXT_1))

        k1.add_lines('text-m', ['text-b', 'text-d'], split_lines(TEXT_1))

        k2 = KnitVersionedFile('test2', get_transport('.'), factory=KnitPlainFactory(), create=True)
        count = k2.join(k1, version_ids=['text-m'])
        self.assertEquals(count, 5)
        self.assertTrue(k2.has_version('text-a'))
        self.assertTrue(k2.has_version('text-c'))

    def test_reannotate(self):
        k1 = KnitVersionedFile('knit1', get_transport('.'),
                               factory=KnitAnnotateFactory(), create=True)
        k1.add_lines('text-a', [], ['a\n', 'b\n'])
        k1.add_lines('text-b', ['text-a'], ['a\n', 'c\n'])

        k2 = KnitVersionedFile('test2', get_transport('.'),
                               factory=KnitAnnotateFactory(), create=True)
        k2.join(k1, version_ids=['text-b'])

        k1.add_lines('text-X', ['text-b'], ['a\n', 'b\n'])
        k2.add_lines('text-c', ['text-b'], ['z\n', 'c\n'])
        k2.add_lines('text-Y', ['text-b'], ['b\n', 'c\n'])

        # test-c will have index 3
        k1.join(k2, version_ids=['text-c'])

        lines = k1.get_lines('text-c')
        self.assertEquals(lines, ['z\n', 'c\n'])

        origins = k1.annotate('text-c')
        self.assertEquals(origins[0], ('text-c', 'z\n'))
        self.assertEquals(origins[1], ('text-b', 'c\n'))

    def test_extraction_reads_components_once(self):
        t = MemoryTransport()
        instrumented_t = TransportLogger(t)
        k1 = KnitVersionedFile('id', instrumented_t, create=True, delta=True)
        # should read the index
        self.assertEqual([('id.kndx',)], instrumented_t._calls)
        instrumented_t._calls = []
        # add a fulltext
        k1.add_lines('base', [], ['text\n'])
        # should not have read at all
        self.assertEqual([], instrumented_t._calls)

        # add a delta against it
        k1.add_lines('sub', ['base'], ['text\n', 'text2\n'])
        # should not have read at all
        self.assertEqual([], instrumented_t._calls)

        # read the delta-compressed text while everything is still cached
        k1.get_lines('sub')
        # should not have read at all
        self.assertEqual([], instrumented_t._calls)

        # drop the cache and read the fulltext again
        k1.clear_cache()
        k1.get_lines('base')
        # should have read a component
        # should have read the first component only
        self.assertEqual([('id.knit', [(0, 87)])], instrumented_t._calls)
        instrumented_t._calls = []
        # a repeated read uses the cache again
        k1.get_lines('base')
        # should not have read at all
        self.assertEqual([], instrumented_t._calls)
        # and now read the other component
        k1.get_lines('sub')
        # should have read the second component
        self.assertEqual([('id.knit', [(87, 93)])], instrumented_t._calls)
        instrumented_t._calls = []

        # adding another delta against 'base' with a cold cache has to fetch
        # the basis text back
        k1.clear_cache()
        k1.add_lines('sub2', ['base'], ['text\n', 'text3\n'])
        # should read the first component only
        self.assertEqual([('id.knit', [(0, 87)])], instrumented_t._calls)

    def test_iter_lines_reads_in_order(self):
        t = MemoryTransport()
        instrumented_t = TransportLogger(t)
        k1 = KnitVersionedFile('id', instrumented_t, create=True, delta=True)
        self.assertEqual([('id.kndx',)], instrumented_t._calls)
        # add texts with no required ordering
        k1.add_lines('base', [], ['text\n'])
        k1.add_lines('base2', [], ['text2\n'])

        instrumented_t._calls = []
        # request a last-first iteration
        results = list(k1.iter_lines_added_or_present_in_versions(['base2', 'base']))
        self.assertEqual([('id.knit', [(0, 87), (87, 89)])], instrumented_t._calls)
        self.assertEqual(['text\n', 'text2\n'], results)

    def test_create_empty_annotated(self):
        k1 = self.make_test_knit(True)
        k1.add_lines('text-a', [], ['a\n', 'b\n'])
        k2 = k1.create_empty('t', MemoryTransport())
        self.assertTrue(isinstance(k2.factory, KnitAnnotateFactory))
        self.assertEqual(k1.delta, k2.delta)
        # the generic test checks for empty content and file class

    def test_knit_format(self):
        # this tests that a new knit index file has the expected content
        # and that it writes the data we expect as records are added.
        knit = self.make_test_knit(True)
        self.assertFileEqual("# bzr knit index 8\n", 'test.kndx')
        knit.add_lines_with_ghosts('revid', ['a_ghost'], ['a\n'])
        self.assertFileEqual(
            "# bzr knit index 8\n"
            "\n"
            "revid fulltext 0 84 .a_ghost :",
            'test.kndx')
        knit.add_lines_with_ghosts('revid2', ['revid'], ['a\n'])
        self.assertFileEqual(
            "# bzr knit index 8\n"
            "\nrevid fulltext 0 84 .a_ghost :"
            "\nrevid2 line-delta 84 82 0 :",
            'test.kndx')
        # we should be able to load this file again
        knit = KnitVersionedFile('test', get_transport('.'), access_mode='r')
        self.assertEqual(['revid', 'revid2'], knit.versions())
        # write a short write to the file and ensure that its ignored
        indexfile = file('test.kndx', 'at')
        indexfile.write('\nrevid3 line-delta 166 82 1 2 3 4 5 .phwoar:demo ')
        indexfile.close()
        # we should be able to load this file again
        knit = KnitVersionedFile('test', get_transport('.'), access_mode='w')
        self.assertEqual(['revid', 'revid2'], knit.versions())
        # and add a revision with the same id the failed write had
        knit.add_lines('revid3', ['revid2'], ['a\n'])
        # and when reading it revid3 should now appear.
        knit = KnitVersionedFile('test', get_transport('.'), access_mode='r')
        self.assertEqual(['revid', 'revid2', 'revid3'], knit.versions())
        self.assertEqual(['revid2'], knit.get_parents('revid3'))
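
    # The assertions above exercise the .kndx record layout: after the
    # "# bzr knit index 8" header, each record is one line of the form
    #   <version-id> <comma-separated options> <start> <size> <parents> :
    # where a parent is either ".<version-id>" (a literal reference, possibly
    # a ghost) or the integer index of an earlier record, and the trailing
    # ":" marks the record as complete.  (Summary inferred from the expected
    # file contents checked in test_knit_format; not an exhaustive format
    # specification.)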

    def test_plan_merge(self):
        my_knit = self.make_test_knit(annotate=True)
        my_knit.add_lines('text1', [], split_lines(TEXT_1))
        my_knit.add_lines('text1a', ['text1'], split_lines(TEXT_1A))
        my_knit.add_lines('text1b', ['text1'], split_lines(TEXT_1B))
        plan = list(my_knit.plan_merge('text1a', 'text1b'))
        for plan_line, expected_line in zip(plan, AB_MERGE):
            self.assertEqual(plan_line, expected_line)


from bzrlib.tuned_gzip import GzipFile
from bzrlib.versionedfile import (
    ConstantMapper,
    RecordingVersionedFilesDecorator,
    )


class _CompiledKnitFeature(Feature):

    def _probe(self):
        try:
            import bzrlib._knit_load_data_c
        except ImportError:
            return False
        return True

    def feature_name(self):
        return 'bzrlib._knit_load_data_c'

CompiledKnitFeature = _CompiledKnitFeature()


class KnitContentTestsMixin(object):

    def test_constructor(self):
        content = self._make_content([])

    def test_text(self):
        content = self._make_content([])
        self.assertEqual(content.text(), [])

        content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
        self.assertEqual(content.text(), ["text1", "text2"])

    def test_copy(self):
        content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
        copy = content.copy()
        self.assertIsInstance(copy, content.__class__)
        self.assertEqual(copy.annotate(), content.annotate())

    def assertDerivedBlocksEqual(self, source, target, noeol=False):
        """Assert that the derived matching blocks match real output"""
        source_lines = source.splitlines(True)
        target_lines = target.splitlines(True)
        def nl(line):
            if noeol and not line.endswith('\n'):
                return line + '\n'
            else:
                return line
        source_content = self._make_content([(None, nl(l)) for l in source_lines])
        target_content = self._make_content([(None, nl(l)) for l in target_lines])
        line_delta = source_content.line_delta(target_content)
        delta_blocks = list(KnitContent.get_line_delta_blocks(line_delta,
            source_lines, target_lines))
        matcher = KnitSequenceMatcher(None, source_lines, target_lines)
        matcher_blocks = list(list(matcher.get_matching_blocks()))
        self.assertEqual(matcher_blocks, delta_blocks)

    def test_get_line_delta_blocks(self):
        self.assertDerivedBlocksEqual('a\nb\nc\n', 'q\nc\n')
        self.assertDerivedBlocksEqual(TEXT_1, TEXT_1)
        self.assertDerivedBlocksEqual(TEXT_1, TEXT_1A)
        self.assertDerivedBlocksEqual(TEXT_1, TEXT_1B)
        self.assertDerivedBlocksEqual(TEXT_1B, TEXT_1A)
        self.assertDerivedBlocksEqual(TEXT_1A, TEXT_1B)
        self.assertDerivedBlocksEqual(TEXT_1A, '')
        self.assertDerivedBlocksEqual('', TEXT_1A)
        self.assertDerivedBlocksEqual('', '')
        self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\nd')

    def test_get_line_delta_blocks_noeol(self):
        """Handle historical knit deltas safely

        Some existing knit deltas don't consider the last line to differ
        when the only difference is whether it has a final newline.

        New knit deltas appear to always consider the last line to differ
        in this case.
        """
        self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\nd\n', noeol=True)
        self.assertDerivedBlocksEqual('a\nb\nc\nd\n', 'a\nb\nc', noeol=True)
        self.assertDerivedBlocksEqual('a\nb\nc\n', 'a\nb\nc', noeol=True)
        self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\n', noeol=True)


AB_MERGE_TEXT = """unchanged|Banana cup cake recipe
new-b|- bananas (do not use plantains!!!)
unchanged|- broken tea cups
new-a|- self-raising flour
"""
AB_MERGE = [tuple(l.split('|')) for l in AB_MERGE_TEXT.splitlines(True)]


def line_delta(from_lines, to_lines):
    """Generate line-based delta from one text to another"""
    s = difflib.SequenceMatcher(None, from_lines, to_lines)
    for op in s.get_opcodes():
        if op[0] == 'equal':
            continue
        yield '%d,%d,%d\n' % (op[1], op[2], op[4]-op[3])
        for i in range(op[3], op[4]):
            yield to_lines[i]


def apply_line_delta(basis_lines, delta_lines):
    """Apply a line-based perfect diff

    basis_lines -- text to apply the patch to
    delta_lines -- diff instructions and content
    """
    out = basis_lines[:]
    i = 0
    offset = 0
    while i < len(delta_lines):
        l = delta_lines[i]
        a, b, c = map(long, l.split(','))
        i = i + 1
        out[offset+a:offset+b] = delta_lines[i:i+c]
        i = i + c
        offset = offset + (b - a) + c
    return out


class TestPlainKnitContent(TestCase, KnitContentTestsMixin):

    def _make_content(self, lines):
        annotated_content = AnnotatedKnitContent(lines)
        return PlainKnitContent(annotated_content.text(), 'bogus')

    def test_annotate(self):
        content = self._make_content([])
        self.assertEqual(content.annotate(), [])

        content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
        self.assertEqual(content.annotate(),
            [("bogus", "text1"), ("bogus", "text2")])

    def test_line_delta(self):
        content1 = self._make_content([("", "a"), ("", "b")])
        content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
        self.assertEqual(content1.line_delta(content2),
            [(1, 2, 2, ["a", "c"])])

    def test_line_delta_iter(self):
        content1 = self._make_content([("", "a"), ("", "b")])
        content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
        it = content1.line_delta_iter(content2)
        self.assertEqual(it.next(), (1, 2, 2, ["a", "c"]))
        self.assertRaises(StopIteration, it.next)


class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin):

    def _make_content(self, lines):
        return AnnotatedKnitContent(lines)

    def test_annotate(self):
        content = self._make_content([])
        self.assertEqual(content.annotate(), [])

        content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
        self.assertEqual(content.annotate(),
            [("origin1", "text1"), ("origin2", "text2")])

    def test_line_delta(self):
        content1 = self._make_content([("", "a"), ("", "b")])
        content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
        self.assertEqual(content1.line_delta(content2),
            [(1, 2, 2, [("", "a"), ("", "c")])])

    def test_line_delta_iter(self):
        content1 = self._make_content([("", "a"), ("", "b")])
        content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
        it = content1.line_delta_iter(content2)
        self.assertEqual(it.next(), (1, 2, 2, [("", "a"), ("", "c")]))
        self.assertRaises(StopIteration, it.next)


class MockTransport(object):

    def __init__(self, file_lines=None):
        self.file_lines = file_lines
        self.calls = []
        # We have no base directory for the MockTransport
        self.base = ''

    def get(self, filename):
        if self.file_lines is None:
            raise NoSuchFile(filename)
        else:
            return StringIO("\n".join(self.file_lines))

    def readv(self, relpath, offsets):
        fp = self.get(relpath)
        for offset, size in offsets:
            fp.seek(offset)
            yield offset, fp.read(size)

    def __getattr__(self, name):
        def queue_call(*args, **kwargs):
            self.calls.append((name, args, kwargs))
        return queue_call
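
    # Note: attribute lookups not defined above (for example
    # put_file_non_atomic) resolve to queue_call via __getattr__, so the
    # index tests below can assert on the recorded (name, args, kwargs)
    # tuples left in self.calls.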


class KnitRecordAccessTestsMixin(object):
    """Tests for getting and putting knit records."""

    def test_add_raw_records(self):
        """Add_raw_records adds records retrievable later."""
        access = self.get_access()
        memos = access.add_raw_records([('key', 10)], '1234567890')
        self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))

    def test_add_several_raw_records(self):
        """add_raw_records with many records and read some back."""
        access = self.get_access()
        memos = access.add_raw_records([('key', 10), ('key2', 2), ('key3', 5)],
            '12345678901234567')
        self.assertEqual(['1234567890', '12', '34567'],
            list(access.get_raw_records(memos)))
        self.assertEqual(['1234567890'],
            list(access.get_raw_records(memos[0:1])))
        self.assertEqual(['12'],
            list(access.get_raw_records(memos[1:2])))
        self.assertEqual(['34567'],
            list(access.get_raw_records(memos[2:3])))
        self.assertEqual(['1234567890', '34567'],
            list(access.get_raw_records(memos[0:1] + memos[2:3])))


class TestKnitKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
    """Tests for the .kndx implementation."""

    def get_access(self):
        """Get a .knit style access instance."""
        mapper = ConstantMapper("foo")
        access = _KnitKeyAccess(self.get_transport(), mapper)
        return access


class TestWeaveToKnit(KnitTests):

    def test_weave_to_knit_matches(self):
        # check that the WeaveToKnit is_compatible function
        # registers True for a Weave to a Knit.
        w = Weave()
        k = self.make_test_knit()
        self.failUnless(WeaveToKnit.is_compatible(w, k))
        self.failIf(WeaveToKnit.is_compatible(k, w))
        self.failIf(WeaveToKnit.is_compatible(w, w))
        self.failIf(WeaveToKnit.is_compatible(k, k))


class TestPackKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
    """Tests for the pack based access."""

    def get_access(self):
        return self._get_access()[0]

    def _get_access(self, packname='packfile', index='FOO'):
        transport = self.get_transport()
        def write_data(bytes):
            transport.append_bytes(packname, bytes)
        writer = pack.ContainerWriter(write_data)
        writer.begin()
        access = _DirectPackAccess({})
        access.set_writer(writer, index, (transport, packname))
        return access, writer

    def test_read_from_several_packs(self):
        access, writer = self._get_access()
        memos = []
        memos.extend(access.add_raw_records([('key', 10)], '1234567890'))
        writer.end()
        access, writer = self._get_access('pack2', 'FOOBAR')
        memos.extend(access.add_raw_records([('key', 5)], '12345'))
        writer.end()
        access, writer = self._get_access('pack3', 'BAZ')
        memos.extend(access.add_raw_records([('key', 5)], 'alpha'))
        writer.end()
        transport = self.get_transport()
        access = _DirectPackAccess({"FOO":(transport, 'packfile'),
            "FOOBAR":(transport, 'pack2'),
            "BAZ":(transport, 'pack3')})
        self.assertEqual(['1234567890', '12345', 'alpha'],
            list(access.get_raw_records(memos)))
        self.assertEqual(['1234567890'],
            list(access.get_raw_records(memos[0:1])))
        self.assertEqual(['12345'],
            list(access.get_raw_records(memos[1:2])))
        self.assertEqual(['alpha'],
            list(access.get_raw_records(memos[2:3])))
        self.assertEqual(['1234567890', 'alpha'],
            list(access.get_raw_records(memos[0:1] + memos[2:3])))

    def test_set_writer(self):
        """The writer should be settable post construction."""
        access = _DirectPackAccess({})
        transport = self.get_transport()
        packname = 'packfile'
        index = 'foo'
        def write_data(bytes):
            transport.append_bytes(packname, bytes)
        writer = pack.ContainerWriter(write_data)
        writer.begin()
        access.set_writer(writer, index, (transport, packname))
        memos = access.add_raw_records([('key', 10)], '1234567890')
        writer.end()
        self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))


class LowLevelKnitDataTests(TestCase):

    def create_gz_content(self, text):
        sio = StringIO()
        gz_file = gzip.GzipFile(mode='wb', fileobj=sio)
        gz_file.write(text)
        gz_file.close()
        return sio.getvalue()
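
    # The records built below follow the knit data-file layout these tests
    # exercise: a gzipped block whose first line is
    # "version <rev-id> <line-count> <sha1>", followed by the content lines
    # and a trailing "end <rev-id>" line.  test_uncompressed_data stores the
    # same bytes without gzipping them to show that this is rejected.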

    def test_valid_knit_data(self):
        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
        gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                        'foo\n'
                                        'bar\n'
                                        'end rev-id-1\n'
                                        % (sha1sum,))
        transport = MockTransport([gz_txt])
        access = _KnitKeyAccess(transport, ConstantMapper('filename'))
        knit = KnitVersionedFiles(None, access)
        records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]

        contents = list(knit._read_records_iter(records))
        self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'],
            '4e48e2c9a3d2ca8a708cb0cc545700544efb5021')], contents)

        raw_contents = list(knit._read_records_iter_raw(records))
        self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)

    def test_not_enough_lines(self):
        sha1sum = osutils.sha('foo\n').hexdigest()
        # record says 2 lines data says 1
        gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                        'foo\n'
                                        'end rev-id-1\n'
                                        % (sha1sum,))
        transport = MockTransport([gz_txt])
        access = _KnitKeyAccess(transport, ConstantMapper('filename'))
        knit = KnitVersionedFiles(None, access)
        records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter(records))

        # read_records_iter_raw won't detect that sort of mismatch/corruption
        raw_contents = list(knit._read_records_iter_raw(records))
        self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)

    def test_too_many_lines(self):
        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
        # record says 1 line, data says 2
        gz_txt = self.create_gz_content('version rev-id-1 1 %s\n'
                                        'foo\n'
                                        'bar\n'
                                        'end rev-id-1\n'
                                        % (sha1sum,))
        transport = MockTransport([gz_txt])
        access = _KnitKeyAccess(transport, ConstantMapper('filename'))
        knit = KnitVersionedFiles(None, access)
        records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter(records))

        # read_records_iter_raw won't detect that sort of mismatch/corruption
        raw_contents = list(knit._read_records_iter_raw(records))
        self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)

    def test_mismatched_version_id(self):
        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
        gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                        'foo\n'
                                        'bar\n'
                                        'end rev-id-1\n'
                                        % (sha1sum,))
        transport = MockTransport([gz_txt])
        access = _KnitKeyAccess(transport, ConstantMapper('filename'))
        knit = KnitVersionedFiles(None, access)
        # We are asking for rev-id-2, but the data is rev-id-1
        records = [(('rev-id-2',), (('rev-id-2',), 0, len(gz_txt)))]
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter(records))

        # read_records_iter_raw detects mismatches in the header
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter_raw(records))

    def test_uncompressed_data(self):
        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
        txt = ('version rev-id-1 2 %s\n'
               'foo\n'
               'bar\n'
               'end rev-id-1\n'
               % (sha1sum,))
        transport = MockTransport([txt])
        access = _KnitKeyAccess(transport, ConstantMapper('filename'))
        knit = KnitVersionedFiles(None, access)
        records = [(('rev-id-1',), (('rev-id-1',), 0, len(txt)))]

        # We don't have valid gzip data ==> corrupt
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter(records))

        # read_records_iter_raw will notice the bad data
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter_raw(records))

    def test_corrupted_data(self):
        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
        gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                        'foo\n'
                                        'bar\n'
                                        'end rev-id-1\n'
                                        % (sha1sum,))
        # Change 2 bytes in the middle to \xff
        gz_txt = gz_txt[:10] + '\xff\xff' + gz_txt[12:]
        transport = MockTransport([gz_txt])
        access = _KnitKeyAccess(transport, ConstantMapper('filename'))
        knit = KnitVersionedFiles(None, access)
        records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter(records))
        # read_records_iter_raw will barf on bad gz data
        self.assertRaises(errors.KnitCorrupt, list,
            knit._read_records_iter_raw(records))


class LowLevelKnitIndexTests(TestCase):

    def get_knit_index(self, transport, name, mode):
        mapper = ConstantMapper(name)
        orig = knit._load_data
        def reset():
            knit._load_data = orig
        self.addCleanup(reset)
        from bzrlib._knit_load_data_py import _load_data_py
        knit._load_data = _load_data_py
        allow_writes = lambda: 'w' in mode
        return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
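
    # These tests force the pure-Python .kndx parser (_load_data_py) and
    # restore the original knit._load_data via addCleanup; the
    # LowLevelKnitIndexTests_c subclass further down swaps in the compiled
    # parser instead, so both implementations run against the same cases.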

    def test_create_file(self):
        transport = MockTransport()
        index = self.get_knit_index(transport, "filename", "w")
        index.keys()
        call = transport.calls.pop(0)
        # call[1][1] is a StringIO - we can't test it by simple equality.
        self.assertEqual('put_file_non_atomic', call[0])
        self.assertEqual('filename.kndx', call[1][0])
        # With no history, _KndxIndex writes a new index:
        self.assertEqual(_KndxIndex.HEADER,
            call[1][1].getvalue())
        self.assertEqual({'create_parent_dir': True}, call[2])

    def test_read_utf8_version_id(self):
        unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
        utf8_revision_id = unicode_revision_id.encode('utf-8')
        transport = MockTransport([
            _KndxIndex.HEADER,
            '%s option 0 1 :' % (utf8_revision_id,)
            ])
        index = self.get_knit_index(transport, "filename", "r")
        # _KndxIndex is a private class, and deals in utf8 revision_ids, not
        # Unicode revision_ids.
        self.assertEqual({(utf8_revision_id,):()},
            index.get_parent_map(index.keys()))
        self.assertFalse((unicode_revision_id,) in index.keys())

    def test_read_utf8_parents(self):
        unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
        utf8_revision_id = unicode_revision_id.encode('utf-8')
        transport = MockTransport([
            _KndxIndex.HEADER,
            "version option 0 1 .%s :" % (utf8_revision_id,)
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertEqual({("version",):((utf8_revision_id,),)},
            index.get_parent_map(index.keys()))

    def test_read_ignore_corrupted_lines(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "corrupted",
            "corrupted options 0 1 .b .c ",
            "version options 0 1 :"
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertEqual(1, len(index.keys()))
        self.assertEqual(set([("version",)]), index.keys())

    def test_read_corrupted_header(self):
        transport = MockTransport(['not a bzr knit index header\n'])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertRaises(KnitHeaderError, index.keys)

    def test_read_duplicate_entries(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "parent options 0 1 :",
            "version options1 0 1 0 :",
            "version options2 1 2 .other :",
            "version options3 3 4 0 .other :"
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertEqual(2, len(index.keys()))
        # check that the index used is the first one written. (Specific
        # to KnitIndex style indices.)
        self.assertEqual("1", index._dictionary_compress([("version",)]))
        self.assertEqual((("version",), 3, 4), index.get_position(("version",)))
        self.assertEqual(["options3"], index.get_options(("version",)))
        self.assertEqual({("version",):(("parent",), ("other",))},
            index.get_parent_map([("version",)]))

    def test_read_compressed_parents(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 1 :",
            "b option 0 1 0 :",
            "c option 0 1 1 0 :",
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertEqual({("b",):(("a",),), ("c",):(("b",), ("a",))},
            index.get_parent_map([("b",), ("c",)]))

    def test_write_utf8_version_id(self):
        unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
        utf8_revision_id = unicode_revision_id.encode('utf-8')
        transport = MockTransport([
            _KndxIndex.HEADER
            ])
        index = self.get_knit_index(transport, "filename", "r")
        index.add_records([
            ((utf8_revision_id,), ["option"], ((utf8_revision_id,), 0, 1), [])])
        call = transport.calls.pop(0)
        # call[1][1] is a StringIO - we can't test it by simple equality.
        self.assertEqual('put_file_non_atomic', call[0])
        self.assertEqual('filename.kndx', call[1][0])
        # With no history, _KndxIndex writes a new index:
        self.assertEqual(_KndxIndex.HEADER +
            "\n%s option 0 1 :" % (utf8_revision_id,),
            call[1][1].getvalue())
        self.assertEqual({'create_parent_dir': True}, call[2])

    def test_write_utf8_parents(self):
        unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
        utf8_revision_id = unicode_revision_id.encode('utf-8')
        transport = MockTransport([
            _KndxIndex.HEADER
            ])
        index = self.get_knit_index(transport, "filename", "r")
        index.add_records([
            (("version",), ["option"], (("version",), 0, 1), [(utf8_revision_id,)])])
        call = transport.calls.pop(0)
        # call[1][1] is a StringIO - we can't test it by simple equality.
        self.assertEqual('put_file_non_atomic', call[0])
        self.assertEqual('filename.kndx', call[1][0])
        # With no history, _KndxIndex writes a new index:
        self.assertEqual(_KndxIndex.HEADER +
            "\nversion option 0 1 .%s :" % (utf8_revision_id,),
            call[1][1].getvalue())
        self.assertEqual({'create_parent_dir': True}, call[2])

    def test_keys(self):
        transport = MockTransport([
            _KndxIndex.HEADER
            ])
        index = self.get_knit_index(transport, "filename", "r")

        self.assertEqual(set(), index.keys())

        index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
        self.assertEqual(set([("a",)]), index.keys())

        index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
        self.assertEqual(set([("a",)]), index.keys())

        index.add_records([(("b",), ["option"], (("b",), 0, 1), [])])
        self.assertEqual(set([("a",), ("b",)]), index.keys())

    def add_a_b(self, index, random_id=None):
        kwargs = {}
        if random_id is not None:
            kwargs["random_id"] = random_id
        index.add_records([
            (("a",), ["option"], (("a",), 0, 1), [("b",)]),
            (("a",), ["opt"], (("a",), 1, 2), [("c",)]),
            (("b",), ["option"], (("b",), 2, 3), [("a",)])
            ], **kwargs)

    def assertIndexIsAB(self, index):
        self.assertEqual({
            ("a",): (("c",),),
            ("b",): (("a",),),
            },
            index.get_parent_map(index.keys()))
        self.assertEqual((("a",), 1, 2), index.get_position(("a",)))
        self.assertEqual((("b",), 2, 3), index.get_position(("b",)))
        self.assertEqual(["opt"], index.get_options(("a",)))

    def test_add_versions(self):
        transport = MockTransport([
            _KndxIndex.HEADER
            ])
        index = self.get_knit_index(transport, "filename", "r")

        self.add_a_b(index)
        call = transport.calls.pop(0)
        # call[1][1] is a StringIO - we can't test it by simple equality.
        self.assertEqual('put_file_non_atomic', call[0])
        self.assertEqual('filename.kndx', call[1][0])
        # With no history, _KndxIndex writes a new index:
        self.assertEqual(
            _KndxIndex.HEADER +
            "\na option 0 1 .b :"
            "\na opt 1 2 .c :"
            "\nb option 2 3 0 :",
            call[1][1].getvalue())
        self.assertEqual({'create_parent_dir': True}, call[2])
        self.assertIndexIsAB(index)

    def test_add_versions_random_id_is_accepted(self):
        transport = MockTransport([
            _KndxIndex.HEADER
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.add_a_b(index, random_id=True)

    def test_delay_create_and_add_versions(self):
        transport = MockTransport()

        index = self.get_knit_index(transport, "filename", "w")
        self.assertEqual([], transport.calls)
        self.add_a_b(index)
        #[ {"dir_mode": 0777, "create_parent_dir": True, "mode": "wb"},
        # Two calls: one during which we load the existing index (and when it's
        # missing, create it), then a second where we write the contents out.
        self.assertEqual(2, len(transport.calls))
        call = transport.calls.pop(0)
        self.assertEqual('put_file_non_atomic', call[0])
        self.assertEqual('filename.kndx', call[1][0])
        # With no history, _KndxIndex writes a new index:
        self.assertEqual(_KndxIndex.HEADER, call[1][1].getvalue())
        self.assertEqual({'create_parent_dir': True}, call[2])
        call = transport.calls.pop(0)
        # call[1][1] is a StringIO - we can't test it by simple equality.
        self.assertEqual('put_file_non_atomic', call[0])
        self.assertEqual('filename.kndx', call[1][0])
        # With no history, _KndxIndex writes a new index:
        self.assertEqual(
            _KndxIndex.HEADER +
            "\na option 0 1 .b :"
            "\na opt 1 2 .c :"
            "\nb option 2 3 0 :",
            call[1][1].getvalue())
        self.assertEqual({'create_parent_dir': True}, call[2])

    def test_get_position(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 1 :",
            "b option 1 2 :"
            ])
        index = self.get_knit_index(transport, "filename", "r")

        self.assertEqual((("a",), 0, 1), index.get_position(("a",)))
        self.assertEqual((("b",), 1, 2), index.get_position(("b",)))

    def test_get_method(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a fulltext,unknown 0 1 :",
            "b unknown,line-delta 1 2 :",
            "c bad 3 4 :"
            ])
        index = self.get_knit_index(transport, "filename", "r")

        self.assertEqual("fulltext", index.get_method("a"))
        self.assertEqual("line-delta", index.get_method("b"))
        self.assertRaises(errors.KnitIndexUnknownMethod, index.get_method, "c")

    def test_get_options(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a opt1 0 1 :",
            "b opt2,opt3 1 2 :"
            ])
        index = self.get_knit_index(transport, "filename", "r")

        self.assertEqual(["opt1"], index.get_options("a"))
        self.assertEqual(["opt2", "opt3"], index.get_options("b"))

    def test_get_parent_map(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 1 :",
            "b option 1 2 0 .c :",
            "c option 1 2 1 0 .e :"
            ])
        index = self.get_knit_index(transport, "filename", "r")

        self.assertEqual({
            ("a",):(),
            ("b",):(("a",), ("c",)),
            ("c",):(("b",), ("a",), ("e",)),
            }, index.get_parent_map(index.keys()))

    def test_impossible_parent(self):
        """Test we get KnitCorrupt if the parent couldn't possibly exist."""
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 1 :",
            "b option 0 1 4 :"  # We don't have a 4th record
            ])
        index = self.get_knit_index(transport, 'filename', 'r')
        try:
            self.assertRaises(errors.KnitCorrupt, index.keys)
        except TypeError, e:
            if (str(e) == ('exceptions must be strings, classes, or instances,'
                           ' not exceptions.IndexError')
                and sys.version_info[0:2] >= (2,5)):
                self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                  ' raising new style exceptions with python'
                                  ' >=2.5')
            else:
                raise

    def test_corrupted_parent(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 1 :",
            "b option 0 1 :",
            "c option 0 1 1v :", # Can't have a parent of '1v'
            ])
        index = self.get_knit_index(transport, 'filename', 'r')
        try:
            self.assertRaises(errors.KnitCorrupt, index.keys)
        except TypeError, e:
            if (str(e) == ('exceptions must be strings, classes, or instances,'
                           ' not exceptions.ValueError')
                and sys.version_info[0:2] >= (2,5)):
                self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                  ' raising new style exceptions with python'
                                  ' >=2.5')
            else:
                raise

    def test_corrupted_parent_in_list(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 1 :",
            "b option 0 1 :",
            "c option 0 1 1 v :", # Can't have a parent of 'v'
            ])
        index = self.get_knit_index(transport, 'filename', 'r')
        try:
            self.assertRaises(errors.KnitCorrupt, index.keys)
        except TypeError, e:
            if (str(e) == ('exceptions must be strings, classes, or instances,'
                           ' not exceptions.ValueError')
                and sys.version_info[0:2] >= (2,5)):
                self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                  ' raising new style exceptions with python'
                                  ' >=2.5')
            else:
                raise

    def test_invalid_position(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 1v 1 :",
            ])
        index = self.get_knit_index(transport, 'filename', 'r')
        try:
            self.assertRaises(errors.KnitCorrupt, index.keys)
        except TypeError, e:
            if (str(e) == ('exceptions must be strings, classes, or instances,'
                           ' not exceptions.ValueError')
                and sys.version_info[0:2] >= (2,5)):
                self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                  ' raising new style exceptions with python'
                                  ' >=2.5')
            else:
                raise

    def test_invalid_size(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 1 1v :",
            ])
        index = self.get_knit_index(transport, 'filename', 'r')
        try:
            self.assertRaises(errors.KnitCorrupt, index.keys)
        except TypeError, e:
            if (str(e) == ('exceptions must be strings, classes, or instances,'
                           ' not exceptions.ValueError')
                and sys.version_info[0:2] >= (2,5)):
                self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
                                  ' raising new style exceptions with python'
                                  ' >=2.5')
            else:
                raise

    def test_short_line(self):
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 10  :",
            "b option 10 10 0", # This line isn't terminated, ignored
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertEqual(set([('a',)]), index.keys())

    def test_skip_incomplete_record(self):
        # A line with bogus data should just be skipped
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 10  :",
            "b option 10 10 0", # This line isn't terminated, ignored
            "c option 20 10 0 :", # Properly terminated, and starts with '\n'
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertEqual(set([('a',), ('c',)]), index.keys())

    def test_trailing_characters(self):
        # A line with bogus data should just be skipped
        transport = MockTransport([
            _KndxIndex.HEADER,
            "a option 0 10  :",
            "b option 10 10 0 :a", # This line has extra trailing characters
            "c option 20 10 0 :", # Properly terminated, and starts with '\n'
            ])
        index = self.get_knit_index(transport, "filename", "r")
        self.assertEqual(set([('a',), ('c',)]), index.keys())


class LowLevelKnitIndexTests_c(LowLevelKnitIndexTests):

    _test_needs_features = [CompiledKnitFeature]

    def get_knit_index(self, transport, name, mode):
        mapper = ConstantMapper(name)
        orig = knit._load_data
        def reset():
            knit._load_data = orig
        self.addCleanup(reset)
        from bzrlib._knit_load_data_c import _load_data_c
        knit._load_data = _load_data_c
        allow_writes = lambda: mode == 'w'
        return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)


class KnitTests(TestCaseWithTransport):
    """Class containing knit test helper routines."""

    def make_test_knit(self, annotate=False, name='test'):
        mapper = ConstantMapper(name)
        return make_file_factory(annotate, mapper)(self.get_transport())


class TestKnitIndex(KnitTests):

    def test_add_versions_dictionary_compresses(self):
        """Adding versions to the index should update the lookup dict"""
        knit = self.make_test_knit()
        idx = knit._index
        idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])
        self.check_file_contents('test.kndx',
            '# bzr knit index 8\n'
            '\n'
            'a-1 fulltext 0 0 :'
            )
        idx.add_records([
            (('a-2',), ['fulltext'], (('a-2',), 0, 0), [('a-1',)]),
            (('a-3',), ['fulltext'], (('a-3',), 0, 0), [('a-2',)]),
            ])
        self.check_file_contents('test.kndx',
            '# bzr knit index 8\n'
            '\n'
            'a-1 fulltext 0 0 :\n'
            'a-2 fulltext 0 0 0 :\n'
            'a-3 fulltext 0 0 1 :'
            )
        self.assertEqual(set([('a-3',), ('a-1',), ('a-2',)]), idx.keys())
        self.assertEqual({
            ('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False)),
            ('a-2',): ((('a-2',), 0, 0), None, (('a-1',),), ('fulltext', False)),
            ('a-3',): ((('a-3',), 0, 0), None, (('a-2',),), ('fulltext', False)),
            }, idx.get_build_details(idx.keys()))
        self.assertEqual({('a-1',):(),
            ('a-2',):(('a-1',),),
            ('a-3',):(('a-2',),),},
            idx.get_parent_map(idx.keys()))

    def test_add_versions_fails_clean(self):
        """If add_versions fails in the middle, it restores a pristine state.

        Any modifications that are made to the index are reset if all versions
        cannot be added.
        """
        # This cheats a little bit by passing in a generator which will
        # raise an exception before the processing finishes
        # Other possibilities would be to have a version with the wrong number
        # of entries, or to make the backing transport unable to write any
        # files.
        knit = self.make_test_knit()
        idx = knit._index
        idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])

        class StopEarly(Exception):
            pass

        def generate_failure():
            """Add some entries and then raise an exception"""
            yield (('a-2',), ['fulltext'], (None, 0, 0), ('a-1',))
            yield (('a-3',), ['fulltext'], (None, 0, 0), ('a-2',))
            raise StopEarly()

        # Assert the pre-condition
        def assertA1Only():
            self.assertEqual(set([('a-1',)]), set(idx.keys()))
            self.assertEqual(
                {('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False))},
                idx.get_build_details([('a-1',)]))
            self.assertEqual({('a-1',):()}, idx.get_parent_map(idx.keys()))

        assertA1Only()
        self.assertRaises(StopEarly, idx.add_records, generate_failure())
        # And it shouldn't be modified
        assertA1Only()

    def test_knit_index_ignores_empty_files(self):
        # There was a race condition in older bzr, where a ^C at the right time
        # could leave an empty .kndx file, which bzr would later claim was a
        # corrupted file since the header was not present. In reality, the file
        # just wasn't created, so it should be ignored.
        t = get_transport('.')
        t.put_bytes('test.kndx', '')

        knit = self.make_test_knit()

    def test_knit_index_checks_header(self):
        t = get_transport('.')
        t.put_bytes('test.kndx', '# not really a knit header\n\n')
        k = self.make_test_knit()
        self.assertRaises(KnitHeaderError, k.keys)


class TestGraphIndexKnit(KnitTests):
    """Tests for knits using a GraphIndex rather than a KnitIndex."""

    def make_g_index(self, name, ref_lists=0, nodes=[]):
        builder = GraphIndexBuilder(ref_lists)
        for node, references, value in nodes:
            builder.add_node(node, references, value)
        stream = builder.finish()
        trans = self.get_transport()
        size = trans.put_file(name, stream)
        return GraphIndex(trans, name, size)

    def two_graph_index(self, deltas=False, catch_adds=False):
        """Build a two-graph index.

        :param deltas: If true, use underlying indices with two node-ref
            lists and 'parent' set to a delta-compressed against tail.
        """
        # build a complex graph across several indices.
        if deltas:
            # delta compression in the index
            index1 = self.make_g_index('1', 2, [
                (('tip', ), 'N0 100', ([('parent', )], [], )),
                (('tail', ), '', ([], []))])
            index2 = self.make_g_index('2', 2, [
                (('parent', ), ' 100 78', ([('tail', ), ('ghost', )], [('tail', )])),
                (('separate', ), '', ([], []))])
        else:
            # just blob location and graph in the index.
            index1 = self.make_g_index('1', 1, [
                (('tip', ), 'N0 100', ([('parent', )], )),
                (('tail', ), '', ([], ))])
            index2 = self.make_g_index('2', 1, [
                (('parent', ), ' 100 78', ([('tail', ), ('ghost', )], )),
                (('separate', ), '', ([], ))])
        combined_index = CombinedGraphIndex([index1, index2])
        if catch_adds:
            self.combined_index = combined_index
            self.caught_entries = []
            add_callback = self.catch_add
        else:
            add_callback = None
        return _KnitGraphIndex(combined_index, lambda:True, deltas=deltas,
            add_callback=add_callback)
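
    # In the GraphIndex nodes above, the value string encodes the record's
    # storage coordinates: a leading 'N' flags a no-eol text (a space means
    # the text ends with a newline), followed by "<offset> <length>" in the
    # pack, which is why get_position(('tip',)) resolves to (index, 0, 100)
    # and get_options(('tip',)) includes 'no-eol' in the tests below.
    # Whether a record is a fulltext or a line-delta is inferred from the
    # second (compression parent) reference list when deltas=True.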

    def test_keys(self):
        index = self.two_graph_index()
        self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
            set(index.keys()))

    def test_get_position(self):
        index = self.two_graph_index()
        self.assertEqual((index._graph_index._indices[0], 0, 100),
            index.get_position(('tip',)))
        self.assertEqual((index._graph_index._indices[1], 100, 78),
            index.get_position(('parent',)))

    def test_get_method_deltas(self):
        index = self.two_graph_index(deltas=True)
        self.assertEqual('fulltext', index.get_method(('tip',)))
        self.assertEqual('line-delta', index.get_method(('parent',)))

    def test_get_method_no_deltas(self):
        # check that the parent-history lookup is ignored with deltas=False.
        index = self.two_graph_index(deltas=False)
        self.assertEqual('fulltext', index.get_method(('tip',)))
        self.assertEqual('fulltext', index.get_method(('parent',)))

    def test_get_options_deltas(self):
        index = self.two_graph_index(deltas=True)
        self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
        self.assertEqual(['line-delta'], index.get_options(('parent',)))

    def test_get_options_no_deltas(self):
        # check that the parent-history lookup is ignored with deltas=False.
        index = self.two_graph_index(deltas=False)
        self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
        self.assertEqual(['fulltext'], index.get_options(('parent',)))

    def test_get_parent_map(self):
        index = self.two_graph_index()
        self.assertEqual({('parent',):(('tail',), ('ghost',))},
            index.get_parent_map([('parent',), ('ghost',)]))

    def catch_add(self, entries):
        self.caught_entries.append(entries)

    def test_add_no_callback_errors(self):
        index = self.two_graph_index()
        self.assertRaises(errors.ReadOnlyError, index.add_records,
            [(('new',), 'fulltext,no-eol', (None, 50, 60), ['separate'])])

    def test_add_version_smoke(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60),
            [('separate',)])])
        self.assertEqual([[(('new', ), 'N50 60', ((('separate',),),))]],
            self.caught_entries)

    def test_add_version_delta_not_delta_index(self):
        index = self.two_graph_index(catch_adds=True)
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
        self.assertEqual([], self.caught_entries)

    def test_add_version_same_dup(self):
        index = self.two_graph_index(catch_adds=True)
        # options can be spelt two different ways
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
        index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
        # position/length are ignored (because each pack could have fulltext or
        # delta, and be at a different position).
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
            [('parent',)])])
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
            [('parent',)])])
        # but neither should have added data:
        self.assertEqual([[], [], [], []], self.caught_entries)

    def test_add_version_different_dup(self):
        index = self.two_graph_index(deltas=True, catch_adds=True)
        # change options
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'line-delta,no-eol', (None, 0, 100), [('parent',)])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
        # parents
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
        self.assertEqual([], self.caught_entries)

    def test_add_versions_nodeltas(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([
            (('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
            (('new2',), 'fulltext', (None, 0, 6), [('new',)]),
            ])
        self.assertEqual([(('new', ), 'N50 60', ((('separate',),),)),
            (('new2', ), ' 0 6', ((('new',),),))],
            sorted(self.caught_entries[0]))
        self.assertEqual(1, len(self.caught_entries))

    def test_add_versions_deltas(self):
        index = self.two_graph_index(deltas=True, catch_adds=True)
        index.add_records([
            (('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
            (('new2',), 'line-delta', (None, 0, 6), [('new',)]),
            ])
        self.assertEqual([(('new', ), 'N50 60', ((('separate',),), ())),
            (('new2', ), ' 0 6', ((('new',),), (('new',),), ))],
            sorted(self.caught_entries[0]))
        self.assertEqual(1, len(self.caught_entries))

    def test_add_versions_delta_not_delta_index(self):
        index = self.two_graph_index(catch_adds=True)
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
        self.assertEqual([], self.caught_entries)

    def test_add_versions_random_id_accepted(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([], random_id=True)

    def test_add_versions_same_dup(self):
        index = self.two_graph_index(catch_adds=True)
        # options can be spelt two different ways
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100),
            [('parent',)])])
        index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100),
            [('parent',)])])
        # position/length are ignored (because each pack could have fulltext or
        # delta, and be at a different position).
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
            [('parent',)])])
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
            [('parent',)])])
        # but neither should have added data.
        self.assertEqual([[], [], [], []], self.caught_entries)

    def test_add_versions_different_dup(self):
        index = self.two_graph_index(deltas=True, catch_adds=True)
        # change options
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'line-delta,no-eol', (None, 0, 100), [('parent',)])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
        # parents
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
        # change options in the second record
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)]),
             (('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
        self.assertEqual([], self.caught_entries)


class TestNoParentsGraphIndexKnit(KnitTests):
    """Tests for knits using _KnitGraphIndex with no parents."""

    def make_g_index(self, name, ref_lists=0, nodes=[]):
        builder = GraphIndexBuilder(ref_lists)
        for node, references in nodes:
            builder.add_node(node, references)
        stream = builder.finish()
        trans = self.get_transport()
        size = trans.put_file(name, stream)
        return GraphIndex(trans, name, size)

    def test_parents_deltas_incompatible(self):
        index = CombinedGraphIndex([])
        self.assertRaises(errors.KnitError, _KnitGraphIndex, lambda:True,
            index, deltas=True, parents=False)

    def two_graph_index(self, catch_adds=False):
        """Build a two-graph index.

        :param deltas: If true, use underlying indices with two node-ref
            lists and 'parent' set to a delta-compressed against tail.
        """
        # put several versions in the index.
        index1 = self.make_g_index('1', 0, [
            (('tip', ), 'N0 100'),
            (('tail', ), '')])
        index2 = self.make_g_index('2', 0, [
            (('parent', ), ' 100 78'),
            (('separate', ), '')])
        combined_index = CombinedGraphIndex([index1, index2])
        if catch_adds:
            self.combined_index = combined_index
            self.caught_entries = []
            add_callback = self.catch_add
        else:
            add_callback = None
        return _KnitGraphIndex(combined_index, lambda:True, parents=False,
            add_callback=add_callback)

    def test_keys(self):
        index = self.two_graph_index()
        self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
            set(index.keys()))

    def test_get_position(self):
        index = self.two_graph_index()
        self.assertEqual((index._graph_index._indices[0], 0, 100),
            index.get_position(('tip',)))
        self.assertEqual((index._graph_index._indices[1], 100, 78),
            index.get_position(('parent',)))

    def test_get_method(self):
        index = self.two_graph_index()
        self.assertEqual('fulltext', index.get_method(('tip',)))
        self.assertEqual('fulltext', index.get_method(('parent',)))

    def test_get_options(self):
        index = self.two_graph_index()
        self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
        self.assertEqual(['fulltext'], index.get_options(('parent',)))

    def test_get_parent_map(self):
        index = self.two_graph_index()
        self.assertEqual({('parent',):None},
            index.get_parent_map([('parent',), ('ghost',)]))

    def catch_add(self, entries):
        self.caught_entries.append(entries)

    def test_add_no_callback_errors(self):
        index = self.two_graph_index()
        self.assertRaises(errors.ReadOnlyError, index.add_records,
            [(('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)])])

    def test_add_version_smoke(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60), [])])
        self.assertEqual([[(('new', ), 'N50 60')]],
            self.caught_entries)

    def test_add_version_delta_not_delta_index(self):
        index = self.two_graph_index(catch_adds=True)
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('new',), 'no-eol,line-delta', (None, 0, 100), [])])
        self.assertEqual([], self.caught_entries)

    def test_add_version_same_dup(self):
        index = self.two_graph_index(catch_adds=True)
        # options can be spelt two different ways
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
        index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
        # position/length are ignored (because each pack could have fulltext or
        # delta, and be at a different position).
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
        # but neither should have added data.
        self.assertEqual([[], [], [], []], self.caught_entries)

    def test_add_version_different_dup(self):
        index = self.two_graph_index(catch_adds=True)
        # change options
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext', (None, 0, 100), [])])
        # parents
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
        self.assertEqual([], self.caught_entries)
1323

    def test_add_versions(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([
                (('new',), 'fulltext,no-eol', (None, 50, 60), []),
                (('new2',), 'fulltext', (None, 0, 6), []),
                ])
        self.assertEqual([(('new', ), 'N50 60'), (('new2', ), ' 0 6')],
            sorted(self.caught_entries[0]))
        self.assertEqual(1, len(self.caught_entries))

    def test_add_versions_delta_not_delta_index(self):
        index = self.two_graph_index(catch_adds=True)
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
        self.assertEqual([], self.caught_entries)

    def test_add_versions_parents_not_parents_index(self):
        index = self.two_graph_index(catch_adds=True)
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('new',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
        self.assertEqual([], self.caught_entries)

    def test_add_versions_random_id_accepted(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([], random_id=True)

    def test_add_versions_same_dup(self):
        index = self.two_graph_index(catch_adds=True)
        # options can be spelt two different ways
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
        index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
        # position/length are ignored (because each pack could have fulltext or
        # delta, and be at a different position).
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
        # but none of the calls should have added data.
        self.assertEqual([[], [], [], []], self.caught_entries)

    def test_add_versions_different_dup(self):
        index = self.two_graph_index(catch_adds=True)
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext', (None, 0, 100), [])])
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
        # change options in the second record
        self.assertRaises(errors.KnitCorrupt, index.add_records,
            [(('tip',), 'fulltext,no-eol', (None, 0, 100), []),
             (('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
        self.assertEqual([], self.caught_entries)

class TestStacking(KnitTests):
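
    """Tests of a test knit stacked on a 'basis' fallback knit."""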

    def get_basis_and_test_knit(self):
        basis = self.make_test_knit(name='basis')
        basis = RecordingVersionedFilesDecorator(basis)
        test = self.make_test_knit(name='test')
        test.add_fallback_versioned_files(basis)
        return basis, test
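
    # The basis is wrapped in RecordingVersionedFilesDecorator, so each test
    # can inspect basis.calls to see exactly which requests fell through to
    # the fallback.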

    def test_add_fallback_versioned_files(self):
        basis = self.make_test_knit(name='basis')
        test = self.make_test_knit(name='test')
        # It must not error; other tests test that the fallback is referred to
        # when accessing data.
        test.add_fallback_versioned_files(basis)

    def test_add_lines(self):
        # lines added to the test are not added to the basis
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_cross_border = ('quux',)
        key_delta = ('zaphod',)
        test.add_lines(key, (), ['foo\n'])
        self.assertEqual({}, basis.get_parent_map([key]))
        # lines added to the test that reference across the stack do a
        # fulltext.
        basis.add_lines(key_basis, (), ['foo\n'])
        basis.calls = []
        test.add_lines(key_cross_border, (key_basis,), ['foo\n'])
        self.assertEqual('fulltext', test._index.get_method(key_cross_border))
        self.assertEqual([("get_parent_map", set([key_basis]))], basis.calls)
        # Subsequent adds do delta.
        basis.calls = []
        test.add_lines(key_delta, (key_cross_border,), ['foo\n'])
        self.assertEqual('line-delta', test._index.get_method(key_delta))
        self.assertEqual([], basis.calls)

    def test_annotate(self):
        # annotations from the test knit are answered without asking the basis
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), ['foo\n'])
        details = test.annotate(key)
        self.assertEqual([(key, 'foo\n')], details)
        self.assertEqual([], basis.calls)
        # But texts that are not in the test knit are looked for in the basis
        # directly.
        basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
        basis.calls = []
        details = test.annotate(key_basis)
        self.assertEqual([(key_basis, 'foo\n'), (key_basis, 'bar\n')], details)
        # Not optimised to date:
        # self.assertEqual([("annotate", key_basis)], basis.calls)
        self.assertEqual([('get_parent_map', set([key_basis])),
            ('get_parent_map', set([key_basis])),
            ('get_parent_map', set([key_basis])),
            ('get_record_stream', [key_basis], 'unordered', True)],
            basis.calls)

    def test_check(self):
        # At the moment checking a stacked knit does implicitly check the
        # fallback files.
        basis, test = self.get_basis_and_test_knit()
        test.check()

    def test_get_parent_map(self):
        # parents in the test knit are answered without asking the basis
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), [])
        parent_map = test.get_parent_map([key])
        self.assertEqual({key: ()}, parent_map)
        self.assertEqual([], basis.calls)
        # But parents that are not in the test knit are looked for in the basis
        basis.add_lines(key_basis, (), [])
        basis.calls = []
        parent_map = test.get_parent_map([key, key_basis, key_missing])
        self.assertEqual({key: (),
            key_basis: ()}, parent_map)
        self.assertEqual([("get_parent_map", set([key_basis, key_missing]))],
            basis.calls)
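
    # get_record_stream must merge records from the test knit and the basis,
    # preserve the requested ordering across the stacking boundary, and return
    # an AbsentContentFactory for keys that exist in neither; the next four
    # tests cover the unordered/ordered and fulltext/delta combinations.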

    def test_get_record_stream_unordered_fulltexts(self):
        # records from the test knit are answered without asking the basis:
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), ['foo\n'])
        records = list(test.get_record_stream([key], 'unordered', True))
        self.assertEqual(1, len(records))
        self.assertEqual([], basis.calls)
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
        basis.calls = []
        records = list(test.get_record_stream([key_basis, key_missing],
            'unordered', True))
        self.assertEqual(2, len(records))
        calls = list(basis.calls)
        for record in records:
            self.assertSubset([record.key], (key_basis, key_missing))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                reference = list(basis.get_record_stream([key_basis],
                    'unordered', True))[0]
                self.assertEqual(reference.key, record.key)
                self.assertEqual(reference.sha1, record.sha1)
                self.assertEqual(reference.storage_kind, record.storage_kind)
                self.assertEqual(reference.get_bytes_as(reference.storage_kind),
                    record.get_bytes_as(record.storage_kind))
                self.assertEqual(reference.get_bytes_as('fulltext'),
                    record.get_bytes_as('fulltext'))
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_missing])),
            ("get_record_stream", [key_basis], 'unordered', True)],
            calls)

    def test_get_record_stream_ordered_fulltexts(self):
        # ordering is preserved down into the fallback store.
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_basis_2 = ('quux',)
        key_missing = ('missing',)
        test.add_lines(key, (key_basis,), ['foo\n'])
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
        basis.add_lines(key_basis_2, (), ['quux\n'])
        basis.calls = []
        # ask for in non-topological order
        records = list(test.get_record_stream(
            [key, key_basis, key_missing, key_basis_2], 'topological', True))
        self.assertEqual(4, len(records))
        results = []
        for record in records:
            self.assertSubset([record.key],
                (key_basis, key_missing, key_basis_2, key))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                results.append((record.key, record.sha1, record.storage_kind,
                    record.get_bytes_as('fulltext')))
        calls = list(basis.calls)
        order = [record[0] for record in results]
        self.assertEqual([key_basis_2, key_basis, key], order)
        for result in results:
            if result[0] == key:
                source = test
            else:
                source = basis
            record = source.get_record_stream([result[0]], 'unordered',
                True).next()
            self.assertEqual(record.key, result[0])
            self.assertEqual(record.sha1, result[1])
            self.assertEqual(record.storage_kind, result[2])
            self.assertEqual(record.get_bytes_as('fulltext'), result[3])
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
            # unordered is asked for by the underlying worker as it still
            # buffers everything while answering - which is a problem!
            ("get_record_stream", [key_basis_2, key_basis], 'unordered', True)],
            calls)

    def test_get_record_stream_unordered_deltas(self):
        # records from the test knit are answered without asking the basis:
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), ['foo\n'])
        records = list(test.get_record_stream([key], 'unordered', False))
        self.assertEqual(1, len(records))
        self.assertEqual([], basis.calls)
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
        basis.calls = []
        records = list(test.get_record_stream([key_basis, key_missing],
            'unordered', False))
        self.assertEqual(2, len(records))
        calls = list(basis.calls)
        for record in records:
            self.assertSubset([record.key], (key_basis, key_missing))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                reference = list(basis.get_record_stream([key_basis],
                    'unordered', False))[0]
                self.assertEqual(reference.key, record.key)
                self.assertEqual(reference.sha1, record.sha1)
                self.assertEqual(reference.storage_kind, record.storage_kind)
                self.assertEqual(reference.get_bytes_as(reference.storage_kind),
                    record.get_bytes_as(record.storage_kind))
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_missing])),
            ("get_record_stream", [key_basis], 'unordered', False)],
            calls)

    def test_get_record_stream_ordered_deltas(self):
        # ordering is preserved down into the fallback store.
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_basis_2 = ('quux',)
        key_missing = ('missing',)
        test.add_lines(key, (key_basis,), ['foo\n'])
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
        basis.add_lines(key_basis_2, (), ['quux\n'])
        basis.calls = []
        # ask for in non-topological order
        records = list(test.get_record_stream(
            [key, key_basis, key_missing, key_basis_2], 'topological', False))
        self.assertEqual(4, len(records))
        results = []
        for record in records:
            self.assertSubset([record.key],
                (key_basis, key_missing, key_basis_2, key))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                results.append((record.key, record.sha1, record.storage_kind,
                    record.get_bytes_as(record.storage_kind)))
        calls = list(basis.calls)
        order = [record[0] for record in results]
        self.assertEqual([key_basis_2, key_basis, key], order)
        for result in results:
            if result[0] == key:
                source = test
            else:
                source = basis
            record = source.get_record_stream([result[0]], 'unordered',
                False).next()
            self.assertEqual(record.key, result[0])
            self.assertEqual(record.sha1, result[1])
            self.assertEqual(record.storage_kind, result[2])
            self.assertEqual(record.get_bytes_as(record.storage_kind), result[3])
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
            ("get_record_stream", [key_basis_2, key_basis], 'topological', False)],
            calls)

    def test_get_sha1s(self):
        # sha1's in the test knit are answered without asking the basis
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), ['foo\n'])
        key_sha1sum = osutils.sha('foo\n').hexdigest()
        sha1s = test.get_sha1s([key])
        self.assertEqual({key: key_sha1sum}, sha1s)
        self.assertEqual([], basis.calls)
        # But texts that are not in the test knit are looked for in the basis
        # directly (rather than via text reconstruction) so that remote servers
        # etc don't have to answer with full content.
        basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
        basis_sha1sum = osutils.sha('foo\nbar\n').hexdigest()
        basis.calls = []
        sha1s = test.get_sha1s([key, key_missing, key_basis])
        self.assertEqual({key: key_sha1sum,
            key_basis: basis_sha1sum}, sha1s)
        self.assertEqual([("get_sha1s", set([key_basis, key_missing]))],
            basis.calls)

    def test_insert_record_stream(self):
        # records are inserted as normal; insert_record_stream builds on
        # add_lines, so a smoke test should be all that's needed:
        key_basis = ('bar',)
        key_delta = ('zaphod',)
        basis, test = self.get_basis_and_test_knit()
        source = self.make_test_knit(name='source')
        basis.add_lines(key_basis, (), ['foo\n'])
        basis.calls = []
        source.add_lines(key_basis, (), ['foo\n'])
        source.add_lines(key_delta, (key_basis,), ['bar\n'])
        stream = source.get_record_stream([key_delta], 'unordered', False)
        test.insert_record_stream(stream)
        self.assertEqual([("get_parent_map", set([key_basis]))],
            basis.calls)
        self.assertEqual({key_delta: (key_basis,)},
            test.get_parent_map([key_delta]))
        self.assertEqual('bar\n', test.get_record_stream([key_delta],
            'unordered', True).next().get_bytes_as('fulltext'))

    def test_iter_lines_added_or_present_in_keys(self):
        # Lines from the basis are returned, and lines for a given key are only
        # returned once.
        key1 = ('foo1',)
        key2 = ('foo2',)
        # all sources are asked for keys:
        basis, test = self.get_basis_and_test_knit()
        basis.add_lines(key1, (), ["foo"])
        basis.calls = []
        lines = list(test.iter_lines_added_or_present_in_keys([key1]))
        self.assertEqual([("foo\n", key1)], lines)
        self.assertEqual([("iter_lines_added_or_present_in_keys", set([key1]))],
            basis.calls)
        # keys in both are not duplicated:
        test.add_lines(key2, (), ["bar\n"])
        basis.add_lines(key2, (), ["bar\n"])
        basis.calls = []
        lines = list(test.iter_lines_added_or_present_in_keys([key2]))
        self.assertEqual([("bar\n", key2)], lines)
        self.assertEqual([], basis.calls)

    def test_keys(self):
        key1 = ('foo1',)
        key2 = ('foo2',)
        # all sources are asked for keys:
        basis, test = self.get_basis_and_test_knit()
        keys = test.keys()
        self.assertEqual(set(), set(keys))
        self.assertEqual([("keys",)], basis.calls)
        # keys from a basis are returned:
        basis.add_lines(key1, (), [])
        basis.calls = []
        keys = test.keys()
        self.assertEqual(set([key1]), set(keys))
        self.assertEqual([("keys",)], basis.calls)
        # keys in both are not duplicated:
        test.add_lines(key2, (), [])
        basis.add_lines(key2, (), [])
        basis.calls = []
        keys = test.keys()
        self.assertEqual(2, len(keys))
        self.assertEqual(set([key1, key2]), set(keys))
        self.assertEqual([("keys",)], basis.calls)

    def test_add_mpdiffs(self):
        # records are inserted as normal; add_mpdiffs builds on
        # add_lines, so a smoke test should be all that's needed:
        key_basis = ('bar',)
        key_delta = ('zaphod',)
        basis, test = self.get_basis_and_test_knit()
        source = self.make_test_knit(name='source')
        basis.add_lines(key_basis, (), ['foo\n'])
        basis.calls = []
        source.add_lines(key_basis, (), ['foo\n'])
        source.add_lines(key_delta, (key_basis,), ['bar\n'])
        diffs = source.make_mpdiffs([key_delta])
        test.add_mpdiffs([(key_delta, (key_basis,),
            source.get_sha1s([key_delta])[key_delta], diffs[0])])
        self.assertEqual([("get_parent_map", set([key_basis])),
            ('get_record_stream', [key_basis], 'unordered', True),
            ('get_parent_map', set([key_basis]))],
            basis.calls)
        self.assertEqual({key_delta: (key_basis,)},
            test.get_parent_map([key_delta]))
        self.assertEqual('bar\n', test.get_record_stream([key_delta],
            'unordered', True).next().get_bytes_as('fulltext'))

    def test_make_mpdiffs(self):
        # Generating an mpdiff across a stacking boundary should detect parent
        # texts from the basis.
        key = ('foo',)
        key_left = ('bar',)
        key_right = ('zaphod',)
        basis, test = self.get_basis_and_test_knit()
        basis.add_lines(key_left, (), ['bar\n'])
        basis.add_lines(key_right, (), ['zaphod\n'])
        basis.calls = []
        test.add_lines(key, (key_left, key_right),
            ['bar\n', 'foo\n', 'zaphod\n'])
        diffs = test.make_mpdiffs([key])
        self.assertEqual([
            multiparent.MultiParent([multiparent.ParentText(0, 0, 0, 1),
                multiparent.NewText(['foo\n']),
                multiparent.ParentText(1, 0, 2, 1)])],
            diffs)
        self.assertEqual(4, len(basis.calls))
        self.assertEqual([
            ("get_parent_map", set([key_left, key_right])),
            ("get_parent_map", set([key_left, key_right])),
            ("get_parent_map", set([key_left, key_right])),
            ],
            basis.calls[:3])
        last_call = basis.calls[3]
        self.assertEqual('get_record_stream', last_call[0])
        self.assertEqual(set([key_left, key_right]), set(last_call[1]))
        self.assertEqual('unordered', last_call[2])
        self.assertEqual(True, last_call[3])