1
# Copyright (C) 2006-2011 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
"""Tests for Knit data structure"""
19
from cStringIO import StringIO
32
from bzrlib.errors import (
36
from bzrlib.index import *
37
from bzrlib.knit import (
42
_VFContentMapGenerator,
48
from bzrlib.patiencediff import PatienceSequenceMatcher
49
from bzrlib.repofmt import (
53
from bzrlib.tests import (
55
TestCaseWithMemoryTransport,
56
TestCaseWithTransport,
59
from bzrlib.versionedfile import (
62
network_bytes_to_kind_and_offset,
63
RecordingVersionedFilesDecorator,
67
compiled_knit_feature = tests.ModuleAvailableFeature(
68
'bzrlib._knit_load_data_pyx')
71
class KnitContentTestsMixin(object):
73
def test_constructor(self):
74
content = self._make_content([])
77
content = self._make_content([])
78
self.assertEqual(content.text(), [])
80
content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
81
self.assertEqual(content.text(), ["text1", "text2"])
84
content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
86
self.assertIsInstance(copy, content.__class__)
87
self.assertEqual(copy.annotate(), content.annotate())
89
def assertDerivedBlocksEqual(self, source, target, noeol=False):
90
"""Assert that the derived matching blocks match real output"""
91
source_lines = source.splitlines(True)
92
target_lines = target.splitlines(True)
94
if noeol and not line.endswith('\n'):
98
source_content = self._make_content([(None, nl(l)) for l in source_lines])
99
target_content = self._make_content([(None, nl(l)) for l in target_lines])
100
line_delta = source_content.line_delta(target_content)
101
delta_blocks = list(KnitContent.get_line_delta_blocks(line_delta,
102
source_lines, target_lines))
103
matcher = PatienceSequenceMatcher(None, source_lines, target_lines)
104
matcher_blocks = list(matcher.get_matching_blocks())
105
self.assertEqual(matcher_blocks, delta_blocks)
107
def test_get_line_delta_blocks(self):
108
self.assertDerivedBlocksEqual('a\nb\nc\n', 'q\nc\n')
109
self.assertDerivedBlocksEqual(TEXT_1, TEXT_1)
110
self.assertDerivedBlocksEqual(TEXT_1, TEXT_1A)
111
self.assertDerivedBlocksEqual(TEXT_1, TEXT_1B)
112
self.assertDerivedBlocksEqual(TEXT_1B, TEXT_1A)
113
self.assertDerivedBlocksEqual(TEXT_1A, TEXT_1B)
114
self.assertDerivedBlocksEqual(TEXT_1A, '')
115
self.assertDerivedBlocksEqual('', TEXT_1A)
116
self.assertDerivedBlocksEqual('', '')
117
self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\nd')
119
def test_get_line_delta_blocks_noeol(self):
120
"""Handle historical knit deltas safely
122
Some existing knit deltas don't consider the last line to differ
123
when the only difference whether it has a final newline.
125
New knit deltas appear to always consider the last line to differ
128
self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\nd\n', noeol=True)
129
self.assertDerivedBlocksEqual('a\nb\nc\nd\n', 'a\nb\nc', noeol=True)
130
self.assertDerivedBlocksEqual('a\nb\nc\n', 'a\nb\nc', noeol=True)
131
self.assertDerivedBlocksEqual('a\nb\nc', 'a\nb\nc\n', noeol=True)
143
Banana cup cake recipe
153
Banana cup cake recipe
155
- bananas (do not use plantains!!!)
162
Banana cup cake recipe
179
class TestPlainKnitContent(TestCase, KnitContentTestsMixin):
181
def _make_content(self, lines):
182
annotated_content = AnnotatedKnitContent(lines)
183
return PlainKnitContent(annotated_content.text(), 'bogus')
185
def test_annotate(self):
186
content = self._make_content([])
187
self.assertEqual(content.annotate(), [])
189
content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
190
self.assertEqual(content.annotate(),
191
[("bogus", "text1"), ("bogus", "text2")])
193
def test_line_delta(self):
194
content1 = self._make_content([("", "a"), ("", "b")])
195
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
196
self.assertEqual(content1.line_delta(content2),
197
[(1, 2, 2, ["a", "c"])])
199
def test_line_delta_iter(self):
200
content1 = self._make_content([("", "a"), ("", "b")])
201
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
202
it = content1.line_delta_iter(content2)
203
self.assertEqual(it.next(), (1, 2, 2, ["a", "c"]))
204
self.assertRaises(StopIteration, it.next)
207
class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin):
209
def _make_content(self, lines):
210
return AnnotatedKnitContent(lines)
212
def test_annotate(self):
213
content = self._make_content([])
214
self.assertEqual(content.annotate(), [])
216
content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
217
self.assertEqual(content.annotate(),
218
[("origin1", "text1"), ("origin2", "text2")])
220
def test_line_delta(self):
221
content1 = self._make_content([("", "a"), ("", "b")])
222
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
223
self.assertEqual(content1.line_delta(content2),
224
[(1, 2, 2, [("", "a"), ("", "c")])])
226
def test_line_delta_iter(self):
227
content1 = self._make_content([("", "a"), ("", "b")])
228
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
229
it = content1.line_delta_iter(content2)
230
self.assertEqual(it.next(), (1, 2, 2, [("", "a"), ("", "c")]))
231
self.assertRaises(StopIteration, it.next)
234
class MockTransport(object):
236
def __init__(self, file_lines=None):
237
self.file_lines = file_lines
239
# We have no base directory for the MockTransport
242
def get(self, filename):
243
if self.file_lines is None:
244
raise NoSuchFile(filename)
246
return StringIO("\n".join(self.file_lines))
248
def readv(self, relpath, offsets):
249
fp = self.get(relpath)
250
for offset, size in offsets:
252
yield offset, fp.read(size)
254
def __getattr__(self, name):
255
def queue_call(*args, **kwargs):
256
self.calls.append((name, args, kwargs))
260
class MockReadvFailingTransport(MockTransport):
261
"""Fail in the middle of a readv() result.
263
This Transport will successfully yield the first two requested hunks, but
264
raise NoSuchFile for the rest.
267
def readv(self, relpath, offsets):
269
for result in MockTransport.readv(self, relpath, offsets):
271
# we use 2 because the first offset is the pack header, the second
272
# is the first actual content requset
274
raise errors.NoSuchFile(relpath)
278
class KnitRecordAccessTestsMixin(object):
279
"""Tests for getting and putting knit records."""
281
def test_add_raw_records(self):
282
"""Add_raw_records adds records retrievable later."""
283
access = self.get_access()
284
memos = access.add_raw_records([('key', 10)], '1234567890')
285
self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
287
def test_add_several_raw_records(self):
288
"""add_raw_records with many records and read some back."""
289
access = self.get_access()
290
memos = access.add_raw_records([('key', 10), ('key2', 2), ('key3', 5)],
292
self.assertEqual(['1234567890', '12', '34567'],
293
list(access.get_raw_records(memos)))
294
self.assertEqual(['1234567890'],
295
list(access.get_raw_records(memos[0:1])))
296
self.assertEqual(['12'],
297
list(access.get_raw_records(memos[1:2])))
298
self.assertEqual(['34567'],
299
list(access.get_raw_records(memos[2:3])))
300
self.assertEqual(['1234567890', '34567'],
301
list(access.get_raw_records(memos[0:1] + memos[2:3])))
304
class TestKnitKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
305
"""Tests for the .kndx implementation."""
307
def get_access(self):
308
"""Get a .knit style access instance."""
309
mapper = ConstantMapper("foo")
310
access = _KnitKeyAccess(self.get_transport(), mapper)
314
class _TestException(Exception):
315
"""Just an exception for local tests to use."""
318
class TestPackKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
319
"""Tests for the pack based access."""
321
def get_access(self):
322
return self._get_access()[0]
324
def _get_access(self, packname='packfile', index='FOO'):
325
transport = self.get_transport()
326
def write_data(bytes):
327
transport.append_bytes(packname, bytes)
328
writer = pack.ContainerWriter(write_data)
330
access = pack_repo._DirectPackAccess({})
331
access.set_writer(writer, index, (transport, packname))
332
return access, writer
334
def make_pack_file(self):
335
"""Create a pack file with 2 records."""
336
access, writer = self._get_access(packname='packname', index='foo')
338
memos.extend(access.add_raw_records([('key1', 10)], '1234567890'))
339
memos.extend(access.add_raw_records([('key2', 5)], '12345'))
343
def test_pack_collection_pack_retries(self):
344
"""An explicit pack of a pack collection succeeds even when a
345
concurrent pack happens.
347
builder = self.make_branch_builder('.')
348
builder.start_series()
349
builder.build_snapshot('rev-1', None, [
350
('add', ('', 'root-id', 'directory', None)),
351
('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
353
builder.build_snapshot('rev-2', ['rev-1'], [
354
('modify', ('file-id', 'content\nrev 2\n')),
356
builder.build_snapshot('rev-3', ['rev-2'], [
357
('modify', ('file-id', 'content\nrev 3\n')),
359
self.addCleanup(builder.finish_series)
360
b = builder.get_branch()
361
self.addCleanup(b.lock_write().unlock)
363
collection = repo._pack_collection
364
# Concurrently repack the repo.
365
reopened_repo = repo.bzrdir.open_repository()
370
def make_vf_for_retrying(self):
371
"""Create 3 packs and a reload function.
373
Originally, 2 pack files will have the data, but one will be missing.
374
And then the third will be used in place of the first two if reload()
377
:return: (versioned_file, reload_counter)
378
versioned_file a KnitVersionedFiles using the packs for access
380
builder = self.make_branch_builder('.', format="1.9")
381
builder.start_series()
382
builder.build_snapshot('rev-1', None, [
383
('add', ('', 'root-id', 'directory', None)),
384
('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
386
builder.build_snapshot('rev-2', ['rev-1'], [
387
('modify', ('file-id', 'content\nrev 2\n')),
389
builder.build_snapshot('rev-3', ['rev-2'], [
390
('modify', ('file-id', 'content\nrev 3\n')),
392
builder.finish_series()
393
b = builder.get_branch()
395
self.addCleanup(b.unlock)
396
# Pack these three revisions into another pack file, but don't remove
399
collection = repo._pack_collection
400
collection.ensure_loaded()
401
orig_packs = collection.packs
402
packer = knitpack_repo.KnitPacker(collection, orig_packs, '.testpack')
403
new_pack = packer.pack()
404
# forget about the new pack
408
# Set up a reload() function that switches to using the new pack file
409
new_index = new_pack.revision_index
410
access_tuple = new_pack.access_tuple()
411
reload_counter = [0, 0, 0]
413
reload_counter[0] += 1
414
if reload_counter[1] > 0:
415
# We already reloaded, nothing more to do
416
reload_counter[2] += 1
418
reload_counter[1] += 1
419
vf._index._graph_index._indices[:] = [new_index]
420
vf._access._indices.clear()
421
vf._access._indices[new_index] = access_tuple
423
# Delete one of the pack files so the data will need to be reloaded. We
424
# will delete the file with 'rev-2' in it
425
trans, name = orig_packs[1].access_tuple()
427
# We don't have the index trigger reloading because we want to test
428
# that we reload when the .pack disappears
429
vf._access._reload_func = reload
430
return vf, reload_counter
432
def make_reload_func(self, return_val=True):
435
reload_called[0] += 1
437
return reload_called, reload
439
def make_retry_exception(self):
440
# We raise a real exception so that sys.exc_info() is properly
443
raise _TestException('foobar')
444
except _TestException, e:
445
retry_exc = errors.RetryWithNewPacks(None, reload_occurred=False,
446
exc_info=sys.exc_info())
449
def test_read_from_several_packs(self):
450
access, writer = self._get_access()
452
memos.extend(access.add_raw_records([('key', 10)], '1234567890'))
454
access, writer = self._get_access('pack2', 'FOOBAR')
455
memos.extend(access.add_raw_records([('key', 5)], '12345'))
457
access, writer = self._get_access('pack3', 'BAZ')
458
memos.extend(access.add_raw_records([('key', 5)], 'alpha'))
460
transport = self.get_transport()
461
access = pack_repo._DirectPackAccess({"FOO":(transport, 'packfile'),
462
"FOOBAR":(transport, 'pack2'),
463
"BAZ":(transport, 'pack3')})
464
self.assertEqual(['1234567890', '12345', 'alpha'],
465
list(access.get_raw_records(memos)))
466
self.assertEqual(['1234567890'],
467
list(access.get_raw_records(memos[0:1])))
468
self.assertEqual(['12345'],
469
list(access.get_raw_records(memos[1:2])))
470
self.assertEqual(['alpha'],
471
list(access.get_raw_records(memos[2:3])))
472
self.assertEqual(['1234567890', 'alpha'],
473
list(access.get_raw_records(memos[0:1] + memos[2:3])))
475
def test_set_writer(self):
476
"""The writer should be settable post construction."""
477
access = pack_repo._DirectPackAccess({})
478
transport = self.get_transport()
479
packname = 'packfile'
481
def write_data(bytes):
482
transport.append_bytes(packname, bytes)
483
writer = pack.ContainerWriter(write_data)
485
access.set_writer(writer, index, (transport, packname))
486
memos = access.add_raw_records([('key', 10)], '1234567890')
488
self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
490
def test_missing_index_raises_retry(self):
491
memos = self.make_pack_file()
492
transport = self.get_transport()
493
reload_called, reload_func = self.make_reload_func()
494
# Note that the index key has changed from 'foo' to 'bar'
495
access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')},
496
reload_func=reload_func)
497
e = self.assertListRaises(errors.RetryWithNewPacks,
498
access.get_raw_records, memos)
499
# Because a key was passed in which does not match our index list, we
500
# assume that the listing was already reloaded
501
self.assertTrue(e.reload_occurred)
502
self.assertIsInstance(e.exc_info, tuple)
503
self.assertIs(e.exc_info[0], KeyError)
504
self.assertIsInstance(e.exc_info[1], KeyError)
506
def test_missing_index_raises_key_error_with_no_reload(self):
507
memos = self.make_pack_file()
508
transport = self.get_transport()
509
# Note that the index key has changed from 'foo' to 'bar'
510
access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')})
511
e = self.assertListRaises(KeyError, access.get_raw_records, memos)
513
def test_missing_file_raises_retry(self):
514
memos = self.make_pack_file()
515
transport = self.get_transport()
516
reload_called, reload_func = self.make_reload_func()
517
# Note that the 'filename' has been changed to 'different-packname'
518
access = pack_repo._DirectPackAccess(
519
{'foo':(transport, 'different-packname')},
520
reload_func=reload_func)
521
e = self.assertListRaises(errors.RetryWithNewPacks,
522
access.get_raw_records, memos)
523
# The file has gone missing, so we assume we need to reload
524
self.assertFalse(e.reload_occurred)
525
self.assertIsInstance(e.exc_info, tuple)
526
self.assertIs(e.exc_info[0], errors.NoSuchFile)
527
self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
528
self.assertEqual('different-packname', e.exc_info[1].path)
530
def test_missing_file_raises_no_such_file_with_no_reload(self):
531
memos = self.make_pack_file()
532
transport = self.get_transport()
533
# Note that the 'filename' has been changed to 'different-packname'
534
access = pack_repo._DirectPackAccess(
535
{'foo': (transport, 'different-packname')})
536
e = self.assertListRaises(errors.NoSuchFile,
537
access.get_raw_records, memos)
539
def test_failing_readv_raises_retry(self):
540
memos = self.make_pack_file()
541
transport = self.get_transport()
542
failing_transport = MockReadvFailingTransport(
543
[transport.get_bytes('packname')])
544
reload_called, reload_func = self.make_reload_func()
545
access = pack_repo._DirectPackAccess(
546
{'foo': (failing_transport, 'packname')},
547
reload_func=reload_func)
548
# Asking for a single record will not trigger the Mock failure
549
self.assertEqual(['1234567890'],
550
list(access.get_raw_records(memos[:1])))
551
self.assertEqual(['12345'],
552
list(access.get_raw_records(memos[1:2])))
553
# A multiple offset readv() will fail mid-way through
554
e = self.assertListRaises(errors.RetryWithNewPacks,
555
access.get_raw_records, memos)
556
# The file has gone missing, so we assume we need to reload
557
self.assertFalse(e.reload_occurred)
558
self.assertIsInstance(e.exc_info, tuple)
559
self.assertIs(e.exc_info[0], errors.NoSuchFile)
560
self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
561
self.assertEqual('packname', e.exc_info[1].path)
563
def test_failing_readv_raises_no_such_file_with_no_reload(self):
564
memos = self.make_pack_file()
565
transport = self.get_transport()
566
failing_transport = MockReadvFailingTransport(
567
[transport.get_bytes('packname')])
568
reload_called, reload_func = self.make_reload_func()
569
access = pack_repo._DirectPackAccess(
570
{'foo':(failing_transport, 'packname')})
571
# Asking for a single record will not trigger the Mock failure
572
self.assertEqual(['1234567890'],
573
list(access.get_raw_records(memos[:1])))
574
self.assertEqual(['12345'],
575
list(access.get_raw_records(memos[1:2])))
576
# A multiple offset readv() will fail mid-way through
577
e = self.assertListRaises(errors.NoSuchFile,
578
access.get_raw_records, memos)
580
def test_reload_or_raise_no_reload(self):
581
access = pack_repo._DirectPackAccess({}, reload_func=None)
582
retry_exc = self.make_retry_exception()
583
# Without a reload_func, we will just re-raise the original exception
584
self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
586
def test_reload_or_raise_reload_changed(self):
587
reload_called, reload_func = self.make_reload_func(return_val=True)
588
access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
589
retry_exc = self.make_retry_exception()
590
access.reload_or_raise(retry_exc)
591
self.assertEqual([1], reload_called)
592
retry_exc.reload_occurred=True
593
access.reload_or_raise(retry_exc)
594
self.assertEqual([2], reload_called)
596
def test_reload_or_raise_reload_no_change(self):
597
reload_called, reload_func = self.make_reload_func(return_val=False)
598
access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
599
retry_exc = self.make_retry_exception()
600
# If reload_occurred is False, then we consider it an error to have
601
# reload_func() return False (no changes).
602
self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
603
self.assertEqual([1], reload_called)
604
retry_exc.reload_occurred=True
605
# If reload_occurred is True, then we assume nothing changed because
606
# it had changed earlier, but didn't change again
607
access.reload_or_raise(retry_exc)
608
self.assertEqual([2], reload_called)
610
def test_annotate_retries(self):
611
vf, reload_counter = self.make_vf_for_retrying()
612
# It is a little bit bogus to annotate the Revision VF, but it works,
613
# as we have ancestry stored there
615
reload_lines = vf.annotate(key)
616
self.assertEqual([1, 1, 0], reload_counter)
617
plain_lines = vf.annotate(key)
618
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
619
if reload_lines != plain_lines:
620
self.fail('Annotation was not identical with reloading.')
621
# Now delete the packs-in-use, which should trigger another reload, but
622
# this time we just raise an exception because we can't recover
623
for trans, name in vf._access._indices.itervalues():
625
self.assertRaises(errors.NoSuchFile, vf.annotate, key)
626
self.assertEqual([2, 1, 1], reload_counter)
628
def test__get_record_map_retries(self):
629
vf, reload_counter = self.make_vf_for_retrying()
630
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
631
records = vf._get_record_map(keys)
632
self.assertEqual(keys, sorted(records.keys()))
633
self.assertEqual([1, 1, 0], reload_counter)
634
# Now delete the packs-in-use, which should trigger another reload, but
635
# this time we just raise an exception because we can't recover
636
for trans, name in vf._access._indices.itervalues():
638
self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
639
self.assertEqual([2, 1, 1], reload_counter)
641
def test_get_record_stream_retries(self):
642
vf, reload_counter = self.make_vf_for_retrying()
643
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
644
record_stream = vf.get_record_stream(keys, 'topological', False)
645
record = record_stream.next()
646
self.assertEqual(('rev-1',), record.key)
647
self.assertEqual([0, 0, 0], reload_counter)
648
record = record_stream.next()
649
self.assertEqual(('rev-2',), record.key)
650
self.assertEqual([1, 1, 0], reload_counter)
651
record = record_stream.next()
652
self.assertEqual(('rev-3',), record.key)
653
self.assertEqual([1, 1, 0], reload_counter)
654
# Now delete all pack files, and see that we raise the right error
655
for trans, name in vf._access._indices.itervalues():
657
self.assertListRaises(errors.NoSuchFile,
658
vf.get_record_stream, keys, 'topological', False)
660
def test_iter_lines_added_or_present_in_keys_retries(self):
661
vf, reload_counter = self.make_vf_for_retrying()
662
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
663
# Unfortunately, iter_lines_added_or_present_in_keys iterates the
664
# result in random order (determined by the iteration order from a
665
# set()), so we don't have any solid way to trigger whether data is
666
# read before or after. However we tried to delete the middle node to
667
# exercise the code well.
668
# What we care about is that all lines are always yielded, but not
671
reload_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
672
self.assertEqual([1, 1, 0], reload_counter)
673
# Now do it again, to make sure the result is equivalent
674
plain_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
675
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
676
self.assertEqual(plain_lines, reload_lines)
677
self.assertEqual(21, len(plain_lines))
678
# Now delete all pack files, and see that we raise the right error
679
for trans, name in vf._access._indices.itervalues():
681
self.assertListRaises(errors.NoSuchFile,
682
vf.iter_lines_added_or_present_in_keys, keys)
683
self.assertEqual([2, 1, 1], reload_counter)
685
def test_get_record_stream_yields_disk_sorted_order(self):
686
# if we get 'unordered' pick a semi-optimal order for reading. The
687
# order should be grouped by pack file, and then by position in file
688
repo = self.make_repository('test', format='pack-0.92')
690
self.addCleanup(repo.unlock)
691
repo.start_write_group()
693
vf.add_lines(('f-id', 'rev-5'), [('f-id', 'rev-4')], ['lines\n'])
694
vf.add_lines(('f-id', 'rev-1'), [], ['lines\n'])
695
vf.add_lines(('f-id', 'rev-2'), [('f-id', 'rev-1')], ['lines\n'])
696
repo.commit_write_group()
697
# We inserted them as rev-5, rev-1, rev-2, we should get them back in
699
stream = vf.get_record_stream([('f-id', 'rev-1'), ('f-id', 'rev-5'),
700
('f-id', 'rev-2')], 'unordered', False)
701
keys = [r.key for r in stream]
702
self.assertEqual([('f-id', 'rev-5'), ('f-id', 'rev-1'),
703
('f-id', 'rev-2')], keys)
704
repo.start_write_group()
705
vf.add_lines(('f-id', 'rev-4'), [('f-id', 'rev-3')], ['lines\n'])
706
vf.add_lines(('f-id', 'rev-3'), [('f-id', 'rev-2')], ['lines\n'])
707
vf.add_lines(('f-id', 'rev-6'), [('f-id', 'rev-5')], ['lines\n'])
708
repo.commit_write_group()
709
# Request in random order, to make sure the output order isn't based on
711
request_keys = set(('f-id', 'rev-%d' % i) for i in range(1, 7))
712
stream = vf.get_record_stream(request_keys, 'unordered', False)
713
keys = [r.key for r in stream]
714
# We want to get the keys back in disk order, but it doesn't matter
715
# which pack we read from first. So this can come back in 2 orders
716
alt1 = [('f-id', 'rev-%d' % i) for i in [4, 3, 6, 5, 1, 2]]
717
alt2 = [('f-id', 'rev-%d' % i) for i in [5, 1, 2, 4, 3, 6]]
718
if keys != alt1 and keys != alt2:
719
self.fail('Returned key order did not match either expected order.'
720
' expected %s or %s, not %s'
721
% (alt1, alt2, keys))
724
class LowLevelKnitDataTests(TestCase):
726
def create_gz_content(self, text):
728
gz_file = gzip.GzipFile(mode='wb', fileobj=sio)
731
return sio.getvalue()
733
def make_multiple_records(self):
734
"""Create the content for multiple records."""
735
sha1sum = osutils.sha_string('foo\nbar\n')
737
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
742
record_1 = (0, len(gz_txt), sha1sum)
743
total_txt.append(gz_txt)
744
sha1sum = osutils.sha_string('baz\n')
745
gz_txt = self.create_gz_content('version rev-id-2 1 %s\n'
749
record_2 = (record_1[1], len(gz_txt), sha1sum)
750
total_txt.append(gz_txt)
751
return total_txt, record_1, record_2
753
def test_valid_knit_data(self):
754
sha1sum = osutils.sha_string('foo\nbar\n')
755
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
760
transport = MockTransport([gz_txt])
761
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
762
knit = KnitVersionedFiles(None, access)
763
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
765
contents = list(knit._read_records_iter(records))
766
self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'],
767
'4e48e2c9a3d2ca8a708cb0cc545700544efb5021')], contents)
769
raw_contents = list(knit._read_records_iter_raw(records))
770
self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
772
def test_multiple_records_valid(self):
773
total_txt, record_1, record_2 = self.make_multiple_records()
774
transport = MockTransport([''.join(total_txt)])
775
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
776
knit = KnitVersionedFiles(None, access)
777
records = [(('rev-id-1',), (('rev-id-1',), record_1[0], record_1[1])),
778
(('rev-id-2',), (('rev-id-2',), record_2[0], record_2[1]))]
780
contents = list(knit._read_records_iter(records))
781
self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'], record_1[2]),
782
(('rev-id-2',), ['baz\n'], record_2[2])],
785
raw_contents = list(knit._read_records_iter_raw(records))
786
self.assertEqual([(('rev-id-1',), total_txt[0], record_1[2]),
787
(('rev-id-2',), total_txt[1], record_2[2])],
790
def test_not_enough_lines(self):
791
sha1sum = osutils.sha_string('foo\n')
792
# record says 2 lines data says 1
793
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
797
transport = MockTransport([gz_txt])
798
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
799
knit = KnitVersionedFiles(None, access)
800
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
801
self.assertRaises(errors.KnitCorrupt, list,
802
knit._read_records_iter(records))
804
# read_records_iter_raw won't detect that sort of mismatch/corruption
805
raw_contents = list(knit._read_records_iter_raw(records))
806
self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
808
def test_too_many_lines(self):
809
sha1sum = osutils.sha_string('foo\nbar\n')
810
# record says 1 lines data says 2
811
gz_txt = self.create_gz_content('version rev-id-1 1 %s\n'
816
transport = MockTransport([gz_txt])
817
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
818
knit = KnitVersionedFiles(None, access)
819
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
820
self.assertRaises(errors.KnitCorrupt, list,
821
knit._read_records_iter(records))
823
# read_records_iter_raw won't detect that sort of mismatch/corruption
824
raw_contents = list(knit._read_records_iter_raw(records))
825
self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
827
def test_mismatched_version_id(self):
828
sha1sum = osutils.sha_string('foo\nbar\n')
829
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
834
transport = MockTransport([gz_txt])
835
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
836
knit = KnitVersionedFiles(None, access)
837
# We are asking for rev-id-2, but the data is rev-id-1
838
records = [(('rev-id-2',), (('rev-id-2',), 0, len(gz_txt)))]
839
self.assertRaises(errors.KnitCorrupt, list,
840
knit._read_records_iter(records))
842
# read_records_iter_raw detects mismatches in the header
843
self.assertRaises(errors.KnitCorrupt, list,
844
knit._read_records_iter_raw(records))
846
def test_uncompressed_data(self):
847
sha1sum = osutils.sha_string('foo\nbar\n')
848
txt = ('version rev-id-1 2 %s\n'
853
transport = MockTransport([txt])
854
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
855
knit = KnitVersionedFiles(None, access)
856
records = [(('rev-id-1',), (('rev-id-1',), 0, len(txt)))]
858
# We don't have valid gzip data ==> corrupt
859
self.assertRaises(errors.KnitCorrupt, list,
860
knit._read_records_iter(records))
862
# read_records_iter_raw will notice the bad data
863
self.assertRaises(errors.KnitCorrupt, list,
864
knit._read_records_iter_raw(records))
866
def test_corrupted_data(self):
867
sha1sum = osutils.sha_string('foo\nbar\n')
868
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
873
# Change 2 bytes in the middle to \xff
874
gz_txt = gz_txt[:10] + '\xff\xff' + gz_txt[12:]
875
transport = MockTransport([gz_txt])
876
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
877
knit = KnitVersionedFiles(None, access)
878
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
879
self.assertRaises(errors.KnitCorrupt, list,
880
knit._read_records_iter(records))
881
# read_records_iter_raw will barf on bad gz data
882
self.assertRaises(errors.KnitCorrupt, list,
883
knit._read_records_iter_raw(records))
886
class LowLevelKnitIndexTests(TestCase):
888
def get_knit_index(self, transport, name, mode):
889
mapper = ConstantMapper(name)
890
from bzrlib._knit_load_data_py import _load_data_py
891
self.overrideAttr(knit, '_load_data', _load_data_py)
892
allow_writes = lambda: 'w' in mode
893
return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
895
def test_create_file(self):
896
transport = MockTransport()
897
index = self.get_knit_index(transport, "filename", "w")
899
call = transport.calls.pop(0)
900
# call[1][1] is a StringIO - we can't test it by simple equality.
901
self.assertEqual('put_file_non_atomic', call[0])
902
self.assertEqual('filename.kndx', call[1][0])
903
# With no history, _KndxIndex writes a new index:
904
self.assertEqual(_KndxIndex.HEADER,
905
call[1][1].getvalue())
906
self.assertEqual({'create_parent_dir': True}, call[2])
908
def test_read_utf8_version_id(self):
909
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
910
utf8_revision_id = unicode_revision_id.encode('utf-8')
911
transport = MockTransport([
913
'%s option 0 1 :' % (utf8_revision_id,)
915
index = self.get_knit_index(transport, "filename", "r")
916
# _KndxIndex is a private class, and deals in utf8 revision_ids, not
917
# Unicode revision_ids.
918
self.assertEqual({(utf8_revision_id,):()},
919
index.get_parent_map(index.keys()))
920
self.assertFalse((unicode_revision_id,) in index.keys())
922
def test_read_utf8_parents(self):
923
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
924
utf8_revision_id = unicode_revision_id.encode('utf-8')
925
transport = MockTransport([
927
"version option 0 1 .%s :" % (utf8_revision_id,)
929
index = self.get_knit_index(transport, "filename", "r")
930
self.assertEqual({("version",):((utf8_revision_id,),)},
931
index.get_parent_map(index.keys()))
933
def test_read_ignore_corrupted_lines(self):
934
transport = MockTransport([
937
"corrupted options 0 1 .b .c ",
938
"version options 0 1 :"
940
index = self.get_knit_index(transport, "filename", "r")
941
self.assertEqual(1, len(index.keys()))
942
self.assertEqual(set([("version",)]), index.keys())
944
def test_read_corrupted_header(self):
945
transport = MockTransport(['not a bzr knit index header\n'])
946
index = self.get_knit_index(transport, "filename", "r")
947
self.assertRaises(KnitHeaderError, index.keys)
949
def test_read_duplicate_entries(self):
950
transport = MockTransport([
952
"parent options 0 1 :",
953
"version options1 0 1 0 :",
954
"version options2 1 2 .other :",
955
"version options3 3 4 0 .other :"
957
index = self.get_knit_index(transport, "filename", "r")
958
self.assertEqual(2, len(index.keys()))
959
# check that the index used is the first one written. (Specific
960
# to KnitIndex style indices.
961
self.assertEqual("1", index._dictionary_compress([("version",)]))
962
self.assertEqual((("version",), 3, 4), index.get_position(("version",)))
963
self.assertEqual(["options3"], index.get_options(("version",)))
964
self.assertEqual({("version",):(("parent",), ("other",))},
965
index.get_parent_map([("version",)]))
967
def test_read_compressed_parents(self):
968
transport = MockTransport([
972
"c option 0 1 1 0 :",
974
index = self.get_knit_index(transport, "filename", "r")
975
self.assertEqual({("b",):(("a",),), ("c",):(("b",), ("a",))},
976
index.get_parent_map([("b",), ("c",)]))
978
def test_write_utf8_version_id(self):
979
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
980
utf8_revision_id = unicode_revision_id.encode('utf-8')
981
transport = MockTransport([
984
index = self.get_knit_index(transport, "filename", "r")
986
((utf8_revision_id,), ["option"], ((utf8_revision_id,), 0, 1), [])])
987
call = transport.calls.pop(0)
988
# call[1][1] is a StringIO - we can't test it by simple equality.
989
self.assertEqual('put_file_non_atomic', call[0])
990
self.assertEqual('filename.kndx', call[1][0])
991
# With no history, _KndxIndex writes a new index:
992
self.assertEqual(_KndxIndex.HEADER +
993
"\n%s option 0 1 :" % (utf8_revision_id,),
994
call[1][1].getvalue())
995
self.assertEqual({'create_parent_dir': True}, call[2])
997
def test_write_utf8_parents(self):
998
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
999
utf8_revision_id = unicode_revision_id.encode('utf-8')
1000
transport = MockTransport([
1003
index = self.get_knit_index(transport, "filename", "r")
1005
(("version",), ["option"], (("version",), 0, 1), [(utf8_revision_id,)])])
1006
call = transport.calls.pop(0)
1007
# call[1][1] is a StringIO - we can't test it by simple equality.
1008
self.assertEqual('put_file_non_atomic', call[0])
1009
self.assertEqual('filename.kndx', call[1][0])
1010
# With no history, _KndxIndex writes a new index:
1011
self.assertEqual(_KndxIndex.HEADER +
1012
"\nversion option 0 1 .%s :" % (utf8_revision_id,),
1013
call[1][1].getvalue())
1014
self.assertEqual({'create_parent_dir': True}, call[2])
1016
def test_keys(self):
1017
transport = MockTransport([
1020
index = self.get_knit_index(transport, "filename", "r")
1022
self.assertEqual(set(), index.keys())
1024
index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
1025
self.assertEqual(set([("a",)]), index.keys())
1027
index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
1028
self.assertEqual(set([("a",)]), index.keys())
1030
index.add_records([(("b",), ["option"], (("b",), 0, 1), [])])
1031
self.assertEqual(set([("a",), ("b",)]), index.keys())
1033
def add_a_b(self, index, random_id=None):
1035
if random_id is not None:
1036
kwargs["random_id"] = random_id
1038
(("a",), ["option"], (("a",), 0, 1), [("b",)]),
1039
(("a",), ["opt"], (("a",), 1, 2), [("c",)]),
1040
(("b",), ["option"], (("b",), 2, 3), [("a",)])
1043
def assertIndexIsAB(self, index):
1048
index.get_parent_map(index.keys()))
1049
self.assertEqual((("a",), 1, 2), index.get_position(("a",)))
1050
self.assertEqual((("b",), 2, 3), index.get_position(("b",)))
1051
self.assertEqual(["opt"], index.get_options(("a",)))
1053
def test_add_versions(self):
1054
transport = MockTransport([
1057
index = self.get_knit_index(transport, "filename", "r")
1060
call = transport.calls.pop(0)
1061
# call[1][1] is a StringIO - we can't test it by simple equality.
1062
self.assertEqual('put_file_non_atomic', call[0])
1063
self.assertEqual('filename.kndx', call[1][0])
1064
# With no history, _KndxIndex writes a new index:
1067
"\na option 0 1 .b :"
1069
"\nb option 2 3 0 :",
1070
call[1][1].getvalue())
1071
self.assertEqual({'create_parent_dir': True}, call[2])
1072
self.assertIndexIsAB(index)
1074
def test_add_versions_random_id_is_accepted(self):
1075
transport = MockTransport([
1078
index = self.get_knit_index(transport, "filename", "r")
1079
self.add_a_b(index, random_id=True)
1081
def test_delay_create_and_add_versions(self):
1082
transport = MockTransport()
1084
index = self.get_knit_index(transport, "filename", "w")
1086
self.assertEqual([], transport.calls)
1089
#[ {"dir_mode": 0777, "create_parent_dir": True, "mode": "wb"},
1091
# Two calls: one during which we load the existing index (and when its
1092
# missing create it), then a second where we write the contents out.
1093
self.assertEqual(2, len(transport.calls))
1094
call = transport.calls.pop(0)
1095
self.assertEqual('put_file_non_atomic', call[0])
1096
self.assertEqual('filename.kndx', call[1][0])
1097
# With no history, _KndxIndex writes a new index:
1098
self.assertEqual(_KndxIndex.HEADER, call[1][1].getvalue())
1099
self.assertEqual({'create_parent_dir': True}, call[2])
1100
call = transport.calls.pop(0)
1101
# call[1][1] is a StringIO - we can't test it by simple equality.
1102
self.assertEqual('put_file_non_atomic', call[0])
1103
self.assertEqual('filename.kndx', call[1][0])
1104
# With no history, _KndxIndex writes a new index:
1107
"\na option 0 1 .b :"
1109
"\nb option 2 3 0 :",
1110
call[1][1].getvalue())
1111
self.assertEqual({'create_parent_dir': True}, call[2])
1113
def assertTotalBuildSize(self, size, keys, positions):
1114
self.assertEqual(size,
1115
knit._get_total_build_size(None, keys, positions))
1117
def test__get_total_build_size(self):
1119
('a',): (('fulltext', False), (('a',), 0, 100), None),
1120
('b',): (('line-delta', False), (('b',), 100, 21), ('a',)),
1121
('c',): (('line-delta', False), (('c',), 121, 35), ('b',)),
1122
('d',): (('line-delta', False), (('d',), 156, 12), ('b',)),
1124
self.assertTotalBuildSize(100, [('a',)], positions)
1125
self.assertTotalBuildSize(121, [('b',)], positions)
1126
# c needs both a & b
1127
self.assertTotalBuildSize(156, [('c',)], positions)
1128
# we shouldn't count 'b' twice
1129
self.assertTotalBuildSize(156, [('b',), ('c',)], positions)
1130
self.assertTotalBuildSize(133, [('d',)], positions)
1131
self.assertTotalBuildSize(168, [('c',), ('d',)], positions)
1133
def test_get_position(self):
1134
transport = MockTransport([
1139
index = self.get_knit_index(transport, "filename", "r")
1141
self.assertEqual((("a",), 0, 1), index.get_position(("a",)))
1142
self.assertEqual((("b",), 1, 2), index.get_position(("b",)))
1144
def test_get_method(self):
1145
transport = MockTransport([
1147
"a fulltext,unknown 0 1 :",
1148
"b unknown,line-delta 1 2 :",
1151
index = self.get_knit_index(transport, "filename", "r")
1153
self.assertEqual("fulltext", index.get_method("a"))
1154
self.assertEqual("line-delta", index.get_method("b"))
1155
self.assertRaises(errors.KnitIndexUnknownMethod, index.get_method, "c")
1157
def test_get_options(self):
1158
transport = MockTransport([
1163
index = self.get_knit_index(transport, "filename", "r")
1165
self.assertEqual(["opt1"], index.get_options("a"))
1166
self.assertEqual(["opt2", "opt3"], index.get_options("b"))
1168
def test_get_parent_map(self):
1169
transport = MockTransport([
1172
"b option 1 2 0 .c :",
1173
"c option 1 2 1 0 .e :"
1175
index = self.get_knit_index(transport, "filename", "r")
1179
("b",):(("a",), ("c",)),
1180
("c",):(("b",), ("a",), ("e",)),
1181
}, index.get_parent_map(index.keys()))
1183
def test_impossible_parent(self):
1184
"""Test we get KnitCorrupt if the parent couldn't possibly exist."""
1185
transport = MockTransport([
1188
"b option 0 1 4 :" # We don't have a 4th record
1190
index = self.get_knit_index(transport, 'filename', 'r')
1192
self.assertRaises(errors.KnitCorrupt, index.keys)
1193
except TypeError, e:
1194
if (str(e) == ('exceptions must be strings, classes, or instances,'
1195
' not exceptions.IndexError')):
1196
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1197
' raising new style exceptions with python'
1202
def test_corrupted_parent(self):
1203
transport = MockTransport([
1207
"c option 0 1 1v :", # Can't have a parent of '1v'
1209
index = self.get_knit_index(transport, 'filename', 'r')
1211
self.assertRaises(errors.KnitCorrupt, index.keys)
1212
except TypeError, e:
1213
if (str(e) == ('exceptions must be strings, classes, or instances,'
1214
' not exceptions.ValueError')):
1215
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1216
' raising new style exceptions with python'
1221
def test_corrupted_parent_in_list(self):
1222
transport = MockTransport([
1226
"c option 0 1 1 v :", # Can't have a parent of 'v'
1228
index = self.get_knit_index(transport, 'filename', 'r')
1230
self.assertRaises(errors.KnitCorrupt, index.keys)
1231
except TypeError, e:
1232
if (str(e) == ('exceptions must be strings, classes, or instances,'
1233
' not exceptions.ValueError')):
1234
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1235
' raising new style exceptions with python'
1240
def test_invalid_position(self):
1241
transport = MockTransport([
1245
index = self.get_knit_index(transport, 'filename', 'r')
1247
self.assertRaises(errors.KnitCorrupt, index.keys)
1248
except TypeError, e:
1249
if (str(e) == ('exceptions must be strings, classes, or instances,'
1250
' not exceptions.ValueError')):
1251
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1252
' raising new style exceptions with python'
1257
def test_invalid_size(self):
1258
transport = MockTransport([
1262
index = self.get_knit_index(transport, 'filename', 'r')
1264
self.assertRaises(errors.KnitCorrupt, index.keys)
1265
except TypeError, e:
1266
if (str(e) == ('exceptions must be strings, classes, or instances,'
1267
' not exceptions.ValueError')):
1268
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1269
' raising new style exceptions with python'
1274
def test_scan_unvalidated_index_not_implemented(self):
1275
transport = MockTransport()
1276
index = self.get_knit_index(transport, 'filename', 'r')
1278
NotImplementedError, index.scan_unvalidated_index,
1279
'dummy graph_index')
1281
NotImplementedError, index.get_missing_compression_parents)
1283
def test_short_line(self):
1284
transport = MockTransport([
1287
"b option 10 10 0", # This line isn't terminated, ignored
1289
index = self.get_knit_index(transport, "filename", "r")
1290
self.assertEqual(set([('a',)]), index.keys())
1292
def test_skip_incomplete_record(self):
1293
# A line with bogus data should just be skipped
1294
transport = MockTransport([
1297
"b option 10 10 0", # This line isn't terminated, ignored
1298
"c option 20 10 0 :", # Properly terminated, and starts with '\n'
1300
index = self.get_knit_index(transport, "filename", "r")
1301
self.assertEqual(set([('a',), ('c',)]), index.keys())
1303
def test_trailing_characters(self):
1304
# A line with bogus data should just be skipped
1305
transport = MockTransport([
1308
"b option 10 10 0 :a", # This line has extra trailing characters
1309
"c option 20 10 0 :", # Properly terminated, and starts with '\n'
1311
index = self.get_knit_index(transport, "filename", "r")
1312
self.assertEqual(set([('a',), ('c',)]), index.keys())
1315
class LowLevelKnitIndexTests_c(LowLevelKnitIndexTests):
1317
_test_needs_features = [compiled_knit_feature]
1319
def get_knit_index(self, transport, name, mode):
1320
mapper = ConstantMapper(name)
1321
from bzrlib._knit_load_data_pyx import _load_data_c
1322
self.overrideAttr(knit, '_load_data', _load_data_c)
1323
allow_writes = lambda: mode == 'w'
1324
return _KndxIndex(transport, mapper, lambda:None,
1325
allow_writes, lambda:True)
1328
class Test_KnitAnnotator(TestCaseWithMemoryTransport):
1330
def make_annotator(self):
1331
factory = knit.make_pack_factory(True, True, 1)
1332
vf = factory(self.get_transport())
1333
return knit._KnitAnnotator(vf)
1335
def test__expand_fulltext(self):
1336
ann = self.make_annotator()
1337
rev_key = ('rev-id',)
1338
ann._num_compression_children[rev_key] = 1
1339
res = ann._expand_record(rev_key, (('parent-id',),), None,
1340
['line1\n', 'line2\n'], ('fulltext', True))
1341
# The content object and text lines should be cached appropriately
1342
self.assertEqual(['line1\n', 'line2'], res)
1343
content_obj = ann._content_objects[rev_key]
1344
self.assertEqual(['line1\n', 'line2\n'], content_obj._lines)
1345
self.assertEqual(res, content_obj.text())
1346
self.assertEqual(res, ann._text_cache[rev_key])
1348
def test__expand_delta_comp_parent_not_available(self):
1349
# Parent isn't available yet, so we return nothing, but queue up this
1350
# node for later processing
1351
ann = self.make_annotator()
1352
rev_key = ('rev-id',)
1353
parent_key = ('parent-id',)
1354
record = ['0,1,1\n', 'new-line\n']
1355
details = ('line-delta', False)
1356
res = ann._expand_record(rev_key, (parent_key,), parent_key,
1358
self.assertEqual(None, res)
1359
self.assertTrue(parent_key in ann._pending_deltas)
1360
pending = ann._pending_deltas[parent_key]
1361
self.assertEqual(1, len(pending))
1362
self.assertEqual((rev_key, (parent_key,), record, details), pending[0])
1364
def test__expand_record_tracks_num_children(self):
1365
ann = self.make_annotator()
1366
rev_key = ('rev-id',)
1367
rev2_key = ('rev2-id',)
1368
parent_key = ('parent-id',)
1369
record = ['0,1,1\n', 'new-line\n']
1370
details = ('line-delta', False)
1371
ann._num_compression_children[parent_key] = 2
1372
ann._expand_record(parent_key, (), None, ['line1\n', 'line2\n'],
1373
('fulltext', False))
1374
res = ann._expand_record(rev_key, (parent_key,), parent_key,
1376
self.assertEqual({parent_key: 1}, ann._num_compression_children)
1377
# Expanding the second child should remove the content object, and the
1378
# num_compression_children entry
1379
res = ann._expand_record(rev2_key, (parent_key,), parent_key,
1381
self.assertFalse(parent_key in ann._content_objects)
1382
self.assertEqual({}, ann._num_compression_children)
1383
# We should not cache the content_objects for rev2 and rev, because
1384
# they do not have compression children of their own.
1385
self.assertEqual({}, ann._content_objects)
1387
def test__expand_delta_records_blocks(self):
1388
ann = self.make_annotator()
1389
rev_key = ('rev-id',)
1390
parent_key = ('parent-id',)
1391
record = ['0,1,1\n', 'new-line\n']
1392
details = ('line-delta', True)
1393
ann._num_compression_children[parent_key] = 2
1394
ann._expand_record(parent_key, (), None,
1395
['line1\n', 'line2\n', 'line3\n'],
1396
('fulltext', False))
1397
ann._expand_record(rev_key, (parent_key,), parent_key, record, details)
1398
self.assertEqual({(rev_key, parent_key): [(1, 1, 1), (3, 3, 0)]},
1399
ann._matching_blocks)
1400
rev2_key = ('rev2-id',)
1401
record = ['0,1,1\n', 'new-line\n']
1402
details = ('line-delta', False)
1403
ann._expand_record(rev2_key, (parent_key,), parent_key, record, details)
1404
self.assertEqual([(1, 1, 2), (3, 3, 0)],
1405
ann._matching_blocks[(rev2_key, parent_key)])
1407
def test__get_parent_ann_uses_matching_blocks(self):
1408
ann = self.make_annotator()
1409
rev_key = ('rev-id',)
1410
parent_key = ('parent-id',)
1411
parent_ann = [(parent_key,)]*3
1412
block_key = (rev_key, parent_key)
1413
ann._annotations_cache[parent_key] = parent_ann
1414
ann._matching_blocks[block_key] = [(0, 1, 1), (3, 3, 0)]
1415
# We should not try to access any parent_lines content, because we know
1416
# we already have the matching blocks
1417
par_ann, blocks = ann._get_parent_annotations_and_matches(rev_key,
1418
['1\n', '2\n', '3\n'], parent_key)
1419
self.assertEqual(parent_ann, par_ann)
1420
self.assertEqual([(0, 1, 1), (3, 3, 0)], blocks)
1421
self.assertEqual({}, ann._matching_blocks)
1423
def test__process_pending(self):
1424
ann = self.make_annotator()
1425
rev_key = ('rev-id',)
1428
record = ['0,1,1\n', 'new-line\n']
1429
details = ('line-delta', False)
1430
p1_record = ['line1\n', 'line2\n']
1431
ann._num_compression_children[p1_key] = 1
1432
res = ann._expand_record(rev_key, (p1_key,p2_key), p1_key,
1434
self.assertEqual(None, res)
1435
# self.assertTrue(p1_key in ann._pending_deltas)
1436
self.assertEqual({}, ann._pending_annotation)
1437
# Now insert p1, and we should be able to expand the delta
1438
res = ann._expand_record(p1_key, (), None, p1_record,
1439
('fulltext', False))
1440
self.assertEqual(p1_record, res)
1441
ann._annotations_cache[p1_key] = [(p1_key,)]*2
1442
res = ann._process_pending(p1_key)
1443
self.assertEqual([], res)
1444
self.assertFalse(p1_key in ann._pending_deltas)
1445
self.assertTrue(p2_key in ann._pending_annotation)
1446
self.assertEqual({p2_key: [(rev_key, (p1_key, p2_key))]},
1447
ann._pending_annotation)
1448
# Now fill in parent 2, and pending annotation should be satisfied
1449
res = ann._expand_record(p2_key, (), None, [], ('fulltext', False))
1450
ann._annotations_cache[p2_key] = []
1451
res = ann._process_pending(p2_key)
1452
self.assertEqual([rev_key], res)
1453
self.assertEqual({}, ann._pending_annotation)
1454
self.assertEqual({}, ann._pending_deltas)
1456
def test_record_delta_removes_basis(self):
1457
ann = self.make_annotator()
1458
ann._expand_record(('parent-id',), (), None,
1459
['line1\n', 'line2\n'], ('fulltext', False))
1460
ann._num_compression_children['parent-id'] = 2
1462
def test_annotate_special_text(self):
1463
ann = self.make_annotator()
1465
rev1_key = ('rev-1',)
1466
rev2_key = ('rev-2',)
1467
rev3_key = ('rev-3',)
1468
spec_key = ('special:',)
1469
vf.add_lines(rev1_key, [], ['initial content\n'])
1470
vf.add_lines(rev2_key, [rev1_key], ['initial content\n',
1473
vf.add_lines(rev3_key, [rev1_key], ['initial content\n',
1476
spec_text = ('initial content\n'
1480
ann.add_special_text(spec_key, [rev2_key, rev3_key], spec_text)
1481
anns, lines = ann.annotate(spec_key)
1482
self.assertEqual([(rev1_key,),
1483
(rev2_key, rev3_key),
1487
self.assertEqualDiff(spec_text, ''.join(lines))
1490
class KnitTests(TestCaseWithTransport):
1491
"""Class containing knit test helper routines."""
1493
def make_test_knit(self, annotate=False, name='test'):
1494
mapper = ConstantMapper(name)
1495
return make_file_factory(annotate, mapper)(self.get_transport())
1498
class TestBadShaError(KnitTests):
1499
"""Tests for handling of sha errors."""
1501
def test_sha_exception_has_text(self):
1502
# having the failed text included in the error allows for recovery.
1503
source = self.make_test_knit()
1504
target = self.make_test_knit(name="target")
1505
if not source._max_delta_chain:
1506
raise TestNotApplicable(
1507
"cannot get delta-caused sha failures without deltas.")
1510
broken = ('broken',)
1511
source.add_lines(basis, (), ['foo\n'])
1512
source.add_lines(broken, (basis,), ['foo\n', 'bar\n'])
1513
# Seed target with a bad basis text
1514
target.add_lines(basis, (), ['gam\n'])
1515
target.insert_record_stream(
1516
source.get_record_stream([broken], 'unordered', False))
1517
err = self.assertRaises(errors.KnitCorrupt,
1518
target.get_record_stream([broken], 'unordered', True
1519
).next().get_bytes_as, 'chunked')
1520
self.assertEqual(['gam\n', 'bar\n'], err.content)
1521
# Test for formatting with live data
1522
self.assertStartsWith(str(err), "Knit ")
1525
class TestKnitIndex(KnitTests):
1527
def test_add_versions_dictionary_compresses(self):
1528
"""Adding versions to the index should update the lookup dict"""
1529
knit = self.make_test_knit()
1531
idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])
1532
self.check_file_contents('test.kndx',
1533
'# bzr knit index 8\n'
1535
'a-1 fulltext 0 0 :'
1538
(('a-2',), ['fulltext'], (('a-2',), 0, 0), [('a-1',)]),
1539
(('a-3',), ['fulltext'], (('a-3',), 0, 0), [('a-2',)]),
1541
self.check_file_contents('test.kndx',
1542
'# bzr knit index 8\n'
1544
'a-1 fulltext 0 0 :\n'
1545
'a-2 fulltext 0 0 0 :\n'
1546
'a-3 fulltext 0 0 1 :'
1548
self.assertEqual(set([('a-3',), ('a-1',), ('a-2',)]), idx.keys())
1550
('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False)),
1551
('a-2',): ((('a-2',), 0, 0), None, (('a-1',),), ('fulltext', False)),
1552
('a-3',): ((('a-3',), 0, 0), None, (('a-2',),), ('fulltext', False)),
1553
}, idx.get_build_details(idx.keys()))
1554
self.assertEqual({('a-1',):(),
1555
('a-2',):(('a-1',),),
1556
('a-3',):(('a-2',),),},
1557
idx.get_parent_map(idx.keys()))
1559
def test_add_versions_fails_clean(self):
1560
"""If add_versions fails in the middle, it restores a pristine state.
1562
Any modifications that are made to the index are reset if all versions
1565
# This cheats a little bit by passing in a generator which will
1566
# raise an exception before the processing finishes
1567
# Other possibilities would be to have an version with the wrong number
1568
# of entries, or to make the backing transport unable to write any
1571
knit = self.make_test_knit()
1573
idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])
1575
class StopEarly(Exception):
1578
def generate_failure():
1579
"""Add some entries and then raise an exception"""
1580
yield (('a-2',), ['fulltext'], (None, 0, 0), ('a-1',))
1581
yield (('a-3',), ['fulltext'], (None, 0, 0), ('a-2',))
1584
# Assert the pre-condition
1586
self.assertEqual(set([('a-1',)]), set(idx.keys()))
1588
{('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False))},
1589
idx.get_build_details([('a-1',)]))
1590
self.assertEqual({('a-1',):()}, idx.get_parent_map(idx.keys()))
1593
self.assertRaises(StopEarly, idx.add_records, generate_failure())
1594
# And it shouldn't be modified
1597
def test_knit_index_ignores_empty_files(self):
1598
# There was a race condition in older bzr, where a ^C at the right time
1599
# could leave an empty .kndx file, which bzr would later claim was a
1600
# corrupted file since the header was not present. In reality, the file
1601
# just wasn't created, so it should be ignored.
1602
t = transport.get_transport('.')
1603
t.put_bytes('test.kndx', '')
1605
knit = self.make_test_knit()
1607
def test_knit_index_checks_header(self):
1608
t = transport.get_transport('.')
1609
t.put_bytes('test.kndx', '# not really a knit header\n\n')
1610
k = self.make_test_knit()
1611
self.assertRaises(KnitHeaderError, k.keys)
1614
class TestGraphIndexKnit(KnitTests):
1615
"""Tests for knits using a GraphIndex rather than a KnitIndex."""
1617
def make_g_index(self, name, ref_lists=0, nodes=[]):
1618
builder = GraphIndexBuilder(ref_lists)
1619
for node, references, value in nodes:
1620
builder.add_node(node, references, value)
1621
stream = builder.finish()
1622
trans = self.get_transport()
1623
size = trans.put_file(name, stream)
1624
return GraphIndex(trans, name, size)
1626
def two_graph_index(self, deltas=False, catch_adds=False):
1627
"""Build a two-graph index.
1629
:param deltas: If true, use underlying indices with two node-ref
1630
lists and 'parent' set to a delta-compressed against tail.
1632
# build a complex graph across several indices.
1634
# delta compression inn the index
1635
index1 = self.make_g_index('1', 2, [
1636
(('tip', ), 'N0 100', ([('parent', )], [], )),
1637
(('tail', ), '', ([], []))])
1638
index2 = self.make_g_index('2', 2, [
1639
(('parent', ), ' 100 78', ([('tail', ), ('ghost', )], [('tail', )])),
1640
(('separate', ), '', ([], []))])
1642
# just blob location and graph in the index.
1643
index1 = self.make_g_index('1', 1, [
1644
(('tip', ), 'N0 100', ([('parent', )], )),
1645
(('tail', ), '', ([], ))])
1646
index2 = self.make_g_index('2', 1, [
1647
(('parent', ), ' 100 78', ([('tail', ), ('ghost', )], )),
1648
(('separate', ), '', ([], ))])
1649
combined_index = CombinedGraphIndex([index1, index2])
1651
self.combined_index = combined_index
1652
self.caught_entries = []
1653
add_callback = self.catch_add
1656
return _KnitGraphIndex(combined_index, lambda:True, deltas=deltas,
1657
add_callback=add_callback)
1659
def test_keys(self):
1660
index = self.two_graph_index()
1661
self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
1664
def test_get_position(self):
1665
index = self.two_graph_index()
1666
self.assertEqual((index._graph_index._indices[0], 0, 100), index.get_position(('tip',)))
1667
self.assertEqual((index._graph_index._indices[1], 100, 78), index.get_position(('parent',)))
1669
def test_get_method_deltas(self):
1670
index = self.two_graph_index(deltas=True)
1671
self.assertEqual('fulltext', index.get_method(('tip',)))
1672
self.assertEqual('line-delta', index.get_method(('parent',)))
1674
def test_get_method_no_deltas(self):
1675
# check that the parent-history lookup is ignored with deltas=False.
1676
index = self.two_graph_index(deltas=False)
1677
self.assertEqual('fulltext', index.get_method(('tip',)))
1678
self.assertEqual('fulltext', index.get_method(('parent',)))
1680
def test_get_options_deltas(self):
1681
index = self.two_graph_index(deltas=True)
1682
self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
1683
self.assertEqual(['line-delta'], index.get_options(('parent',)))
1685
def test_get_options_no_deltas(self):
1686
# check that the parent-history lookup is ignored with deltas=False.
1687
index = self.two_graph_index(deltas=False)
1688
self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
1689
self.assertEqual(['fulltext'], index.get_options(('parent',)))
1691
def test_get_parent_map(self):
1692
index = self.two_graph_index()
1693
self.assertEqual({('parent',):(('tail',), ('ghost',))},
1694
index.get_parent_map([('parent',), ('ghost',)]))
1696
def catch_add(self, entries):
1697
self.caught_entries.append(entries)
1699
def test_add_no_callback_errors(self):
1700
index = self.two_graph_index()
1701
self.assertRaises(errors.ReadOnlyError, index.add_records,
1702
[(('new',), 'fulltext,no-eol', (None, 50, 60), ['separate'])])
1704
    def test_add_version_smoke(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60),
            [('separate',)])])
        self.assertEqual([[(('new', ), 'N50 60', ((('separate',),),))]],
            self.caught_entries)

def test_add_version_delta_not_delta_index(self):
1712
index = self.two_graph_index(catch_adds=True)
1713
self.assertRaises(errors.KnitCorrupt, index.add_records,
1714
[(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
1715
self.assertEqual([], self.caught_entries)
1717
    def test_add_version_same_dup(self):
        index = self.two_graph_index(catch_adds=True)
        # options can be spelt two different ways
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
        index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
        # position/length are ignored (because each pack could have fulltext
        # or delta, and be at a different position).
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
            [('parent',)])])
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
            [('parent',)])])
        # but neither should have added data:
        self.assertEqual([[], [], [], []], self.caught_entries)

def test_add_version_different_dup(self):
1732
index = self.two_graph_index(deltas=True, catch_adds=True)
1734
self.assertRaises(errors.KnitCorrupt, index.add_records,
1735
[(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1736
self.assertRaises(errors.KnitCorrupt, index.add_records,
1737
[(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
1739
self.assertRaises(errors.KnitCorrupt, index.add_records,
1740
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
1741
self.assertEqual([], self.caught_entries)
1743
    def test_add_versions_nodeltas(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([
                (('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
                (('new2',), 'fulltext', (None, 0, 6), [('new',)]),
                ])
        self.assertEqual([(('new', ), 'N50 60', ((('separate',),),)),
            (('new2', ), ' 0 6', ((('new',),),))],
            sorted(self.caught_entries[0]))
        self.assertEqual(1, len(self.caught_entries))

    def test_add_versions_deltas(self):
        index = self.two_graph_index(deltas=True, catch_adds=True)
        index.add_records([
                (('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
                (('new2',), 'line-delta', (None, 0, 6), [('new',)]),
                ])
        self.assertEqual([(('new', ), 'N50 60', ((('separate',),), ())),
            (('new2', ), ' 0 6', ((('new',),), (('new',),), ))],
            sorted(self.caught_entries[0]))
        self.assertEqual(1, len(self.caught_entries))

def test_add_versions_delta_not_delta_index(self):
1766
index = self.two_graph_index(catch_adds=True)
1767
self.assertRaises(errors.KnitCorrupt, index.add_records,
1768
[(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
1769
self.assertEqual([], self.caught_entries)
1771
def test_add_versions_random_id_accepted(self):
1772
index = self.two_graph_index(catch_adds=True)
1773
index.add_records([], random_id=True)
1775
    def test_add_versions_same_dup(self):
        index = self.two_graph_index(catch_adds=True)
        # options can be spelt two different ways
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100),
            [('parent',)])])
        index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100),
            [('parent',)])])
        # position/length are ignored (because each pack could have fulltext
        # or delta, and be at a different position).
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
            [('parent',)])])
        index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
            [('parent',)])])
        # but neither should have added data.
        self.assertEqual([[], [], [], []], self.caught_entries)

def test_add_versions_different_dup(self):
1792
index = self.two_graph_index(deltas=True, catch_adds=True)
1794
self.assertRaises(errors.KnitCorrupt, index.add_records,
1795
[(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1796
self.assertRaises(errors.KnitCorrupt, index.add_records,
1797
[(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
1799
self.assertRaises(errors.KnitCorrupt, index.add_records,
1800
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
1801
# change options in the second record
1802
self.assertRaises(errors.KnitCorrupt, index.add_records,
1803
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)]),
1804
(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1805
self.assertEqual([], self.caught_entries)
1807
    def make_g_index_missing_compression_parent(self):
        graph_index = self.make_g_index('missing_comp', 2,
            [(('tip', ), ' 100 78',
              ([('missing-parent', ), ('ghost', )], [('missing-parent', )]))])
        return graph_index

    def make_g_index_missing_parent(self):
        graph_index = self.make_g_index('missing_parent', 2,
            [(('parent', ), ' 100 78', ([], [])),
             (('tip', ), ' 100 78',
              ([('parent', ), ('missing-parent', )], [('parent', )])),
              ])
        return graph_index

    def make_g_index_no_external_refs(self):
        graph_index = self.make_g_index('no_external_refs', 2,
            [(('rev', ), ' 100 78',
              ([('parent', ), ('ghost', )], []))])
        return graph_index

def test_add_good_unvalidated_index(self):
1828
unvalidated = self.make_g_index_no_external_refs()
1829
combined = CombinedGraphIndex([unvalidated])
1830
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1831
index.scan_unvalidated_index(unvalidated)
1832
self.assertEqual(frozenset(), index.get_missing_compression_parents())
1834
    def test_add_missing_compression_parent_unvalidated_index(self):
        unvalidated = self.make_g_index_missing_compression_parent()
        combined = CombinedGraphIndex([unvalidated])
        index = _KnitGraphIndex(combined, lambda: True, deltas=True)
        index.scan_unvalidated_index(unvalidated)
        # This also checks that it's only the compression parent that is
        # examined, otherwise 'ghost' would also be reported as a missing
        # parent.
        self.assertEqual(
            frozenset([('missing-parent',)]),
            index.get_missing_compression_parents())

    def test_add_missing_noncompression_parent_unvalidated_index(self):
        unvalidated = self.make_g_index_missing_parent()
        combined = CombinedGraphIndex([unvalidated])
        index = _KnitGraphIndex(combined, lambda: True, deltas=True,
            track_external_parent_refs=True)
        index.scan_unvalidated_index(unvalidated)
        self.assertEqual(
            frozenset([('missing-parent',)]), index.get_missing_parents())

    def test_track_external_parent_refs(self):
        g_index = self.make_g_index('empty', 2, [])
        combined = CombinedGraphIndex([g_index])
        index = _KnitGraphIndex(combined, lambda: True, deltas=True,
            add_callback=self.catch_add, track_external_parent_refs=True)
        self.caught_entries = []
        index.add_records([
            (('new-key',), 'fulltext,no-eol', (None, 50, 60),
             [('parent-1',), ('parent-2',)])])
        self.assertEqual(
            frozenset([('parent-1',), ('parent-2',)]),
            index.get_missing_parents())

def test_add_unvalidated_index_with_present_external_references(self):
1869
index = self.two_graph_index(deltas=True)
1870
# Ugly hack to get at one of the underlying GraphIndex objects that
1871
# two_graph_index built.
1872
unvalidated = index._graph_index._indices[1]
1873
# 'parent' is an external ref of _indices[1] (unvalidated), but is
1874
# present in _indices[0].
1875
index.scan_unvalidated_index(unvalidated)
1876
self.assertEqual(frozenset(), index.get_missing_compression_parents())
1878
    def make_new_missing_parent_g_index(self, name):
        missing_parent = name + '-missing-parent'
        graph_index = self.make_g_index(name, 2,
            [((name + 'tip', ), ' 100 78',
              ([(missing_parent, ), ('ghost', )], [(missing_parent, )]))])
        return graph_index

    def test_add_multiple_unvalidated_indices_with_missing_parents(self):
        g_index_1 = self.make_new_missing_parent_g_index('one')
        g_index_2 = self.make_new_missing_parent_g_index('two')
        combined = CombinedGraphIndex([g_index_1, g_index_2])
        index = _KnitGraphIndex(combined, lambda: True, deltas=True)
        index.scan_unvalidated_index(g_index_1)
        index.scan_unvalidated_index(g_index_2)
        self.assertEqual(
            frozenset([('one-missing-parent',), ('two-missing-parent',)]),
            index.get_missing_compression_parents())

    def test_add_multiple_unvalidated_indices_with_mutual_dependencies(self):
        graph_index_a = self.make_g_index('one', 2,
            [(('parent-one', ), ' 100 78', ([('non-compression-parent',)], [])),
             (('child-of-two', ), ' 100 78',
              ([('parent-two',)], [('parent-two',)]))])
        graph_index_b = self.make_g_index('two', 2,
            [(('parent-two', ), ' 100 78', ([('non-compression-parent',)], [])),
             (('child-of-one', ), ' 100 78',
              ([('parent-one',)], [('parent-one',)]))])
        combined = CombinedGraphIndex([graph_index_a, graph_index_b])
        index = _KnitGraphIndex(combined, lambda: True, deltas=True)
        index.scan_unvalidated_index(graph_index_a)
        index.scan_unvalidated_index(graph_index_b)
        self.assertEqual(
            frozenset([]), index.get_missing_compression_parents())


class TestNoParentsGraphIndexKnit(KnitTests):
1914
"""Tests for knits using _KnitGraphIndex with no parents."""
1916
def make_g_index(self, name, ref_lists=0, nodes=[]):
1917
builder = GraphIndexBuilder(ref_lists)
1918
for node, references in nodes:
1919
builder.add_node(node, references)
1920
stream = builder.finish()
1921
trans = self.get_transport()
1922
size = trans.put_file(name, stream)
1923
return GraphIndex(trans, name, size)
1925
def test_add_good_unvalidated_index(self):
1926
unvalidated = self.make_g_index('unvalidated')
1927
combined = CombinedGraphIndex([unvalidated])
1928
index = _KnitGraphIndex(combined, lambda: True, parents=False)
1929
index.scan_unvalidated_index(unvalidated)
1930
self.assertEqual(frozenset(),
1931
index.get_missing_compression_parents())
1933
def test_parents_deltas_incompatible(self):
1934
index = CombinedGraphIndex([])
1935
self.assertRaises(errors.KnitError, _KnitGraphIndex, lambda:True,
1936
index, deltas=True, parents=False)
1938
    def two_graph_index(self, catch_adds=False):
        """Build a two-graph index.

        :param catch_adds: If true, configure an add callback that records
            entries in self.caught_entries; otherwise the index is created
            without an add callback.
        """
        # put several versions in the index.
        index1 = self.make_g_index('1', 0, [
            (('tip', ), 'N0 100'),
            (('tail', ), '')])
        index2 = self.make_g_index('2', 0, [
            (('parent', ), ' 100 78'),
            (('separate', ), '')])
        combined_index = CombinedGraphIndex([index1, index2])
        if catch_adds:
            self.combined_index = combined_index
            self.caught_entries = []
            add_callback = self.catch_add
        else:
            add_callback = None
        return _KnitGraphIndex(combined_index, lambda:True, parents=False,
            add_callback=add_callback)

    def test_keys(self):
        index = self.two_graph_index()
        self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
            set(index.keys()))

def test_get_position(self):
1967
index = self.two_graph_index()
1968
self.assertEqual((index._graph_index._indices[0], 0, 100),
1969
index.get_position(('tip',)))
1970
self.assertEqual((index._graph_index._indices[1], 100, 78),
1971
index.get_position(('parent',)))
1973
    def test_get_method(self):
        index = self.two_graph_index()
        self.assertEqual('fulltext', index.get_method(('tip',)))
        self.assertEqual('fulltext', index.get_method(('parent',)))

def test_get_options(self):
1979
index = self.two_graph_index()
1980
self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
1981
self.assertEqual(['fulltext'], index.get_options(('parent',)))
1983
def test_get_parent_map(self):
1984
index = self.two_graph_index()
1985
self.assertEqual({('parent',):None},
1986
index.get_parent_map([('parent',), ('ghost',)]))
1988
def catch_add(self, entries):
1989
self.caught_entries.append(entries)
1991
def test_add_no_callback_errors(self):
1992
index = self.two_graph_index()
1993
self.assertRaises(errors.ReadOnlyError, index.add_records,
1994
[(('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)])])
1996
def test_add_version_smoke(self):
1997
index = self.two_graph_index(catch_adds=True)
1998
index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60), [])])
1999
self.assertEqual([[(('new', ), 'N50 60')]],
2000
self.caught_entries)
2002
def test_add_version_delta_not_delta_index(self):
2003
index = self.two_graph_index(catch_adds=True)
2004
self.assertRaises(errors.KnitCorrupt, index.add_records,
2005
[(('new',), 'no-eol,line-delta', (None, 0, 100), [])])
2006
self.assertEqual([], self.caught_entries)
2008
def test_add_version_same_dup(self):
2009
index = self.two_graph_index(catch_adds=True)
2010
# options can be spelt two different ways
2011
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
2012
index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
2013
# position/length are ignored (because each pack could have fulltext or
2014
# delta, and be at a different position.
2015
index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
2016
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
2017
# but neither should have added data.
2018
self.assertEqual([[], [], [], []], self.caught_entries)
2020
def test_add_version_different_dup(self):
2021
index = self.two_graph_index(catch_adds=True)
2023
self.assertRaises(errors.KnitCorrupt, index.add_records,
2024
[(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
2025
self.assertRaises(errors.KnitCorrupt, index.add_records,
2026
[(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
2027
self.assertRaises(errors.KnitCorrupt, index.add_records,
2028
[(('tip',), 'fulltext', (None, 0, 100), [])])
2030
self.assertRaises(errors.KnitCorrupt, index.add_records,
2031
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
2032
self.assertEqual([], self.caught_entries)
2034
    def test_add_versions(self):
        index = self.two_graph_index(catch_adds=True)
        index.add_records([
                (('new',), 'fulltext,no-eol', (None, 50, 60), []),
                (('new2',), 'fulltext', (None, 0, 6), []),
                ])
        self.assertEqual([(('new', ), 'N50 60'), (('new2', ), ' 0 6')],
            sorted(self.caught_entries[0]))
        self.assertEqual(1, len(self.caught_entries))

def test_add_versions_delta_not_delta_index(self):
2045
index = self.two_graph_index(catch_adds=True)
2046
self.assertRaises(errors.KnitCorrupt, index.add_records,
2047
[(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
2048
self.assertEqual([], self.caught_entries)
2050
def test_add_versions_parents_not_parents_index(self):
2051
index = self.two_graph_index(catch_adds=True)
2052
self.assertRaises(errors.KnitCorrupt, index.add_records,
2053
[(('new',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
2054
self.assertEqual([], self.caught_entries)
2056
def test_add_versions_random_id_accepted(self):
2057
index = self.two_graph_index(catch_adds=True)
2058
index.add_records([], random_id=True)
2060
def test_add_versions_same_dup(self):
2061
index = self.two_graph_index(catch_adds=True)
2062
# options can be spelt two different ways
2063
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
2064
index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
2065
# position/length are ignored (because each pack could have fulltext or
2066
# delta, and be at a different position.
2067
index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
2068
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
2069
# but neither should have added data.
2070
self.assertEqual([[], [], [], []], self.caught_entries)
2072
def test_add_versions_different_dup(self):
2073
index = self.two_graph_index(catch_adds=True)
2075
self.assertRaises(errors.KnitCorrupt, index.add_records,
2076
[(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
2077
self.assertRaises(errors.KnitCorrupt, index.add_records,
2078
[(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
2079
self.assertRaises(errors.KnitCorrupt, index.add_records,
2080
[(('tip',), 'fulltext', (None, 0, 100), [])])
2082
self.assertRaises(errors.KnitCorrupt, index.add_records,
2083
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
2084
# change options in the second record
2085
self.assertRaises(errors.KnitCorrupt, index.add_records,
2086
[(('tip',), 'fulltext,no-eol', (None, 0, 100), []),
2087
(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
2088
self.assertEqual([], self.caught_entries)
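

# Note on the option strings used by the index tests above: each record's
# options are passed as one comma-separated string such as 'fulltext,no-eol'
# or 'line-delta', and the *_same_dup tests rely on the set of options
# mattering rather than the order in which they are spelt. A minimal
# illustrative check of that idea (not a bzrlib API):
def _example_options_equivalent(options_a, options_b):
    return set(options_a.split(',')) == set(options_b.split(','))
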
class TestKnitVersionedFiles(KnitTests):

    def assertGroupKeysForIo(self, exp_groups, keys, non_local_keys,
                             positions, _min_buffer_size=None):
        kvf = self.make_test_knit()
        if _min_buffer_size is None:
            _min_buffer_size = knit._STREAM_MIN_BUFFER_SIZE
        self.assertEqual(exp_groups, kvf._group_keys_for_io(keys,
                                        non_local_keys, positions,
                                        _min_buffer_size=_min_buffer_size))

    def assertSplitByPrefix(self, expected_map, expected_prefix_order,
                            keys):
        split, prefix_order = KnitVersionedFiles._split_by_prefix(keys)
        self.assertEqual(expected_map, split)
        self.assertEqual(expected_prefix_order, prefix_order)

    def test__group_keys_for_io(self):
        ft_detail = ('fulltext', False)
        ld_detail = ('line-delta', False)
        f_a = ('f', 'a')
        f_b = ('f', 'b')
        f_c = ('f', 'c')
        g_a = ('g', 'a')
        g_b = ('g', 'b')
        g_c = ('g', 'c')
        positions = {
            f_a: (ft_detail, (f_a, 0, 100), None),
            f_b: (ld_detail, (f_b, 100, 21), f_a),
            f_c: (ld_detail, (f_c, 180, 15), f_b),
            g_a: (ft_detail, (g_a, 121, 35), None),
            g_b: (ld_detail, (g_b, 156, 12), g_a),
            g_c: (ld_detail, (g_c, 195, 13), g_a),
            }
self.assertGroupKeysForIo([([f_a], set())],
2126
[f_a], [], positions)
2127
self.assertGroupKeysForIo([([f_a], set([f_a]))],
2128
[f_a], [f_a], positions)
2129
self.assertGroupKeysForIo([([f_a, f_b], set([]))],
2130
[f_a, f_b], [], positions)
2131
self.assertGroupKeysForIo([([f_a, f_b], set([f_b]))],
2132
[f_a, f_b], [f_b], positions)
2133
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
2134
[f_a, g_a, f_b, g_b], [], positions)
2135
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
2136
[f_a, g_a, f_b, g_b], [], positions,
2137
_min_buffer_size=150)
2138
self.assertGroupKeysForIo([([f_a, f_b], set()), ([g_a, g_b], set())],
2139
[f_a, g_a, f_b, g_b], [], positions,
2140
_min_buffer_size=100)
2141
self.assertGroupKeysForIo([([f_c], set()), ([g_b], set())],
2142
[f_c, g_b], [], positions,
2143
_min_buffer_size=125)
2144
self.assertGroupKeysForIo([([g_b, f_c], set())],
2145
[g_b, f_c], [], positions,
2146
_min_buffer_size=125)
2148
    def test__split_by_prefix(self):
        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
                                  'g': [('g', 'b'), ('g', 'a')],
                                 }, ['f', 'g'],
                                 [('f', 'a'), ('g', 'b'),
                                  ('g', 'a'), ('f', 'b')])
        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
                                  'g': [('g', 'b'), ('g', 'a')],
                                 }, ['f', 'g'],
                                 [('f', 'a'), ('f', 'b'),
                                  ('g', 'b'), ('g', 'a')])
        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
                                  'g': [('g', 'b'), ('g', 'a')],
                                 }, ['f', 'g'],
                                 [('f', 'a'), ('f', 'b'),
                                  ('g', 'b'), ('g', 'a')])
        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
                                  'g': [('g', 'b'), ('g', 'a')],
                                  '': [('a',), ('b',)]
                                 }, ['f', 'g', ''],
                                 [('f', 'a'), ('g', 'b'),
                                  ('a',), ('b',),
                                  ('g', 'a'), ('f', 'b')])
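

# A standalone sketch of the grouping behaviour test__split_by_prefix checks
# above: keys are bucketed by their first element (the empty prefix for
# length-one keys) and prefixes are reported in first-seen order. This only
# mirrors what the test expects from KnitVersionedFiles._split_by_prefix; it
# is not the bzrlib implementation.
def _example_split_by_prefix(keys):
    split_by_prefix = {}
    prefix_order = []
    for key in keys:
        if len(key) == 1:
            prefix = ''
        else:
            prefix = key[0]
        if prefix not in split_by_prefix:
            split_by_prefix[prefix] = []
            prefix_order.append(prefix)
        split_by_prefix[prefix].append(key)
    return split_by_prefix, prefix_order
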
class TestStacking(KnitTests):
2178
    def get_basis_and_test_knit(self):
        basis = self.make_test_knit(name='basis')
        basis = RecordingVersionedFilesDecorator(basis)
        test = self.make_test_knit(name='test')
        test.add_fallback_versioned_files(basis)
        return basis, test

def test_add_fallback_versioned_files(self):
2186
basis = self.make_test_knit(name='basis')
2187
test = self.make_test_knit(name='test')
2188
# It must not error; other tests test that the fallback is referred to
2189
# when accessing data.
2190
test.add_fallback_versioned_files(basis)
2192
    def test_add_lines(self):
        # lines added to the test are not added to the basis
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_cross_border = ('quux',)
        key_delta = ('zaphod',)
        test.add_lines(key, (), ['foo\n'])
        self.assertEqual({}, basis.get_parent_map([key]))
        # lines added to the test that reference across the stack do a
        # fulltext.
        basis.add_lines(key_basis, (), ['foo\n'])
        basis.calls = []
        test.add_lines(key_cross_border, (key_basis,), ['foo\n'])
        self.assertEqual('fulltext', test._index.get_method(key_cross_border))
        # we don't even need to look at the basis to see that this should be
        # stored as a fulltext
        self.assertEqual([], basis.calls)
        # Subsequent adds do delta.
        basis.calls = []
        test.add_lines(key_delta, (key_cross_border,), ['foo\n'])
        self.assertEqual('line-delta', test._index.get_method(key_delta))
        self.assertEqual([], basis.calls)

    def test_annotate(self):
        # annotations from the test knit are answered without asking the basis
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), ['foo\n'])
        details = test.annotate(key)
        self.assertEqual([(key, 'foo\n')], details)
        self.assertEqual([], basis.calls)
        # But texts that are not in the test knit are looked for in the basis
        # directly.
        basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
        basis.calls = []
        details = test.annotate(key_basis)
        self.assertEqual([(key_basis, 'foo\n'), (key_basis, 'bar\n')], details)
        # Not optimised to date:
        # self.assertEqual([("annotate", key_basis)], basis.calls)
        self.assertEqual([('get_parent_map', set([key_basis])),
            ('get_parent_map', set([key_basis])),
            ('get_record_stream', [key_basis], 'topological', True)],
            basis.calls)

    def test_check(self):
        # At the moment checking a stacked knit does implicitly check the
        # fallback files.
        basis, test = self.get_basis_and_test_knit()
        test.check()

    def test_get_parent_map(self):
        # parents in the test knit are answered without asking the basis
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), [])
        parent_map = test.get_parent_map([key])
        self.assertEqual({key: ()}, parent_map)
        self.assertEqual([], basis.calls)
        # But parents that are not in the test knit are looked for in the basis
        basis.add_lines(key_basis, (), [])
        basis.calls = []
        parent_map = test.get_parent_map([key, key_basis, key_missing])
        self.assertEqual({key: (),
            key_basis: ()}, parent_map)
        self.assertEqual([("get_parent_map", set([key_basis, key_missing]))],
            basis.calls)

    def test_get_record_stream_unordered_fulltexts(self):
        # records from the test knit are answered without asking the basis:
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), ['foo\n'])
        records = list(test.get_record_stream([key], 'unordered', True))
        self.assertEqual(1, len(records))
        self.assertEqual([], basis.calls)
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
        basis.calls = []
        records = list(test.get_record_stream([key_basis, key_missing],
            'unordered', True))
        self.assertEqual(2, len(records))
        calls = list(basis.calls)
        for record in records:
            self.assertSubset([record.key], (key_basis, key_missing))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                reference = list(basis.get_record_stream([key_basis],
                    'unordered', True))[0]
                self.assertEqual(reference.key, record.key)
                self.assertEqual(reference.sha1, record.sha1)
                self.assertEqual(reference.storage_kind, record.storage_kind)
                self.assertEqual(reference.get_bytes_as(reference.storage_kind),
                    record.get_bytes_as(record.storage_kind))
                self.assertEqual(reference.get_bytes_as('fulltext'),
                    record.get_bytes_as('fulltext'))
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_missing])),
            ("get_record_stream", [key_basis], 'unordered', True)],
            calls)

    def test_get_record_stream_ordered_fulltexts(self):
        # ordering is preserved down into the fallback store.
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_basis_2 = ('quux',)
        key_missing = ('missing',)
        test.add_lines(key, (key_basis,), ['foo\n'])
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
        basis.add_lines(key_basis_2, (), ['quux\n'])
        basis.calls = []
        # ask for in non-topological order
        records = list(test.get_record_stream(
            [key, key_basis, key_missing, key_basis_2], 'topological', True))
        self.assertEqual(4, len(records))
        results = []
        for record in records:
            self.assertSubset([record.key],
                (key_basis, key_missing, key_basis_2, key))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                results.append((record.key, record.sha1, record.storage_kind,
                    record.get_bytes_as('fulltext')))
        calls = list(basis.calls)
        order = [record[0] for record in results]
        self.assertEqual([key_basis_2, key_basis, key], order)
        for result in results:
            if result[0] == key:
                source = test
            else:
                source = basis
            record = source.get_record_stream([result[0]], 'unordered',
                True).next()
            self.assertEqual(record.key, result[0])
            self.assertEqual(record.sha1, result[1])
            # We used to check that the storage kind matched, but actually it
            # depends on whether it was sourced from the basis, or in a single
            # group, because asking for full texts returns proxy objects to a
            # _ContentMapGenerator object; so checking the kind is unneeded.
            self.assertEqual(record.get_bytes_as('fulltext'), result[3])
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
            # topological is requested from the fallback, because that is what
            # was requested at the top level.
            ("get_record_stream", [key_basis_2, key_basis], 'topological', True)],
            calls)

    def test_get_record_stream_unordered_deltas(self):
        # records from the test knit are answered without asking the basis:
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_missing = ('missing',)
        test.add_lines(key, (), ['foo\n'])
        records = list(test.get_record_stream([key], 'unordered', False))
        self.assertEqual(1, len(records))
        self.assertEqual([], basis.calls)
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
        basis.calls = []
        records = list(test.get_record_stream([key_basis, key_missing],
            'unordered', False))
        self.assertEqual(2, len(records))
        calls = list(basis.calls)
        for record in records:
            self.assertSubset([record.key], (key_basis, key_missing))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                reference = list(basis.get_record_stream([key_basis],
                    'unordered', False))[0]
                self.assertEqual(reference.key, record.key)
                self.assertEqual(reference.sha1, record.sha1)
                self.assertEqual(reference.storage_kind, record.storage_kind)
                self.assertEqual(reference.get_bytes_as(reference.storage_kind),
                    record.get_bytes_as(record.storage_kind))
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_missing])),
            ("get_record_stream", [key_basis], 'unordered', False)],
            calls)

    def test_get_record_stream_ordered_deltas(self):
        # ordering is preserved down into the fallback store.
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
        key_basis_2 = ('quux',)
        key_missing = ('missing',)
        test.add_lines(key, (key_basis,), ['foo\n'])
        # Missing (from test knit) objects are retrieved from the basis:
        basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
        basis.add_lines(key_basis_2, (), ['quux\n'])
        basis.calls = []
        # ask for in non-topological order
        records = list(test.get_record_stream(
            [key, key_basis, key_missing, key_basis_2], 'topological', False))
        self.assertEqual(4, len(records))
        results = []
        for record in records:
            self.assertSubset([record.key],
                (key_basis, key_missing, key_basis_2, key))
            if record.key == key_missing:
                self.assertIsInstance(record, AbsentContentFactory)
            else:
                results.append((record.key, record.sha1, record.storage_kind,
                    record.get_bytes_as(record.storage_kind)))
        calls = list(basis.calls)
        order = [record[0] for record in results]
        self.assertEqual([key_basis_2, key_basis, key], order)
        for result in results:
            if result[0] == key:
                source = test
            else:
                source = basis
            record = source.get_record_stream([result[0]], 'unordered',
                False).next()
            self.assertEqual(record.key, result[0])
            self.assertEqual(record.sha1, result[1])
            self.assertEqual(record.storage_kind, result[2])
            self.assertEqual(record.get_bytes_as(record.storage_kind), result[3])
        # It's not strictly minimal, but it seems reasonable for now for it to
        # ask which fallbacks have which parents.
        self.assertEqual([
            ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
            ("get_record_stream", [key_basis_2, key_basis], 'topological', False)],
            calls)

def test_get_sha1s(self):
2436
# sha1's in the test knit are answered without asking the basis
2437
        basis, test = self.get_basis_and_test_knit()
        key = ('foo',)
        key_basis = ('bar',)
2440
key_missing = ('missing',)
2441
test.add_lines(key, (), ['foo\n'])
2442
key_sha1sum = osutils.sha_string('foo\n')
2443
sha1s = test.get_sha1s([key])
2444
self.assertEqual({key: key_sha1sum}, sha1s)
2445
self.assertEqual([], basis.calls)
2446
# But texts that are not in the test knit are looked for in the basis
2447
# directly (rather than via text reconstruction) so that remote servers
2448
# etc don't have to answer with full content.
2449
basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
2450
        basis_sha1sum = osutils.sha_string('foo\nbar\n')
        basis.calls = []
        sha1s = test.get_sha1s([key, key_missing, key_basis])
2453
self.assertEqual({key: key_sha1sum,
2454
key_basis: basis_sha1sum}, sha1s)
2455
        self.assertEqual([("get_sha1s", set([key_basis, key_missing]))],
            basis.calls)

def test_insert_record_stream(self):
2459
# records are inserted as normal; insert_record_stream builds on
2460
# add_lines, so a smoke test should be all that's needed:
2462
key_basis = ('bar',)
2463
key_delta = ('zaphod',)
2464
basis, test = self.get_basis_and_test_knit()
2465
source = self.make_test_knit(name='source')
2466
        basis.add_lines(key_basis, (), ['foo\n'])
        basis.calls = []
        source.add_lines(key_basis, (), ['foo\n'])
2469
source.add_lines(key_delta, (key_basis,), ['bar\n'])
2470
stream = source.get_record_stream([key_delta], 'unordered', False)
2471
test.insert_record_stream(stream)
2472
# XXX: this does somewhat too many calls in making sure of whether it
2473
# has to recreate the full text.
2474
self.assertEqual([("get_parent_map", set([key_basis])),
2475
('get_parent_map', set([key_basis])),
2476
            ('get_record_stream', [key_basis], 'unordered', True)],
            basis.calls)
        self.assertEqual({key_delta:(key_basis,)},
2479
test.get_parent_map([key_delta]))
2480
self.assertEqual('bar\n', test.get_record_stream([key_delta],
2481
'unordered', True).next().get_bytes_as('fulltext'))
2483
def test_iter_lines_added_or_present_in_keys(self):
2484
        # Lines from the basis are returned, and lines for a given key are only
        # returned once.
        key1 = ('foo1',)
        key2 = ('foo2',)
        # all sources are asked for keys:
2489
basis, test = self.get_basis_and_test_knit()
2490
        basis.add_lines(key1, (), ["foo"])
        basis.calls = []
        lines = list(test.iter_lines_added_or_present_in_keys([key1]))
2493
self.assertEqual([("foo\n", key1)], lines)
2494
        self.assertEqual([("iter_lines_added_or_present_in_keys", set([key1]))],
            basis.calls)
        # keys in both are not duplicated:
2497
test.add_lines(key2, (), ["bar\n"])
2498
        basis.add_lines(key2, (), ["bar\n"])
        basis.calls = []
        lines = list(test.iter_lines_added_or_present_in_keys([key2]))
2501
self.assertEqual([("bar\n", key2)], lines)
2502
self.assertEqual([], basis.calls)
2504
    def test_keys(self):
        key1 = ('foo1',)
        key2 = ('foo2',)
        # all sources are asked for keys:
        basis, test = self.get_basis_and_test_knit()
        keys = test.keys()
        self.assertEqual(set(), set(keys))
        self.assertEqual([("keys",)], basis.calls)
        # keys from a basis are returned:
        basis.add_lines(key1, (), [])
        basis.calls = []
        keys = test.keys()
        self.assertEqual(set([key1]), set(keys))
        self.assertEqual([("keys",)], basis.calls)
        # keys in both are not duplicated:
        test.add_lines(key2, (), [])
        basis.add_lines(key2, (), [])
        basis.calls = []
        keys = test.keys()
        self.assertEqual(2, len(keys))
        self.assertEqual(set([key1, key2]), set(keys))
        self.assertEqual([("keys",)], basis.calls)

def test_add_mpdiffs(self):
2528
# records are inserted as normal; add_mpdiff builds on
2529
# add_lines, so a smoke test should be all that's needed:
2531
key_basis = ('bar',)
2532
key_delta = ('zaphod',)
2533
basis, test = self.get_basis_and_test_knit()
2534
source = self.make_test_knit(name='source')
2535
        basis.add_lines(key_basis, (), ['foo\n'])
        basis.calls = []
        source.add_lines(key_basis, (), ['foo\n'])
2538
source.add_lines(key_delta, (key_basis,), ['bar\n'])
2539
diffs = source.make_mpdiffs([key_delta])
2540
test.add_mpdiffs([(key_delta, (key_basis,),
2541
source.get_sha1s([key_delta])[key_delta], diffs[0])])
2542
self.assertEqual([("get_parent_map", set([key_basis])),
2543
            ('get_record_stream', [key_basis], 'unordered', True),],
            basis.calls)
        self.assertEqual({key_delta:(key_basis,)},
2546
test.get_parent_map([key_delta]))
2547
self.assertEqual('bar\n', test.get_record_stream([key_delta],
2548
'unordered', True).next().get_bytes_as('fulltext'))
2550
def test_make_mpdiffs(self):
2551
        # Generating an mpdiff across a stacking boundary should detect parent
        # texts from the basis.
        key = ('foo',)
        key_left = ('bar',)
        key_right = ('zaphod',)
2556
basis, test = self.get_basis_and_test_knit()
2557
basis.add_lines(key_left, (), ['bar\n'])
2558
        basis.add_lines(key_right, (), ['zaphod\n'])
        basis.calls = []
        test.add_lines(key, (key_left, key_right),
2561
['bar\n', 'foo\n', 'zaphod\n'])
2562
        diffs = test.make_mpdiffs([key])
        self.assertEqual([
            multiparent.MultiParent([multiparent.ParentText(0, 0, 0, 1),
                multiparent.NewText(['foo\n']),
                multiparent.ParentText(1, 0, 2, 1)])],
            diffs)
        self.assertEqual(3, len(basis.calls))
        self.assertEqual([
            ("get_parent_map", set([key_left, key_right])),
            ("get_parent_map", set([key_left, key_right])),
            ],
            basis.calls[:-1])
last_call = basis.calls[-1]
2575
self.assertEqual('get_record_stream', last_call[0])
2576
self.assertEqual(set([key_left, key_right]), set(last_call[1]))
2577
self.assertEqual('topological', last_call[2])
2578
self.assertEqual(True, last_call[3])
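

# The TestStacking tests above drive a knit whose fallback is wrapped in
# RecordingVersionedFilesDecorator, then reset and inspect its ``calls``
# list. A minimal sketch of that recording idea (illustrative only; the real
# decorator in bzrlib.versionedfile wraps the whole VersionedFiles API):
class _ExampleRecordingProxy(object):

    def __init__(self, backing):
        self._backing = backing
        self.calls = []

    def get_parent_map(self, keys):
        # Record the call with its arguments, then delegate to the backing
        # store, just as the tests above expect of their 'basis' object.
        self.calls.append(("get_parent_map", keys))
        return self._backing.get_parent_map(keys)
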
class TestNetworkBehaviour(KnitTests):
2582
"""Tests for getting data out of/into knits over the network."""
2584
def test_include_delta_closure_generates_a_knit_delta_closure(self):
2585
vf = self.make_test_knit(name='test')
2586
# put in three texts, giving ft, delta, delta
2587
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2588
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2589
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2590
# But heuristics could interfere, so check what happened:
2591
self.assertEqual(['knit-ft-gz', 'knit-delta-gz', 'knit-delta-gz'],
2592
[record.storage_kind for record in
2593
vf.get_record_stream([('base',), ('d1',), ('d2',)],
2594
'topological', False)])
2595
# generate a stream of just the deltas include_delta_closure=True,
2596
# serialise to the network, and check that we get a delta closure on the wire.
2597
stream = vf.get_record_stream([('d1',), ('d2',)], 'topological', True)
2598
netb = [record.get_bytes_as(record.storage_kind) for record in stream]
2599
# The first bytes should be a memo from _ContentMapGenerator, and the
2600
# second bytes should be empty (because its a API proxy not something
2601
# for wire serialisation.
2602
        self.assertEqual('', netb[1])
        bytes = netb[0]
        kind, line_end = network_bytes_to_kind_and_offset(bytes)
2605
self.assertEqual('knit-delta-closure', kind)
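

# In the delta-closure network form checked above, the first record's bytes
# carry the whole content map and later records are proxies whose payload is
# empty. A receiver can recover the storage kind from raw network bytes as in
# this small sketch (the helper name is illustrative only):
def _example_network_kind(record_bytes):
    kind, line_end = network_bytes_to_kind_and_offset(record_bytes)
    return kind
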
class TestContentMapGenerator(KnitTests):
2609
"""Tests for ContentMapGenerator"""
2611
def test_get_record_stream_gives_records(self):
2612
vf = self.make_test_knit(name='test')
2613
# put in three texts, giving ft, delta, delta
2614
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2615
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2616
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2617
keys = [('d1',), ('d2',)]
2618
generator = _VFContentMapGenerator(vf, keys,
2619
global_map=vf.get_parent_map(keys))
2620
        for record in generator.get_record_stream():
            if record.key == ('d1',):
                self.assertEqual('d1\n', record.get_bytes_as('fulltext'))
            else:
                self.assertEqual('d2\n', record.get_bytes_as('fulltext'))
2626
def test_get_record_stream_kinds_are_raw(self):
2627
vf = self.make_test_knit(name='test')
2628
# put in three texts, giving ft, delta, delta
2629
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2630
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2631
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2632
keys = [('base',), ('d1',), ('d2',)]
2633
generator = _VFContentMapGenerator(vf, keys,
2634
global_map=vf.get_parent_map(keys))
2635
        kinds = {('base',): 'knit-delta-closure',
            ('d1',): 'knit-delta-closure-ref',
            ('d2',): 'knit-delta-closure-ref',
            }
        for record in generator.get_record_stream():
2640
self.assertEqual(kinds[record.key], record.storage_kind)