AB_MERGE_TEXT="""unchanged|Banana cup cake recipe
new-b|- bananas (do not use plantains!!!)
unchanged|- broken tea cups
new-a|- self-raising flour
AB_MERGE=[tuple(l.split('|')) for l in AB_MERGE_TEXT.splitlines(True)]
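# Each AB_MERGE entry becomes a (state, line) tuple, e.g.
# ('unchanged', 'Banana cup cake recipe\n').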
def line_delta(from_lines, to_lines):
    """Generate line-based delta from one text to another"""
    s = difflib.SequenceMatcher(None, from_lines, to_lines)
    for op in s.get_opcodes():
        if op[0] == 'equal':
            continue
        yield '%d,%d,%d\n' % (op[1], op[2], op[4]-op[3])
        for i in range(op[3], op[4]):
            yield to_lines[i]
def apply_line_delta(basis_lines, delta_lines):
    """Apply a line-based perfect diff

    basis_lines -- text to apply the patch to
    delta_lines -- diff instructions and content
    """
class TestPlainKnitContent(TestCase, KnitContentTestsMixin):
197
def _make_content(self, lines):
198
annotated_content = AnnotatedKnitContent(lines)
199
return PlainKnitContent(annotated_content.text(), 'bogus')
201
def test_annotate(self):
202
content = self._make_content([])
203
self.assertEqual(content.annotate(), [])
205
content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
206
self.assertEqual(content.annotate(),
207
[("bogus", "text1"), ("bogus", "text2")])
209
def test_line_delta(self):
210
content1 = self._make_content([("", "a"), ("", "b")])
211
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
212
self.assertEqual(content1.line_delta(content2),
213
[(1, 2, 2, ["a", "c"])])
215
def test_line_delta_iter(self):
216
content1 = self._make_content([("", "a"), ("", "b")])
217
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
218
it = content1.line_delta_iter(content2)
219
self.assertEqual(it.next(), (1, 2, 2, ["a", "c"]))
220
self.assertRaises(StopIteration, it.next)
223
class TestAnnotatedKnitContent(TestCase, KnitContentTestsMixin):
225
def _make_content(self, lines):
226
return AnnotatedKnitContent(lines)
228
def test_annotate(self):
229
content = self._make_content([])
230
self.assertEqual(content.annotate(), [])
232
content = self._make_content([("origin1", "text1"), ("origin2", "text2")])
233
self.assertEqual(content.annotate(),
234
[("origin1", "text1"), ("origin2", "text2")])
236
def test_line_delta(self):
237
content1 = self._make_content([("", "a"), ("", "b")])
238
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
239
self.assertEqual(content1.line_delta(content2),
240
[(1, 2, 2, [("", "a"), ("", "c")])])
242
def test_line_delta_iter(self):
243
content1 = self._make_content([("", "a"), ("", "b")])
244
content2 = self._make_content([("", "a"), ("", "a"), ("", "c")])
245
it = content1.line_delta_iter(content2)
246
self.assertEqual(it.next(), (1, 2, 2, [("", "a"), ("", "c")]))
247
self.assertRaises(StopIteration, it.next)
250
class MockTransport(object):
252
    def __init__(self, file_lines=None):
        self.file_lines = file_lines
        self.calls = []
        # We have no base directory for the MockTransport
        self.base = ''
def get(self, filename):
259
if self.file_lines is None:
260
raise NoSuchFile(filename)
262
return StringIO("\n".join(self.file_lines))
264
def readv(self, relpath, offsets):
265
fp = self.get(relpath)
266
for offset, size in offsets:
268
yield offset, fp.read(size)
270
    def __getattr__(self, name):
        def queue_call(*args, **kwargs):
            self.calls.append((name, args, kwargs))
        return queue_call
class MockReadvFailingTransport(MockTransport):
277
"""Fail in the middle of a readv() result.
279
This Transport will successfully yield the first two requested hunks, but
280
raise NoSuchFile for the rest.
432
    while i < len(delta_lines):
        l = delta_lines[i]
        a, b, c = map(long, l.split(','))
        i = i + 1
        out[offset+a:offset+b] = delta_lines[i:i+c]
        i = i + c
        offset = offset - (b - a) + c
    return out
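# Illustrative round-trip for the two helpers above. This sketch is not part
# of the original test module; the sample texts are invented, and it assumes
# line_delta/apply_line_delta behave as their docstrings describe.
def _example_line_delta_roundtrip():
    basis = ['one\n', 'two\n', 'three\n']
    target = ['one\n', 'TWO\n', 'three\n', 'four\n']
    # Each hunk is a 'start,end,count\n' header in basis coordinates followed
    # by `count` replacement lines taken from the target text.
    delta = list(line_delta(basis, target))
    assert apply_line_delta(basis, delta) == target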
class TestWeaveToKnit(KnitTests):
444
def test_weave_to_knit_matches(self):
445
# check that the WeaveToKnit is_compatible function
446
# registers True for a Weave to a Knit.
283
    def readv(self, relpath, offsets):
        count = 0
        for result in MockTransport.readv(self, relpath, offsets):
            count += 1
            # we use 2 because the first offset is the pack header, the second
            # is the first actual content request
            if count > 2:
                raise errors.NoSuchFile(relpath)
            yield result
class KnitRecordAccessTestsMixin(object):
295
"""Tests for getting and putting knit records."""
297
def test_add_raw_records(self):
298
"""Add_raw_records adds records retrievable later."""
299
access = self.get_access()
300
memos = access.add_raw_records([('key', 10)], '1234567890')
301
self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
303
def test_add_several_raw_records(self):
304
"""add_raw_records with many records and read some back."""
305
access = self.get_access()
306
memos = access.add_raw_records([('key', 10), ('key2', 2), ('key3', 5)],
308
self.assertEqual(['1234567890', '12', '34567'],
309
list(access.get_raw_records(memos)))
310
self.assertEqual(['1234567890'],
311
list(access.get_raw_records(memos[0:1])))
312
self.assertEqual(['12'],
313
list(access.get_raw_records(memos[1:2])))
314
self.assertEqual(['34567'],
315
list(access.get_raw_records(memos[2:3])))
316
self.assertEqual(['1234567890', '34567'],
317
list(access.get_raw_records(memos[0:1] + memos[2:3])))
320
class TestKnitKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
321
"""Tests for the .kndx implementation."""
323
def get_access(self):
324
"""Get a .knit style access instance."""
325
mapper = ConstantMapper("foo")
326
access = _KnitKeyAccess(self.get_transport(), mapper)
330
class _TestException(Exception):
331
"""Just an exception for local tests to use."""
334
class TestPackKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
335
"""Tests for the pack based access."""
337
def get_access(self):
338
return self._get_access()[0]
340
    def _get_access(self, packname='packfile', index='FOO'):
        transport = self.get_transport()
        def write_data(bytes):
            transport.append_bytes(packname, bytes)
        writer = pack.ContainerWriter(write_data)
        writer.begin()
        access = _DirectPackAccess({})
        access.set_writer(writer, index, (transport, packname))
        return access, writer
    def make_pack_file(self):
        """Create a pack file with 2 records."""
        access, writer = self._get_access(packname='packname', index='foo')
        memos = []
        memos.extend(access.add_raw_records([('key1', 10)], '1234567890'))
        memos.extend(access.add_raw_records([('key2', 5)], '12345'))
        writer.end()
        return memos
def make_vf_for_retrying(self):
360
"""Create 3 packs and a reload function.
362
Originally, 2 pack files will have the data, but one will be missing.
363
And then the third will be used in place of the first two if reload()
366
:return: (versioned_file, reload_counter)
367
versioned_file a KnitVersionedFiles using the packs for access
369
builder = self.make_branch_builder('.', format="1.9")
370
builder.start_series()
371
builder.build_snapshot('rev-1', None, [
372
('add', ('', 'root-id', 'directory', None)),
373
('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
375
builder.build_snapshot('rev-2', ['rev-1'], [
376
('modify', ('file-id', 'content\nrev 2\n')),
378
builder.build_snapshot('rev-3', ['rev-2'], [
379
('modify', ('file-id', 'content\nrev 3\n')),
381
builder.finish_series()
382
b = builder.get_branch()
384
self.addCleanup(b.unlock)
385
# Pack these three revisions into another pack file, but don't remove
388
collection = repo._pack_collection
389
collection.ensure_loaded()
390
orig_packs = collection.packs
391
packer = pack_repo.Packer(collection, orig_packs, '.testpack')
392
new_pack = packer.pack()
393
# forget about the new pack
397
# Set up a reload() function that switches to using the new pack file
398
new_index = new_pack.revision_index
399
access_tuple = new_pack.access_tuple()
400
reload_counter = [0, 0, 0]
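        # reload_counter tracks: [total reload() calls, calls that actually
        # swapped in the new pack, calls that were no-ops because the swap
        # had already happened].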
402
reload_counter[0] += 1
403
if reload_counter[1] > 0:
404
# We already reloaded, nothing more to do
405
reload_counter[2] += 1
407
reload_counter[1] += 1
408
vf._index._graph_index._indices[:] = [new_index]
409
vf._access._indices.clear()
410
vf._access._indices[new_index] = access_tuple
412
# Delete one of the pack files so the data will need to be reloaded. We
413
# will delete the file with 'rev-2' in it
414
trans, name = orig_packs[1].access_tuple()
416
# We don't have the index trigger reloading because we want to test
417
# that we reload when the .pack disappears
418
vf._access._reload_func = reload
419
return vf, reload_counter
421
    def make_reload_func(self, return_val=True):
        reload_called = [0]
        def reload():
            reload_called[0] += 1
            return return_val
        return reload_called, reload
    def make_retry_exception(self):
        # We raise a real exception so that sys.exc_info() is properly
        # populated
        try:
            raise _TestException('foobar')
        except _TestException, e:
            retry_exc = errors.RetryWithNewPacks(None, reload_occurred=False,
                                                 exc_info=sys.exc_info())
        return retry_exc
def test_read_from_several_packs(self):
439
access, writer = self._get_access()
441
memos.extend(access.add_raw_records([('key', 10)], '1234567890'))
443
access, writer = self._get_access('pack2', 'FOOBAR')
444
memos.extend(access.add_raw_records([('key', 5)], '12345'))
446
access, writer = self._get_access('pack3', 'BAZ')
447
memos.extend(access.add_raw_records([('key', 5)], 'alpha'))
449
transport = self.get_transport()
450
access = _DirectPackAccess({"FOO":(transport, 'packfile'),
451
"FOOBAR":(transport, 'pack2'),
452
"BAZ":(transport, 'pack3')})
453
self.assertEqual(['1234567890', '12345', 'alpha'],
454
list(access.get_raw_records(memos)))
455
self.assertEqual(['1234567890'],
456
list(access.get_raw_records(memos[0:1])))
457
self.assertEqual(['12345'],
458
list(access.get_raw_records(memos[1:2])))
459
self.assertEqual(['alpha'],
460
list(access.get_raw_records(memos[2:3])))
461
self.assertEqual(['1234567890', 'alpha'],
462
list(access.get_raw_records(memos[0:1] + memos[2:3])))
464
def test_set_writer(self):
465
"""The writer should be settable post construction."""
466
access = _DirectPackAccess({})
467
transport = self.get_transport()
468
packname = 'packfile'
470
def write_data(bytes):
471
transport.append_bytes(packname, bytes)
472
writer = pack.ContainerWriter(write_data)
474
access.set_writer(writer, index, (transport, packname))
475
memos = access.add_raw_records([('key', 10)], '1234567890')
477
self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
479
def test_missing_index_raises_retry(self):
480
memos = self.make_pack_file()
481
transport = self.get_transport()
482
reload_called, reload_func = self.make_reload_func()
483
# Note that the index key has changed from 'foo' to 'bar'
484
access = _DirectPackAccess({'bar':(transport, 'packname')},
485
reload_func=reload_func)
486
e = self.assertListRaises(errors.RetryWithNewPacks,
487
access.get_raw_records, memos)
488
# Because a key was passed in which does not match our index list, we
489
# assume that the listing was already reloaded
490
self.assertTrue(e.reload_occurred)
491
self.assertIsInstance(e.exc_info, tuple)
492
self.assertIs(e.exc_info[0], KeyError)
493
self.assertIsInstance(e.exc_info[1], KeyError)
495
def test_missing_index_raises_key_error_with_no_reload(self):
496
memos = self.make_pack_file()
497
transport = self.get_transport()
498
# Note that the index key has changed from 'foo' to 'bar'
499
access = _DirectPackAccess({'bar':(transport, 'packname')})
500
e = self.assertListRaises(KeyError, access.get_raw_records, memos)
502
def test_missing_file_raises_retry(self):
503
memos = self.make_pack_file()
504
transport = self.get_transport()
505
reload_called, reload_func = self.make_reload_func()
506
# Note that the 'filename' has been changed to 'different-packname'
507
access = _DirectPackAccess({'foo':(transport, 'different-packname')},
508
reload_func=reload_func)
509
e = self.assertListRaises(errors.RetryWithNewPacks,
510
access.get_raw_records, memos)
511
# The file has gone missing, so we assume we need to reload
512
self.assertFalse(e.reload_occurred)
513
self.assertIsInstance(e.exc_info, tuple)
514
self.assertIs(e.exc_info[0], errors.NoSuchFile)
515
self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
516
self.assertEqual('different-packname', e.exc_info[1].path)
518
def test_missing_file_raises_no_such_file_with_no_reload(self):
519
memos = self.make_pack_file()
520
transport = self.get_transport()
521
# Note that the 'filename' has been changed to 'different-packname'
522
access = _DirectPackAccess({'foo':(transport, 'different-packname')})
523
e = self.assertListRaises(errors.NoSuchFile,
524
access.get_raw_records, memos)
526
def test_failing_readv_raises_retry(self):
527
memos = self.make_pack_file()
528
transport = self.get_transport()
529
failing_transport = MockReadvFailingTransport(
530
[transport.get_bytes('packname')])
531
reload_called, reload_func = self.make_reload_func()
532
access = _DirectPackAccess({'foo':(failing_transport, 'packname')},
533
reload_func=reload_func)
534
# Asking for a single record will not trigger the Mock failure
535
self.assertEqual(['1234567890'],
536
list(access.get_raw_records(memos[:1])))
537
self.assertEqual(['12345'],
538
list(access.get_raw_records(memos[1:2])))
539
# A multiple offset readv() will fail mid-way through
540
e = self.assertListRaises(errors.RetryWithNewPacks,
541
access.get_raw_records, memos)
542
# The file has gone missing, so we assume we need to reload
543
self.assertFalse(e.reload_occurred)
544
self.assertIsInstance(e.exc_info, tuple)
545
self.assertIs(e.exc_info[0], errors.NoSuchFile)
546
self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
547
self.assertEqual('packname', e.exc_info[1].path)
549
def test_failing_readv_raises_no_such_file_with_no_reload(self):
550
memos = self.make_pack_file()
551
transport = self.get_transport()
552
failing_transport = MockReadvFailingTransport(
553
[transport.get_bytes('packname')])
554
reload_called, reload_func = self.make_reload_func()
555
access = _DirectPackAccess({'foo':(failing_transport, 'packname')})
556
# Asking for a single record will not trigger the Mock failure
557
self.assertEqual(['1234567890'],
558
list(access.get_raw_records(memos[:1])))
559
self.assertEqual(['12345'],
560
list(access.get_raw_records(memos[1:2])))
561
# A multiple offset readv() will fail mid-way through
562
e = self.assertListRaises(errors.NoSuchFile,
563
access.get_raw_records, memos)
565
def test_reload_or_raise_no_reload(self):
566
access = _DirectPackAccess({}, reload_func=None)
567
retry_exc = self.make_retry_exception()
568
# Without a reload_func, we will just re-raise the original exception
569
self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
571
def test_reload_or_raise_reload_changed(self):
572
reload_called, reload_func = self.make_reload_func(return_val=True)
573
access = _DirectPackAccess({}, reload_func=reload_func)
574
retry_exc = self.make_retry_exception()
575
access.reload_or_raise(retry_exc)
576
self.assertEqual([1], reload_called)
577
retry_exc.reload_occurred=True
578
access.reload_or_raise(retry_exc)
579
self.assertEqual([2], reload_called)
581
def test_reload_or_raise_reload_no_change(self):
582
reload_called, reload_func = self.make_reload_func(return_val=False)
583
access = _DirectPackAccess({}, reload_func=reload_func)
584
retry_exc = self.make_retry_exception()
585
# If reload_occurred is False, then we consider it an error to have
586
# reload_func() return False (no changes).
587
self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
588
self.assertEqual([1], reload_called)
589
retry_exc.reload_occurred=True
590
# If reload_occurred is True, then we assume nothing changed because
591
# it had changed earlier, but didn't change again
592
access.reload_or_raise(retry_exc)
593
self.assertEqual([2], reload_called)
595
def test_annotate_retries(self):
596
vf, reload_counter = self.make_vf_for_retrying()
597
# It is a little bit bogus to annotate the Revision VF, but it works,
598
# as we have ancestry stored there
600
reload_lines = vf.annotate(key)
601
self.assertEqual([1, 1, 0], reload_counter)
602
plain_lines = vf.annotate(key)
603
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
604
if reload_lines != plain_lines:
605
self.fail('Annotation was not identical with reloading.')
606
# Now delete the packs-in-use, which should trigger another reload, but
607
# this time we just raise an exception because we can't recover
608
for trans, name in vf._access._indices.itervalues():
610
self.assertRaises(errors.NoSuchFile, vf.annotate, key)
611
self.assertEqual([2, 1, 1], reload_counter)
613
def test__get_record_map_retries(self):
614
vf, reload_counter = self.make_vf_for_retrying()
615
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
616
records = vf._get_record_map(keys)
617
self.assertEqual(keys, sorted(records.keys()))
618
self.assertEqual([1, 1, 0], reload_counter)
619
# Now delete the packs-in-use, which should trigger another reload, but
620
# this time we just raise an exception because we can't recover
621
for trans, name in vf._access._indices.itervalues():
623
self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
624
self.assertEqual([2, 1, 1], reload_counter)
626
def test_get_record_stream_retries(self):
627
vf, reload_counter = self.make_vf_for_retrying()
628
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
629
record_stream = vf.get_record_stream(keys, 'topological', False)
630
record = record_stream.next()
631
self.assertEqual(('rev-1',), record.key)
632
self.assertEqual([0, 0, 0], reload_counter)
633
record = record_stream.next()
634
self.assertEqual(('rev-2',), record.key)
635
self.assertEqual([1, 1, 0], reload_counter)
636
record = record_stream.next()
637
self.assertEqual(('rev-3',), record.key)
638
self.assertEqual([1, 1, 0], reload_counter)
639
# Now delete all pack files, and see that we raise the right error
640
for trans, name in vf._access._indices.itervalues():
642
self.assertListRaises(errors.NoSuchFile,
643
vf.get_record_stream, keys, 'topological', False)
645
def test_iter_lines_added_or_present_in_keys_retries(self):
646
vf, reload_counter = self.make_vf_for_retrying()
647
keys = [('rev-1',), ('rev-2',), ('rev-3',)]
648
# Unfortunately, iter_lines_added_or_present_in_keys iterates the
649
        # result in random order (determined by the iteration order from a
        # set()), so we don't have any solid way to control whether data is
        # read before or after. However, we try to delete the middle node to
        # exercise the code well.
# What we care about is that all lines are always yielded, but not
656
reload_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
657
self.assertEqual([1, 1, 0], reload_counter)
658
# Now do it again, to make sure the result is equivalent
659
plain_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
660
self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
661
self.assertEqual(plain_lines, reload_lines)
662
self.assertEqual(21, len(plain_lines))
663
# Now delete all pack files, and see that we raise the right error
664
for trans, name in vf._access._indices.itervalues():
666
self.assertListRaises(errors.NoSuchFile,
667
vf.iter_lines_added_or_present_in_keys, keys)
668
self.assertEqual([2, 1, 1], reload_counter)
670
def test_get_record_stream_yields_disk_sorted_order(self):
671
        # if we get 'unordered', pick a semi-optimal order for reading. The
672
# order should be grouped by pack file, and then by position in file
673
repo = self.make_repository('test', format='pack-0.92')
675
self.addCleanup(repo.unlock)
676
repo.start_write_group()
678
vf.add_lines(('f-id', 'rev-5'), [('f-id', 'rev-4')], ['lines\n'])
679
vf.add_lines(('f-id', 'rev-1'), [], ['lines\n'])
680
vf.add_lines(('f-id', 'rev-2'), [('f-id', 'rev-1')], ['lines\n'])
681
repo.commit_write_group()
682
# We inserted them as rev-5, rev-1, rev-2, we should get them back in
684
stream = vf.get_record_stream([('f-id', 'rev-1'), ('f-id', 'rev-5'),
685
('f-id', 'rev-2')], 'unordered', False)
686
keys = [r.key for r in stream]
687
self.assertEqual([('f-id', 'rev-5'), ('f-id', 'rev-1'),
688
('f-id', 'rev-2')], keys)
689
repo.start_write_group()
690
vf.add_lines(('f-id', 'rev-4'), [('f-id', 'rev-3')], ['lines\n'])
691
vf.add_lines(('f-id', 'rev-3'), [('f-id', 'rev-2')], ['lines\n'])
692
vf.add_lines(('f-id', 'rev-6'), [('f-id', 'rev-5')], ['lines\n'])
693
repo.commit_write_group()
694
# Request in random order, to make sure the output order isn't based on
696
request_keys = set(('f-id', 'rev-%d' % i) for i in range(1, 7))
697
stream = vf.get_record_stream(request_keys, 'unordered', False)
698
keys = [r.key for r in stream]
699
# We want to get the keys back in disk order, but it doesn't matter
700
# which pack we read from first. So this can come back in 2 orders
701
alt1 = [('f-id', 'rev-%d' % i) for i in [4, 3, 6, 5, 1, 2]]
702
alt2 = [('f-id', 'rev-%d' % i) for i in [5, 1, 2, 4, 3, 6]]
703
if keys != alt1 and keys != alt2:
704
self.fail('Returned key order did not match either expected order.'
705
' expected %s or %s, not %s'
706
% (alt1, alt2, keys))
709
class LowLevelKnitDataTests(TestCase):
711
    def create_gz_content(self, text):
        sio = StringIO()
        gz_file = gzip.GzipFile(mode='wb', fileobj=sio)
        gz_file.write(text)
        gz_file.close()
        return sio.getvalue()
def make_multiple_records(self):
719
"""Create the content for multiple records."""
720
sha1sum = osutils.sha('foo\nbar\n').hexdigest()
722
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
727
record_1 = (0, len(gz_txt), sha1sum)
728
total_txt.append(gz_txt)
729
sha1sum = osutils.sha('baz\n').hexdigest()
730
gz_txt = self.create_gz_content('version rev-id-2 1 %s\n'
734
record_2 = (record_1[1], len(gz_txt), sha1sum)
735
total_txt.append(gz_txt)
736
return total_txt, record_1, record_2
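    # Illustrative sketch, not in the original tests: the uncompressed text
    # fed to create_gz_content() follows the knit data-file layout, i.e. a
    # 'version <id> <line-count> <sha1>' header, the content lines, and an
    # 'end <id>' trailer.
    def make_example_record(self):
        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
        return self.create_gz_content('version rev-id-1 2 %s\n'
                                      'foo\n'
                                      'bar\n'
                                      'end rev-id-1\n'
                                      % (sha1sum,))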
738
def test_valid_knit_data(self):
739
sha1sum = osutils.sha('foo\nbar\n').hexdigest()
740
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
745
transport = MockTransport([gz_txt])
746
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
747
knit = KnitVersionedFiles(None, access)
748
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
750
contents = list(knit._read_records_iter(records))
751
self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'],
752
'4e48e2c9a3d2ca8a708cb0cc545700544efb5021')], contents)
754
raw_contents = list(knit._read_records_iter_raw(records))
755
self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
757
def test_multiple_records_valid(self):
758
total_txt, record_1, record_2 = self.make_multiple_records()
759
transport = MockTransport([''.join(total_txt)])
760
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
761
knit = KnitVersionedFiles(None, access)
762
records = [(('rev-id-1',), (('rev-id-1',), record_1[0], record_1[1])),
763
(('rev-id-2',), (('rev-id-2',), record_2[0], record_2[1]))]
765
contents = list(knit._read_records_iter(records))
766
self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'], record_1[2]),
767
(('rev-id-2',), ['baz\n'], record_2[2])],
770
raw_contents = list(knit._read_records_iter_raw(records))
771
self.assertEqual([(('rev-id-1',), total_txt[0], record_1[2]),
772
(('rev-id-2',), total_txt[1], record_2[2])],
775
def test_not_enough_lines(self):
776
sha1sum = osutils.sha('foo\n').hexdigest()
777
        # record says 2 lines, data says 1
778
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
782
transport = MockTransport([gz_txt])
783
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
784
knit = KnitVersionedFiles(None, access)
785
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
786
self.assertRaises(errors.KnitCorrupt, list,
787
knit._read_records_iter(records))
789
# read_records_iter_raw won't detect that sort of mismatch/corruption
790
raw_contents = list(knit._read_records_iter_raw(records))
791
self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
793
def test_too_many_lines(self):
794
sha1sum = osutils.sha('foo\nbar\n').hexdigest()
795
        # record says 1 line, data says 2
796
gz_txt = self.create_gz_content('version rev-id-1 1 %s\n'
801
transport = MockTransport([gz_txt])
802
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
803
knit = KnitVersionedFiles(None, access)
804
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
805
self.assertRaises(errors.KnitCorrupt, list,
806
knit._read_records_iter(records))
808
# read_records_iter_raw won't detect that sort of mismatch/corruption
809
raw_contents = list(knit._read_records_iter_raw(records))
810
self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
812
def test_mismatched_version_id(self):
813
sha1sum = osutils.sha('foo\nbar\n').hexdigest()
814
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
819
transport = MockTransport([gz_txt])
820
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
821
knit = KnitVersionedFiles(None, access)
822
# We are asking for rev-id-2, but the data is rev-id-1
823
records = [(('rev-id-2',), (('rev-id-2',), 0, len(gz_txt)))]
824
self.assertRaises(errors.KnitCorrupt, list,
825
knit._read_records_iter(records))
827
# read_records_iter_raw detects mismatches in the header
828
self.assertRaises(errors.KnitCorrupt, list,
829
knit._read_records_iter_raw(records))
831
def test_uncompressed_data(self):
832
sha1sum = osutils.sha('foo\nbar\n').hexdigest()
833
txt = ('version rev-id-1 2 %s\n'
838
transport = MockTransport([txt])
839
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
840
knit = KnitVersionedFiles(None, access)
841
records = [(('rev-id-1',), (('rev-id-1',), 0, len(txt)))]
843
# We don't have valid gzip data ==> corrupt
844
self.assertRaises(errors.KnitCorrupt, list,
845
knit._read_records_iter(records))
847
# read_records_iter_raw will notice the bad data
848
self.assertRaises(errors.KnitCorrupt, list,
849
knit._read_records_iter_raw(records))
851
def test_corrupted_data(self):
852
sha1sum = osutils.sha('foo\nbar\n').hexdigest()
853
gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
858
# Change 2 bytes in the middle to \xff
859
gz_txt = gz_txt[:10] + '\xff\xff' + gz_txt[12:]
860
transport = MockTransport([gz_txt])
861
access = _KnitKeyAccess(transport, ConstantMapper('filename'))
862
knit = KnitVersionedFiles(None, access)
863
records = [(('rev-id-1',), (('rev-id-1',), 0, len(gz_txt)))]
864
self.assertRaises(errors.KnitCorrupt, list,
865
knit._read_records_iter(records))
866
# read_records_iter_raw will barf on bad gz data
867
self.assertRaises(errors.KnitCorrupt, list,
868
knit._read_records_iter_raw(records))
871
class LowLevelKnitIndexTests(TestCase):
873
    def get_knit_index(self, transport, name, mode):
        mapper = ConstantMapper(name)
        orig = knit._load_data
        def reset():
            knit._load_data = orig
        self.addCleanup(reset)
        from bzrlib._knit_load_data_py import _load_data_py
        knit._load_data = _load_data_py
        allow_writes = lambda: 'w' in mode
        return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
def test_create_file(self):
885
transport = MockTransport()
886
index = self.get_knit_index(transport, "filename", "w")
888
call = transport.calls.pop(0)
889
# call[1][1] is a StringIO - we can't test it by simple equality.
890
self.assertEqual('put_file_non_atomic', call[0])
891
self.assertEqual('filename.kndx', call[1][0])
892
# With no history, _KndxIndex writes a new index:
893
self.assertEqual(_KndxIndex.HEADER,
894
call[1][1].getvalue())
895
self.assertEqual({'create_parent_dir': True}, call[2])
897
def test_read_utf8_version_id(self):
898
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
899
utf8_revision_id = unicode_revision_id.encode('utf-8')
900
transport = MockTransport([
902
'%s option 0 1 :' % (utf8_revision_id,)
904
index = self.get_knit_index(transport, "filename", "r")
905
# _KndxIndex is a private class, and deals in utf8 revision_ids, not
906
# Unicode revision_ids.
907
self.assertEqual({(utf8_revision_id,):()},
908
index.get_parent_map(index.keys()))
909
self.assertFalse((unicode_revision_id,) in index.keys())
911
def test_read_utf8_parents(self):
912
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
913
utf8_revision_id = unicode_revision_id.encode('utf-8')
914
transport = MockTransport([
916
"version option 0 1 .%s :" % (utf8_revision_id,)
918
index = self.get_knit_index(transport, "filename", "r")
919
self.assertEqual({("version",):((utf8_revision_id,),)},
920
index.get_parent_map(index.keys()))
922
def test_read_ignore_corrupted_lines(self):
923
transport = MockTransport([
926
"corrupted options 0 1 .b .c ",
927
"version options 0 1 :"
929
index = self.get_knit_index(transport, "filename", "r")
930
self.assertEqual(1, len(index.keys()))
931
self.assertEqual(set([("version",)]), index.keys())
933
def test_read_corrupted_header(self):
934
transport = MockTransport(['not a bzr knit index header\n'])
935
index = self.get_knit_index(transport, "filename", "r")
936
self.assertRaises(KnitHeaderError, index.keys)
938
def test_read_duplicate_entries(self):
939
transport = MockTransport([
941
"parent options 0 1 :",
942
"version options1 0 1 0 :",
943
"version options2 1 2 .other :",
944
"version options3 3 4 0 .other :"
946
index = self.get_knit_index(transport, "filename", "r")
947
self.assertEqual(2, len(index.keys()))
948
        # check that the index used is the first one written. (Specific
        # to KnitIndex style indices.)
950
self.assertEqual("1", index._dictionary_compress([("version",)]))
951
self.assertEqual((("version",), 3, 4), index.get_position(("version",)))
952
self.assertEqual(["options3"], index.get_options(("version",)))
953
self.assertEqual({("version",):(("parent",), ("other",))},
954
index.get_parent_map([("version",)]))
956
def test_read_compressed_parents(self):
957
transport = MockTransport([
961
"c option 0 1 1 0 :",
963
index = self.get_knit_index(transport, "filename", "r")
964
self.assertEqual({("b",):(("a",),), ("c",):(("b",), ("a",))},
965
index.get_parent_map([("b",), ("c",)]))
967
def test_write_utf8_version_id(self):
968
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
969
utf8_revision_id = unicode_revision_id.encode('utf-8')
970
transport = MockTransport([
973
index = self.get_knit_index(transport, "filename", "r")
975
((utf8_revision_id,), ["option"], ((utf8_revision_id,), 0, 1), [])])
976
call = transport.calls.pop(0)
977
# call[1][1] is a StringIO - we can't test it by simple equality.
978
self.assertEqual('put_file_non_atomic', call[0])
979
self.assertEqual('filename.kndx', call[1][0])
980
# With no history, _KndxIndex writes a new index:
981
self.assertEqual(_KndxIndex.HEADER +
982
"\n%s option 0 1 :" % (utf8_revision_id,),
983
call[1][1].getvalue())
984
self.assertEqual({'create_parent_dir': True}, call[2])
986
def test_write_utf8_parents(self):
987
unicode_revision_id = u"version-\N{CYRILLIC CAPITAL LETTER A}"
988
utf8_revision_id = unicode_revision_id.encode('utf-8')
989
transport = MockTransport([
992
index = self.get_knit_index(transport, "filename", "r")
994
(("version",), ["option"], (("version",), 0, 1), [(utf8_revision_id,)])])
995
call = transport.calls.pop(0)
996
# call[1][1] is a StringIO - we can't test it by simple equality.
997
self.assertEqual('put_file_non_atomic', call[0])
998
self.assertEqual('filename.kndx', call[1][0])
999
# With no history, _KndxIndex writes a new index:
1000
self.assertEqual(_KndxIndex.HEADER +
1001
"\nversion option 0 1 .%s :" % (utf8_revision_id,),
1002
call[1][1].getvalue())
1003
self.assertEqual({'create_parent_dir': True}, call[2])
1005
def test_keys(self):
1006
transport = MockTransport([
1009
index = self.get_knit_index(transport, "filename", "r")
1011
self.assertEqual(set(), index.keys())
1013
index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
1014
self.assertEqual(set([("a",)]), index.keys())
1016
index.add_records([(("a",), ["option"], (("a",), 0, 1), [])])
1017
self.assertEqual(set([("a",)]), index.keys())
1019
index.add_records([(("b",), ["option"], (("b",), 0, 1), [])])
1020
self.assertEqual(set([("a",), ("b",)]), index.keys())
1022
def add_a_b(self, index, random_id=None):
1024
if random_id is not None:
1025
kwargs["random_id"] = random_id
1027
(("a",), ["option"], (("a",), 0, 1), [("b",)]),
1028
(("a",), ["opt"], (("a",), 1, 2), [("c",)]),
1029
(("b",), ["option"], (("b",), 2, 3), [("a",)])
1032
def assertIndexIsAB(self, index):
1037
index.get_parent_map(index.keys()))
1038
self.assertEqual((("a",), 1, 2), index.get_position(("a",)))
1039
self.assertEqual((("b",), 2, 3), index.get_position(("b",)))
1040
self.assertEqual(["opt"], index.get_options(("a",)))
1042
def test_add_versions(self):
1043
transport = MockTransport([
1046
index = self.get_knit_index(transport, "filename", "r")
1049
call = transport.calls.pop(0)
1050
# call[1][1] is a StringIO - we can't test it by simple equality.
1051
self.assertEqual('put_file_non_atomic', call[0])
1052
self.assertEqual('filename.kndx', call[1][0])
1053
# With no history, _KndxIndex writes a new index:
1056
"\na option 0 1 .b :"
1058
"\nb option 2 3 0 :",
1059
call[1][1].getvalue())
1060
self.assertEqual({'create_parent_dir': True}, call[2])
1061
self.assertIndexIsAB(index)
1063
def test_add_versions_random_id_is_accepted(self):
1064
transport = MockTransport([
1067
index = self.get_knit_index(transport, "filename", "r")
1068
self.add_a_b(index, random_id=True)
1070
def test_delay_create_and_add_versions(self):
1071
transport = MockTransport()
1073
index = self.get_knit_index(transport, "filename", "w")
1075
self.assertEqual([], transport.calls)
1078
#[ {"dir_mode": 0777, "create_parent_dir": True, "mode": "wb"},
1080
        # Two calls: one during which we load the existing index (and when it's
        # missing, create it), then a second where we write the contents out.
1082
self.assertEqual(2, len(transport.calls))
1083
call = transport.calls.pop(0)
1084
self.assertEqual('put_file_non_atomic', call[0])
1085
self.assertEqual('filename.kndx', call[1][0])
1086
# With no history, _KndxIndex writes a new index:
1087
self.assertEqual(_KndxIndex.HEADER, call[1][1].getvalue())
1088
self.assertEqual({'create_parent_dir': True}, call[2])
1089
call = transport.calls.pop(0)
1090
# call[1][1] is a StringIO - we can't test it by simple equality.
1091
self.assertEqual('put_file_non_atomic', call[0])
1092
self.assertEqual('filename.kndx', call[1][0])
1093
# With no history, _KndxIndex writes a new index:
1096
"\na option 0 1 .b :"
1098
"\nb option 2 3 0 :",
1099
call[1][1].getvalue())
1100
self.assertEqual({'create_parent_dir': True}, call[2])
1102
def assertTotalBuildSize(self, size, keys, positions):
1103
self.assertEqual(size,
1104
knit._get_total_build_size(None, keys, positions))
1106
def test__get_total_build_size(self):
1108
('a',): (('fulltext', False), (('a',), 0, 100), None),
1109
('b',): (('line-delta', False), (('b',), 100, 21), ('a',)),
1110
('c',): (('line-delta', False), (('c',), 121, 35), ('b',)),
1111
('d',): (('line-delta', False), (('d',), 156, 12), ('b',)),
1113
self.assertTotalBuildSize(100, [('a',)], positions)
1114
self.assertTotalBuildSize(121, [('b',)], positions)
1115
# c needs both a & b
1116
self.assertTotalBuildSize(156, [('c',)], positions)
1117
# we shouldn't count 'b' twice
1118
self.assertTotalBuildSize(156, [('b',), ('c',)], positions)
1119
self.assertTotalBuildSize(133, [('d',)], positions)
1120
self.assertTotalBuildSize(168, [('c',), ('d',)], positions)
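        # For reference: 'a' is a 100-byte fulltext; 'b', 'c' and 'd' are
        # deltas of 21, 35 and 12 bytes whose chains all go back to 'a', so
        # e.g. 'd' alone costs 100 + 21 + 12 = 133 and 'c' + 'd' share the
        # 100 + 21 basis for a total of 168.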
1122
def test_get_position(self):
1123
transport = MockTransport([
1128
index = self.get_knit_index(transport, "filename", "r")
1130
self.assertEqual((("a",), 0, 1), index.get_position(("a",)))
1131
self.assertEqual((("b",), 1, 2), index.get_position(("b",)))
1133
def test_get_method(self):
1134
transport = MockTransport([
1136
"a fulltext,unknown 0 1 :",
1137
"b unknown,line-delta 1 2 :",
1140
index = self.get_knit_index(transport, "filename", "r")
1142
self.assertEqual("fulltext", index.get_method("a"))
1143
self.assertEqual("line-delta", index.get_method("b"))
1144
self.assertRaises(errors.KnitIndexUnknownMethod, index.get_method, "c")
1146
def test_get_options(self):
1147
transport = MockTransport([
1152
index = self.get_knit_index(transport, "filename", "r")
1154
self.assertEqual(["opt1"], index.get_options("a"))
1155
self.assertEqual(["opt2", "opt3"], index.get_options("b"))
1157
def test_get_parent_map(self):
1158
transport = MockTransport([
1161
"b option 1 2 0 .c :",
1162
"c option 1 2 1 0 .e :"
1164
index = self.get_knit_index(transport, "filename", "r")
1168
("b",):(("a",), ("c",)),
1169
("c",):(("b",), ("a",), ("e",)),
1170
}, index.get_parent_map(index.keys()))
1172
def test_impossible_parent(self):
1173
"""Test we get KnitCorrupt if the parent couldn't possibly exist."""
1174
transport = MockTransport([
1177
"b option 0 1 4 :" # We don't have a 4th record
1179
index = self.get_knit_index(transport, 'filename', 'r')
1181
self.assertRaises(errors.KnitCorrupt, index.keys)
1182
except TypeError, e:
1183
if (str(e) == ('exceptions must be strings, classes, or instances,'
1184
' not exceptions.IndexError')
1185
and sys.version_info[0:2] >= (2,5)):
1186
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1187
' raising new style exceptions with python'
1192
def test_corrupted_parent(self):
1193
transport = MockTransport([
1197
"c option 0 1 1v :", # Can't have a parent of '1v'
1199
index = self.get_knit_index(transport, 'filename', 'r')
1201
self.assertRaises(errors.KnitCorrupt, index.keys)
1202
except TypeError, e:
1203
if (str(e) == ('exceptions must be strings, classes, or instances,'
1204
' not exceptions.ValueError')
1205
and sys.version_info[0:2] >= (2,5)):
1206
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1207
' raising new style exceptions with python'
1212
def test_corrupted_parent_in_list(self):
1213
transport = MockTransport([
1217
"c option 0 1 1 v :", # Can't have a parent of 'v'
1219
index = self.get_knit_index(transport, 'filename', 'r')
1221
self.assertRaises(errors.KnitCorrupt, index.keys)
1222
except TypeError, e:
1223
if (str(e) == ('exceptions must be strings, classes, or instances,'
1224
' not exceptions.ValueError')
1225
and sys.version_info[0:2] >= (2,5)):
1226
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1227
' raising new style exceptions with python'
1232
def test_invalid_position(self):
1233
transport = MockTransport([
1237
index = self.get_knit_index(transport, 'filename', 'r')
1239
self.assertRaises(errors.KnitCorrupt, index.keys)
1240
except TypeError, e:
1241
if (str(e) == ('exceptions must be strings, classes, or instances,'
1242
' not exceptions.ValueError')
1243
and sys.version_info[0:2] >= (2,5)):
1244
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1245
' raising new style exceptions with python'
1250
def test_invalid_size(self):
1251
transport = MockTransport([
1255
index = self.get_knit_index(transport, 'filename', 'r')
1257
self.assertRaises(errors.KnitCorrupt, index.keys)
1258
except TypeError, e:
1259
if (str(e) == ('exceptions must be strings, classes, or instances,'
1260
' not exceptions.ValueError')
1261
and sys.version_info[0:2] >= (2,5)):
1262
self.knownFailure('Pyrex <0.9.5 fails with TypeError when'
1263
' raising new style exceptions with python'
1268
def test_scan_unvalidated_index_not_implemented(self):
1269
transport = MockTransport()
1270
index = self.get_knit_index(transport, 'filename', 'r')
1272
NotImplementedError, index.scan_unvalidated_index,
1273
'dummy graph_index')
1275
NotImplementedError, index.get_missing_compression_parents)
1277
def test_short_line(self):
1278
transport = MockTransport([
1281
"b option 10 10 0", # This line isn't terminated, ignored
1283
index = self.get_knit_index(transport, "filename", "r")
1284
self.assertEqual(set([('a',)]), index.keys())
1286
def test_skip_incomplete_record(self):
1287
# A line with bogus data should just be skipped
1288
transport = MockTransport([
1291
"b option 10 10 0", # This line isn't terminated, ignored
1292
"c option 20 10 0 :", # Properly terminated, and starts with '\n'
1294
index = self.get_knit_index(transport, "filename", "r")
1295
self.assertEqual(set([('a',), ('c',)]), index.keys())
1297
def test_trailing_characters(self):
1298
# A line with bogus data should just be skipped
1299
transport = MockTransport([
1302
"b option 10 10 0 :a", # This line has extra trailing characters
1303
"c option 20 10 0 :", # Properly terminated, and starts with '\n'
1305
index = self.get_knit_index(transport, "filename", "r")
1306
self.assertEqual(set([('a',), ('c',)]), index.keys())
1309
class LowLevelKnitIndexTests_c(LowLevelKnitIndexTests):
1311
_test_needs_features = [CompiledKnitFeature]
1313
    def get_knit_index(self, transport, name, mode):
        mapper = ConstantMapper(name)
        orig = knit._load_data
        def reset():
            knit._load_data = orig
        self.addCleanup(reset)
        from bzrlib._knit_load_data_pyx import _load_data_c
        knit._load_data = _load_data_c
        allow_writes = lambda: mode == 'w'
        return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
class Test_KnitAnnotator(TestCaseWithMemoryTransport):
1327
def make_annotator(self):
1328
factory = knit.make_pack_factory(True, True, 1)
1329
vf = factory(self.get_transport())
1330
return knit._KnitAnnotator(vf)
1332
def test__expand_fulltext(self):
1333
ann = self.make_annotator()
1334
rev_key = ('rev-id',)
1335
ann._num_compression_children[rev_key] = 1
1336
res = ann._expand_record(rev_key, (('parent-id',),), None,
1337
['line1\n', 'line2\n'], ('fulltext', True))
1338
# The content object and text lines should be cached appropriately
1339
self.assertEqual(['line1\n', 'line2'], res)
1340
content_obj = ann._content_objects[rev_key]
1341
self.assertEqual(['line1\n', 'line2\n'], content_obj._lines)
1342
self.assertEqual(res, content_obj.text())
1343
self.assertEqual(res, ann._text_cache[rev_key])
1345
def test__expand_delta_comp_parent_not_available(self):
1346
# Parent isn't available yet, so we return nothing, but queue up this
1347
# node for later processing
1348
ann = self.make_annotator()
1349
rev_key = ('rev-id',)
1350
parent_key = ('parent-id',)
1351
record = ['0,1,1\n', 'new-line\n']
1352
details = ('line-delta', False)
1353
res = ann._expand_record(rev_key, (parent_key,), parent_key,
1355
self.assertEqual(None, res)
1356
self.assertTrue(parent_key in ann._pending_deltas)
1357
pending = ann._pending_deltas[parent_key]
1358
self.assertEqual(1, len(pending))
1359
self.assertEqual((rev_key, (parent_key,), record, details), pending[0])
1361
def test__expand_record_tracks_num_children(self):
1362
ann = self.make_annotator()
1363
rev_key = ('rev-id',)
1364
rev2_key = ('rev2-id',)
1365
parent_key = ('parent-id',)
1366
record = ['0,1,1\n', 'new-line\n']
1367
details = ('line-delta', False)
1368
ann._num_compression_children[parent_key] = 2
1369
ann._expand_record(parent_key, (), None, ['line1\n', 'line2\n'],
1370
('fulltext', False))
1371
res = ann._expand_record(rev_key, (parent_key,), parent_key,
1373
self.assertEqual({parent_key: 1}, ann._num_compression_children)
1374
# Expanding the second child should remove the content object, and the
1375
# num_compression_children entry
1376
res = ann._expand_record(rev2_key, (parent_key,), parent_key,
1378
self.assertFalse(parent_key in ann._content_objects)
1379
self.assertEqual({}, ann._num_compression_children)
1380
# We should not cache the content_objects for rev2 and rev, because
1381
# they do not have compression children of their own.
1382
self.assertEqual({}, ann._content_objects)
1384
def test__expand_delta_records_blocks(self):
1385
ann = self.make_annotator()
1386
rev_key = ('rev-id',)
1387
parent_key = ('parent-id',)
1388
record = ['0,1,1\n', 'new-line\n']
1389
details = ('line-delta', True)
1390
ann._num_compression_children[parent_key] = 2
1391
ann._expand_record(parent_key, (), None,
1392
['line1\n', 'line2\n', 'line3\n'],
1393
('fulltext', False))
1394
ann._expand_record(rev_key, (parent_key,), parent_key, record, details)
1395
self.assertEqual({(rev_key, parent_key): [(1, 1, 1), (3, 3, 0)]},
1396
ann._matching_blocks)
1397
rev2_key = ('rev2-id',)
1398
record = ['0,1,1\n', 'new-line\n']
1399
details = ('line-delta', False)
1400
ann._expand_record(rev2_key, (parent_key,), parent_key, record, details)
1401
self.assertEqual([(1, 1, 2), (3, 3, 0)],
1402
ann._matching_blocks[(rev2_key, parent_key)])
1404
def test__get_parent_ann_uses_matching_blocks(self):
1405
ann = self.make_annotator()
1406
rev_key = ('rev-id',)
1407
parent_key = ('parent-id',)
1408
parent_ann = [(parent_key,)]*3
1409
block_key = (rev_key, parent_key)
1410
ann._annotations_cache[parent_key] = parent_ann
1411
ann._matching_blocks[block_key] = [(0, 1, 1), (3, 3, 0)]
1412
# We should not try to access any parent_lines content, because we know
1413
# we already have the matching blocks
1414
par_ann, blocks = ann._get_parent_annotations_and_matches(rev_key,
1415
['1\n', '2\n', '3\n'], parent_key)
1416
self.assertEqual(parent_ann, par_ann)
1417
self.assertEqual([(0, 1, 1), (3, 3, 0)], blocks)
1418
self.assertEqual({}, ann._matching_blocks)
1420
def test__process_pending(self):
1421
ann = self.make_annotator()
1422
rev_key = ('rev-id',)
1425
record = ['0,1,1\n', 'new-line\n']
1426
details = ('line-delta', False)
1427
p1_record = ['line1\n', 'line2\n']
1428
ann._num_compression_children[p1_key] = 1
1429
res = ann._expand_record(rev_key, (p1_key,p2_key), p1_key,
1431
self.assertEqual(None, res)
1432
# self.assertTrue(p1_key in ann._pending_deltas)
1433
self.assertEqual({}, ann._pending_annotation)
1434
# Now insert p1, and we should be able to expand the delta
1435
res = ann._expand_record(p1_key, (), None, p1_record,
1436
('fulltext', False))
1437
self.assertEqual(p1_record, res)
1438
ann._annotations_cache[p1_key] = [(p1_key,)]*2
1439
res = ann._process_pending(p1_key)
1440
self.assertEqual([], res)
1441
self.assertFalse(p1_key in ann._pending_deltas)
1442
self.assertTrue(p2_key in ann._pending_annotation)
1443
self.assertEqual({p2_key: [(rev_key, (p1_key, p2_key))]},
1444
ann._pending_annotation)
1445
# Now fill in parent 2, and pending annotation should be satisfied
1446
res = ann._expand_record(p2_key, (), None, [], ('fulltext', False))
1447
ann._annotations_cache[p2_key] = []
1448
res = ann._process_pending(p2_key)
1449
self.assertEqual([rev_key], res)
1450
self.assertEqual({}, ann._pending_annotation)
1451
self.assertEqual({}, ann._pending_deltas)
1453
def test_record_delta_removes_basis(self):
1454
ann = self.make_annotator()
1455
ann._expand_record(('parent-id',), (), None,
1456
['line1\n', 'line2\n'], ('fulltext', False))
1457
ann._num_compression_children['parent-id'] = 2
1459
def test_annotate_special_text(self):
1460
ann = self.make_annotator()
1462
rev1_key = ('rev-1',)
1463
rev2_key = ('rev-2',)
1464
rev3_key = ('rev-3',)
1465
spec_key = ('special:',)
1466
vf.add_lines(rev1_key, [], ['initial content\n'])
1467
vf.add_lines(rev2_key, [rev1_key], ['initial content\n',
1470
vf.add_lines(rev3_key, [rev1_key], ['initial content\n',
1473
spec_text = ('initial content\n'
1477
ann.add_special_text(spec_key, [rev2_key, rev3_key], spec_text)
1478
anns, lines = ann.annotate(spec_key)
1479
self.assertEqual([(rev1_key,),
1480
(rev2_key, rev3_key),
1484
self.assertEqualDiff(spec_text, ''.join(lines))
1487
class KnitTests(TestCaseWithTransport):
1488
"""Class containing knit test helper routines."""
1490
def make_test_knit(self, annotate=False, name='test'):
1491
mapper = ConstantMapper(name)
1492
return make_file_factory(annotate, mapper)(self.get_transport())
1495
class TestBadShaError(KnitTests):
1496
"""Tests for handling of sha errors."""
1498
def test_sha_exception_has_text(self):
1499
# having the failed text included in the error allows for recovery.
1500
source = self.make_test_knit()
1501
target = self.make_test_knit(name="target")
1502
if not source._max_delta_chain:
1503
raise TestNotApplicable(
1504
"cannot get delta-caused sha failures without deltas.")
1507
broken = ('broken',)
1508
source.add_lines(basis, (), ['foo\n'])
1509
source.add_lines(broken, (basis,), ['foo\n', 'bar\n'])
1510
# Seed target with a bad basis text
1511
target.add_lines(basis, (), ['gam\n'])
1512
target.insert_record_stream(
1513
source.get_record_stream([broken], 'unordered', False))
1514
err = self.assertRaises(errors.KnitCorrupt,
1515
target.get_record_stream([broken], 'unordered', True
1516
).next().get_bytes_as, 'chunked')
1517
self.assertEqual(['gam\n', 'bar\n'], err.content)
1518
# Test for formatting with live data
1519
self.assertStartsWith(str(err), "Knit ")
1522
class TestKnitIndex(KnitTests):
1524
def test_add_versions_dictionary_compresses(self):
1525
"""Adding versions to the index should update the lookup dict"""
1526
knit = self.make_test_knit()
1528
idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])
1529
self.check_file_contents('test.kndx',
1530
'# bzr knit index 8\n'
1532
'a-1 fulltext 0 0 :'
1535
(('a-2',), ['fulltext'], (('a-2',), 0, 0), [('a-1',)]),
1536
(('a-3',), ['fulltext'], (('a-3',), 0, 0), [('a-2',)]),
1538
self.check_file_contents('test.kndx',
1539
'# bzr knit index 8\n'
1541
'a-1 fulltext 0 0 :\n'
1542
'a-2 fulltext 0 0 0 :\n'
1543
'a-3 fulltext 0 0 1 :'
1545
self.assertEqual(set([('a-3',), ('a-1',), ('a-2',)]), idx.keys())
1547
('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False)),
1548
('a-2',): ((('a-2',), 0, 0), None, (('a-1',),), ('fulltext', False)),
1549
('a-3',): ((('a-3',), 0, 0), None, (('a-2',),), ('fulltext', False)),
1550
}, idx.get_build_details(idx.keys()))
1551
self.assertEqual({('a-1',):(),
1552
('a-2',):(('a-1',),),
1553
('a-3',):(('a-2',),),},
1554
idx.get_parent_map(idx.keys()))
1556
def test_add_versions_fails_clean(self):
1557
"""If add_versions fails in the middle, it restores a pristine state.
1559
Any modifications that are made to the index are reset if all versions
1562
# This cheats a little bit by passing in a generator which will
1563
# raise an exception before the processing finishes
1564
        # Other possibilities would be to have a version with the wrong number
1565
# of entries, or to make the backing transport unable to write any
1568
knit = self.make_test_knit()
1570
idx.add_records([(('a-1',), ['fulltext'], (('a-1',), 0, 0), [])])
1572
class StopEarly(Exception):
1575
def generate_failure():
1576
"""Add some entries and then raise an exception"""
1577
yield (('a-2',), ['fulltext'], (None, 0, 0), ('a-1',))
1578
yield (('a-3',), ['fulltext'], (None, 0, 0), ('a-2',))
1581
# Assert the pre-condition
1583
self.assertEqual(set([('a-1',)]), set(idx.keys()))
1585
{('a-1',): ((('a-1',), 0, 0), None, (), ('fulltext', False))},
1586
idx.get_build_details([('a-1',)]))
1587
self.assertEqual({('a-1',):()}, idx.get_parent_map(idx.keys()))
1590
self.assertRaises(StopEarly, idx.add_records, generate_failure())
1591
# And it shouldn't be modified
1594
def test_knit_index_ignores_empty_files(self):
1595
# There was a race condition in older bzr, where a ^C at the right time
1596
# could leave an empty .kndx file, which bzr would later claim was a
1597
# corrupted file since the header was not present. In reality, the file
1598
# just wasn't created, so it should be ignored.
1599
t = get_transport('.')
1600
t.put_bytes('test.kndx', '')
1602
knit = self.make_test_knit()
1604
def test_knit_index_checks_header(self):
1605
t = get_transport('.')
1606
t.put_bytes('test.kndx', '# not really a knit header\n\n')
448
1607
k = self.make_test_knit()
449
self.failUnless(WeaveToKnit.is_compatible(w, k))
450
self.failIf(WeaveToKnit.is_compatible(k, w))
451
self.failIf(WeaveToKnit.is_compatible(w, w))
452
self.failIf(WeaveToKnit.is_compatible(k, k))
455
class TestKnitCaching(KnitTests):
457
def create_knit(self, cache_add=False):
458
k = self.make_test_knit(True)
462
k.add_lines('text-1', [], split_lines(TEXT_1))
463
k.add_lines('text-2', [], split_lines(TEXT_2))
466
def test_no_caching(self):
467
k = self.create_knit()
468
# Nothing should be cached without setting 'enable_cache'
469
self.assertEqual({}, k._data._cache)
471
def test_cache_add_and_clear(self):
472
k = self.create_knit(True)
474
self.assertEqual(['text-1', 'text-2'], sorted(k._data._cache.keys()))
477
self.assertEqual({}, k._data._cache)
479
def test_cache_data_read_raw(self):
480
k = self.create_knit()
485
def read_one_raw(version):
486
pos_map = k._get_components_positions([version])
487
method, pos, size, next = pos_map[version]
488
lst = list(k._data.read_records_iter_raw([(version, pos, size)]))
489
self.assertEqual(1, len(lst))
492
val = read_one_raw('text-1')
493
self.assertEqual({'text-1':val[1]}, k._data._cache)
496
# After clear, new reads are not cached
497
self.assertEqual({}, k._data._cache)
499
val2 = read_one_raw('text-1')
500
self.assertEqual(val, val2)
501
self.assertEqual({}, k._data._cache)
503
def test_cache_data_read(self):
504
k = self.create_knit()
506
def read_one(version):
507
pos_map = k._get_components_positions([version])
508
method, pos, size, next = pos_map[version]
509
lst = list(k._data.read_records_iter([(version, pos, size)]))
510
self.assertEqual(1, len(lst))
516
val = read_one('text-2')
517
self.assertEqual(['text-2'], k._data._cache.keys())
518
self.assertEqual('text-2', val[0])
519
content, digest = k._data._parse_record('text-2',
520
k._data._cache['text-2'])
521
self.assertEqual(content, val[1])
522
self.assertEqual(digest, val[2])
525
self.assertEqual({}, k._data._cache)
527
val2 = read_one('text-2')
528
self.assertEqual(val, val2)
529
self.assertEqual({}, k._data._cache)
531
def test_cache_read(self):
532
k = self.create_knit()
535
text = k.get_text('text-1')
536
self.assertEqual(TEXT_1, text)
537
self.assertEqual(['text-1'], k._data._cache.keys())
540
self.assertEqual({}, k._data._cache)
542
text = k.get_text('text-1')
543
self.assertEqual(TEXT_1, text)
544
self.assertEqual({}, k._data._cache)
1608
self.assertRaises(KnitHeaderError, k.keys)
1611
class TestGraphIndexKnit(KnitTests):
1612
"""Tests for knits using a GraphIndex rather than a KnitIndex."""
1614
def make_g_index(self, name, ref_lists=0, nodes=[]):
1615
builder = GraphIndexBuilder(ref_lists)
1616
for node, references, value in nodes:
1617
builder.add_node(node, references, value)
1618
stream = builder.finish()
1619
trans = self.get_transport()
1620
size = trans.put_file(name, stream)
1621
return GraphIndex(trans, name, size)
1623
def two_graph_index(self, deltas=False, catch_adds=False):
1624
"""Build a two-graph index.
1626
        :param deltas: If true, use underlying indices with two node-ref
            lists and 'parent' set to be delta-compressed against 'tail'.
1629
# build a complex graph across several indices.
1631
            # delta compression in the index
1632
index1 = self.make_g_index('1', 2, [
1633
(('tip', ), 'N0 100', ([('parent', )], [], )),
1634
(('tail', ), '', ([], []))])
1635
index2 = self.make_g_index('2', 2, [
1636
(('parent', ), ' 100 78', ([('tail', ), ('ghost', )], [('tail', )])),
1637
(('separate', ), '', ([], []))])
1639
# just blob location and graph in the index.
1640
index1 = self.make_g_index('1', 1, [
1641
(('tip', ), 'N0 100', ([('parent', )], )),
1642
(('tail', ), '', ([], ))])
1643
index2 = self.make_g_index('2', 1, [
1644
(('parent', ), ' 100 78', ([('tail', ), ('ghost', )], )),
1645
(('separate', ), '', ([], ))])
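            # In both branches the graph shape is 'tip' -> 'parent' -> 'tail',
            # with 'ghost' referenced but absent and 'separate' unconnected.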
1646
combined_index = CombinedGraphIndex([index1, index2])
1648
self.combined_index = combined_index
1649
self.caught_entries = []
1650
add_callback = self.catch_add
1653
return _KnitGraphIndex(combined_index, lambda:True, deltas=deltas,
1654
add_callback=add_callback)
1656
def test_keys(self):
1657
index = self.two_graph_index()
1658
self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
1661
def test_get_position(self):
1662
index = self.two_graph_index()
1663
self.assertEqual((index._graph_index._indices[0], 0, 100), index.get_position(('tip',)))
1664
self.assertEqual((index._graph_index._indices[1], 100, 78), index.get_position(('parent',)))
1666
def test_get_method_deltas(self):
1667
index = self.two_graph_index(deltas=True)
1668
self.assertEqual('fulltext', index.get_method(('tip',)))
1669
self.assertEqual('line-delta', index.get_method(('parent',)))
1671
def test_get_method_no_deltas(self):
1672
# check that the parent-history lookup is ignored with deltas=False.
1673
index = self.two_graph_index(deltas=False)
1674
self.assertEqual('fulltext', index.get_method(('tip',)))
1675
self.assertEqual('fulltext', index.get_method(('parent',)))
1677
def test_get_options_deltas(self):
1678
index = self.two_graph_index(deltas=True)
1679
self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
1680
self.assertEqual(['line-delta'], index.get_options(('parent',)))
1682
def test_get_options_no_deltas(self):
1683
# check that the parent-history lookup is ignored with deltas=False.
1684
index = self.two_graph_index(deltas=False)
1685
self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
1686
self.assertEqual(['fulltext'], index.get_options(('parent',)))
1688
def test_get_parent_map(self):
1689
index = self.two_graph_index()
1690
self.assertEqual({('parent',):(('tail',), ('ghost',))},
1691
index.get_parent_map([('parent',), ('ghost',)]))
1693
def catch_add(self, entries):
1694
self.caught_entries.append(entries)
1696
def test_add_no_callback_errors(self):
1697
index = self.two_graph_index()
1698
self.assertRaises(errors.ReadOnlyError, index.add_records,
1699
[(('new',), 'fulltext,no-eol', (None, 50, 60), ['separate'])])
1701
def test_add_version_smoke(self):
1702
index = self.two_graph_index(catch_adds=True)
1703
index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60),
1705
self.assertEqual([[(('new', ), 'N50 60', ((('separate',),),))]],
1706
self.caught_entries)
1708
def test_add_version_delta_not_delta_index(self):
1709
index = self.two_graph_index(catch_adds=True)
1710
self.assertRaises(errors.KnitCorrupt, index.add_records,
1711
[(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
1712
self.assertEqual([], self.caught_entries)
1714
def test_add_version_same_dup(self):
1715
index = self.two_graph_index(catch_adds=True)
1716
# options can be spelt two different ways
1717
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
1718
index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
1719
# position/length are ignored (because each pack could have fulltext or
1720
# delta, and be at a different position).
1721
index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
1723
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
1725
# but none of these calls should have added data:
1726
self.assertEqual([[], [], [], []], self.caught_entries)
1728
def test_add_version_different_dup(self):
1729
index = self.two_graph_index(deltas=True, catch_adds=True)
1731
self.assertRaises(errors.KnitCorrupt, index.add_records,
1732
[(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1733
self.assertRaises(errors.KnitCorrupt, index.add_records,
1734
[(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
1736
self.assertRaises(errors.KnitCorrupt, index.add_records,
1737
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
1738
self.assertEqual([], self.caught_entries)
1740
def test_add_versions_nodeltas(self):
1741
index = self.two_graph_index(catch_adds=True)
1743
(('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
1744
(('new2',), 'fulltext', (None, 0, 6), [('new',)]),
1746
self.assertEqual([(('new', ), 'N50 60', ((('separate',),),)),
1747
(('new2', ), ' 0 6', ((('new',),),))],
1748
sorted(self.caught_entries[0]))
1749
self.assertEqual(1, len(self.caught_entries))
1751
def test_add_versions_deltas(self):
1752
index = self.two_graph_index(deltas=True, catch_adds=True)
1754
(('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)]),
1755
(('new2',), 'line-delta', (None, 0, 6), [('new',)]),
1757
self.assertEqual([(('new', ), 'N50 60', ((('separate',),), ())),
1758
(('new2', ), ' 0 6', ((('new',),), (('new',),), ))],
1759
sorted(self.caught_entries[0]))
1760
self.assertEqual(1, len(self.caught_entries))
1762
def test_add_versions_delta_not_delta_index(self):
1763
index = self.two_graph_index(catch_adds=True)
1764
self.assertRaises(errors.KnitCorrupt, index.add_records,
1765
[(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
1766
self.assertEqual([], self.caught_entries)
1768
def test_add_versions_random_id_accepted(self):
1769
index = self.two_graph_index(catch_adds=True)
1770
index.add_records([], random_id=True)
1772
def test_add_versions_same_dup(self):
1773
index = self.two_graph_index(catch_adds=True)
1774
# options can be spelt two different ways
1775
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100),
1777
index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100),
1779
# position/length are ignored (because each pack could have fulltext or
1780
# delta, and be at a different position).
1781
index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100),
1783
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000),
1785
# but none of these calls should have added data.
1786
self.assertEqual([[], [], [], []], self.caught_entries)
1788
def test_add_versions_different_dup(self):
1789
index = self.two_graph_index(deltas=True, catch_adds=True)
1791
self.assertRaises(errors.KnitCorrupt, index.add_records,
1792
[(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1793
self.assertRaises(errors.KnitCorrupt, index.add_records,
1794
[(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
1796
self.assertRaises(errors.KnitCorrupt, index.add_records,
1797
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
1798
# change options in the second record
1799
self.assertRaises(errors.KnitCorrupt, index.add_records,
1800
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)]),
1801
(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
1802
self.assertEqual([], self.caught_entries)
1804
def make_g_index_missing_compression_parent(self):
1805
graph_index = self.make_g_index('missing_comp', 2,
1806
[(('tip', ), ' 100 78',
1807
([('missing-parent', ), ('ghost', )], [('missing-parent', )]))])
1810
def make_g_index_missing_parent(self):
1811
graph_index = self.make_g_index('missing_parent', 2,
1812
[(('parent', ), ' 100 78', ([], [])),
1813
(('tip', ), ' 100 78',
1814
([('parent', ), ('missing-parent', )], [('parent', )])),
1818
def make_g_index_no_external_refs(self):
1819
graph_index = self.make_g_index('no_external_refs', 2,
1820
[(('rev', ), ' 100 78',
1821
([('parent', ), ('ghost', )], []))])
1824
def test_add_good_unvalidated_index(self):
1825
unvalidated = self.make_g_index_no_external_refs()
1826
combined = CombinedGraphIndex([unvalidated])
1827
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1828
index.scan_unvalidated_index(unvalidated)
1829
self.assertEqual(frozenset(), index.get_missing_compression_parents())
1831
def test_add_missing_compression_parent_unvalidated_index(self):
1832
unvalidated = self.make_g_index_missing_compression_parent()
1833
combined = CombinedGraphIndex([unvalidated])
1834
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1835
index.scan_unvalidated_index(unvalidated)
1836
# This also checks that it's only the compression parent that is
1837
# examined, otherwise 'ghost' would also be reported as a missing
1840
frozenset([('missing-parent',)]),
1841
index.get_missing_compression_parents())
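# Conceptual sketch (an assumption about the behaviour asserted above, not
# bzrlib's implementation): with deltas=True the second reference list of a
# scanned node names its compression parent, and that parent counts as
# missing only while no index in the combined set supplies it as a key;
# ordinary parents such as 'ghost' are never reported here.
def _missing_compression_parents_sketch(scanned_nodes, present_keys):
    referenced = set()
    for key, value, ref_lists in scanned_nodes:
        # ref_lists[1] holds the compression parent references.
        referenced.update(ref_lists[1])
    return frozenset(referenced - set(present_keys))
# e.g. for the 'missing_comp' index above this yields
# frozenset([('missing-parent',)]).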
1843
def test_add_missing_noncompression_parent_unvalidated_index(self):
1844
unvalidated = self.make_g_index_missing_parent()
1845
combined = CombinedGraphIndex([unvalidated])
1846
index = _KnitGraphIndex(combined, lambda: True, deltas=True,
1847
track_external_parent_refs=True)
1848
index.scan_unvalidated_index(unvalidated)
1850
frozenset([('missing-parent',)]), index.get_missing_parents())
1852
def test_track_external_parent_refs(self):
1853
g_index = self.make_g_index('empty', 2, [])
1854
combined = CombinedGraphIndex([g_index])
1855
index = _KnitGraphIndex(combined, lambda: True, deltas=True,
1856
add_callback=self.catch_add, track_external_parent_refs=True)
1857
self.caught_entries = []
1859
(('new-key',), 'fulltext,no-eol', (None, 50, 60),
1860
[('parent-1',), ('parent-2',)])])
1862
frozenset([('parent-1',), ('parent-2',)]),
1863
index.get_missing_parents())
1865
def test_add_unvalidated_index_with_present_external_references(self):
1866
index = self.two_graph_index(deltas=True)
1867
# Ugly hack to get at one of the underlying GraphIndex objects that
1868
# two_graph_index built.
1869
unvalidated = index._graph_index._indices[1]
1870
# 'parent' is an external ref of _indices[1] (unvalidated), but is
1871
# present in _indices[0].
1872
index.scan_unvalidated_index(unvalidated)
1873
self.assertEqual(frozenset(), index.get_missing_compression_parents())
1875
def make_new_missing_parent_g_index(self, name):
1876
missing_parent = name + '-missing-parent'
1877
graph_index = self.make_g_index(name, 2,
1878
[((name + 'tip', ), ' 100 78',
1879
([(missing_parent, ), ('ghost', )], [(missing_parent, )]))])
1882
def test_add_mulitiple_unvalidated_indices_with_missing_parents(self):
1883
g_index_1 = self.make_new_missing_parent_g_index('one')
1884
g_index_2 = self.make_new_missing_parent_g_index('two')
1885
combined = CombinedGraphIndex([g_index_1, g_index_2])
1886
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1887
index.scan_unvalidated_index(g_index_1)
1888
index.scan_unvalidated_index(g_index_2)
1890
frozenset([('one-missing-parent',), ('two-missing-parent',)]),
1891
index.get_missing_compression_parents())
1893
def test_add_mulitiple_unvalidated_indices_with_mutual_dependencies(self):
1894
graph_index_a = self.make_g_index('one', 2,
1895
[(('parent-one', ), ' 100 78', ([('non-compression-parent',)], [])),
1896
(('child-of-two', ), ' 100 78',
1897
([('parent-two',)], [('parent-two',)]))])
1898
graph_index_b = self.make_g_index('two', 2,
1899
[(('parent-two', ), ' 100 78', ([('non-compression-parent',)], [])),
1900
(('child-of-one', ), ' 100 78',
1901
([('parent-one',)], [('parent-one',)]))])
1902
combined = CombinedGraphIndex([graph_index_a, graph_index_b])
1903
index = _KnitGraphIndex(combined, lambda: True, deltas=True)
1904
index.scan_unvalidated_index(graph_index_a)
1905
index.scan_unvalidated_index(graph_index_b)
1907
frozenset([]), index.get_missing_compression_parents())
1910
class TestNoParentsGraphIndexKnit(KnitTests):
1911
"""Tests for knits using _KnitGraphIndex with no parents."""
1913
def make_g_index(self, name, ref_lists=0, nodes=[]):
1914
builder = GraphIndexBuilder(ref_lists)
1915
for key, value in nodes:
1916
builder.add_node(key, value)
1917
stream = builder.finish()
1918
trans = self.get_transport()
1919
size = trans.put_file(name, stream)
1920
return GraphIndex(trans, name, size)
1922
def test_add_good_unvalidated_index(self):
1923
unvalidated = self.make_g_index('unvalidated')
1924
combined = CombinedGraphIndex([unvalidated])
1925
index = _KnitGraphIndex(combined, lambda: True, parents=False)
1926
index.scan_unvalidated_index(unvalidated)
1927
self.assertEqual(frozenset(),
1928
index.get_missing_compression_parents())
1930
def test_parents_deltas_incompatible(self):
1931
index = CombinedGraphIndex([])
1932
self.assertRaises(errors.KnitError, _KnitGraphIndex, lambda:True,
1933
index, deltas=True, parents=False)
1935
def two_graph_index(self, catch_adds=False):
1936
"""Build a two-graph index.
1938
:param catch_adds: If true, register an add_callback that records the
1939
entries passed to add_records in self.caught_entries.
1941
# put several versions in the index.
1942
index1 = self.make_g_index('1', 0, [
1943
(('tip', ), 'N0 100'),
1945
index2 = self.make_g_index('2', 0, [
1946
(('parent', ), ' 100 78'),
1947
(('separate', ), '')])
1948
combined_index = CombinedGraphIndex([index1, index2])
1950
self.combined_index = combined_index
1951
self.caught_entries = []
1952
add_callback = self.catch_add
1955
return _KnitGraphIndex(combined_index, lambda:True, parents=False,
1956
add_callback=add_callback)
1958
def test_keys(self):
1959
index = self.two_graph_index()
1960
self.assertEqual(set([('tail',), ('tip',), ('parent',), ('separate',)]),
1963
def test_get_position(self):
1964
index = self.two_graph_index()
1965
self.assertEqual((index._graph_index._indices[0], 0, 100),
1966
index.get_position(('tip',)))
1967
self.assertEqual((index._graph_index._indices[1], 100, 78),
1968
index.get_position(('parent',)))
1970
def test_get_method(self):
1971
index = self.two_graph_index()
1972
self.assertEqual('fulltext', index.get_method(('tip',)))
1973
self.assertEqual('fulltext', index.get_method(('parent',)))
1975
def test_get_options(self):
1976
index = self.two_graph_index()
1977
self.assertEqual(['fulltext', 'no-eol'], index.get_options(('tip',)))
1978
self.assertEqual(['fulltext'], index.get_options(('parent',)))
1980
def test_get_parent_map(self):
1981
index = self.two_graph_index()
1982
self.assertEqual({('parent',):None},
1983
index.get_parent_map([('parent',), ('ghost',)]))
1985
def catch_add(self, entries):
1986
self.caught_entries.append(entries)
1988
def test_add_no_callback_errors(self):
1989
index = self.two_graph_index()
1990
self.assertRaises(errors.ReadOnlyError, index.add_records,
1991
[(('new',), 'fulltext,no-eol', (None, 50, 60), [('separate',)])])
1993
def test_add_version_smoke(self):
1994
index = self.two_graph_index(catch_adds=True)
1995
index.add_records([(('new',), 'fulltext,no-eol', (None, 50, 60), [])])
1996
self.assertEqual([[(('new', ), 'N50 60')]],
1997
self.caught_entries)
1999
def test_add_version_delta_not_delta_index(self):
2000
index = self.two_graph_index(catch_adds=True)
2001
self.assertRaises(errors.KnitCorrupt, index.add_records,
2002
[(('new',), 'no-eol,line-delta', (None, 0, 100), [])])
2003
self.assertEqual([], self.caught_entries)
2005
def test_add_version_same_dup(self):
2006
index = self.two_graph_index(catch_adds=True)
2007
# options can be spelt two different ways
2008
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
2009
index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
2010
# position/length are ignored (because each pack could have fulltext or
2011
# delta, and be at a different position).
2012
index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
2013
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
2014
# but none of these calls should have added data.
2015
self.assertEqual([[], [], [], []], self.caught_entries)
2017
def test_add_version_different_dup(self):
2018
index = self.two_graph_index(catch_adds=True)
2020
self.assertRaises(errors.KnitCorrupt, index.add_records,
2021
[(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
2022
self.assertRaises(errors.KnitCorrupt, index.add_records,
2023
[(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
2024
self.assertRaises(errors.KnitCorrupt, index.add_records,
2025
[(('tip',), 'fulltext', (None, 0, 100), [])])
2027
self.assertRaises(errors.KnitCorrupt, index.add_records,
2028
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
2029
self.assertEqual([], self.caught_entries)
2031
def test_add_versions(self):
2032
index = self.two_graph_index(catch_adds=True)
2034
(('new',), 'fulltext,no-eol', (None, 50, 60), []),
2035
(('new2',), 'fulltext', (None, 0, 6), []),
2037
self.assertEqual([(('new', ), 'N50 60'), (('new2', ), ' 0 6')],
2038
sorted(self.caught_entries[0]))
2039
self.assertEqual(1, len(self.caught_entries))
2041
def test_add_versions_delta_not_delta_index(self):
2042
index = self.two_graph_index(catch_adds=True)
2043
self.assertRaises(errors.KnitCorrupt, index.add_records,
2044
[(('new',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
2045
self.assertEqual([], self.caught_entries)
2047
def test_add_versions_parents_not_parents_index(self):
2048
index = self.two_graph_index(catch_adds=True)
2049
self.assertRaises(errors.KnitCorrupt, index.add_records,
2050
[(('new',), 'no-eol,fulltext', (None, 0, 100), [('parent',)])])
2051
self.assertEqual([], self.caught_entries)
2053
def test_add_versions_random_id_accepted(self):
2054
index = self.two_graph_index(catch_adds=True)
2055
index.add_records([], random_id=True)
2057
def test_add_versions_same_dup(self):
2058
index = self.two_graph_index(catch_adds=True)
2059
# options can be spelt two different ways
2060
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
2061
index.add_records([(('tip',), 'no-eol,fulltext', (None, 0, 100), [])])
2062
# position/length are ignored (because each pack could have fulltext or
2063
# delta, and be at a different position).
2064
index.add_records([(('tip',), 'fulltext,no-eol', (None, 50, 100), [])])
2065
index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
2066
# but none of these calls should have added data.
2067
self.assertEqual([[], [], [], []], self.caught_entries)
2069
def test_add_versions_different_dup(self):
2070
index = self.two_graph_index(catch_adds=True)
2072
self.assertRaises(errors.KnitCorrupt, index.add_records,
2073
[(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
2074
self.assertRaises(errors.KnitCorrupt, index.add_records,
2075
[(('tip',), 'line-delta,no-eol', (None, 0, 100), [])])
2076
self.assertRaises(errors.KnitCorrupt, index.add_records,
2077
[(('tip',), 'fulltext', (None, 0, 100), [])])
2079
self.assertRaises(errors.KnitCorrupt, index.add_records,
2080
[(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
2081
# change options in the second record
2082
self.assertRaises(errors.KnitCorrupt, index.add_records,
2083
[(('tip',), 'fulltext,no-eol', (None, 0, 100), []),
2084
(('tip',), 'no-eol,line-delta', (None, 0, 100), [])])
2085
self.assertEqual([], self.caught_entries)
2088
class TestKnitVersionedFiles(KnitTests):
2090
def assertGroupKeysForIo(self, exp_groups, keys, non_local_keys,
2091
positions, _min_buffer_size=None):
2092
kvf = self.make_test_knit()
2093
if _min_buffer_size is None:
2094
_min_buffer_size = knit._STREAM_MIN_BUFFER_SIZE
2095
self.assertEqual(exp_groups, kvf._group_keys_for_io(keys,
2096
non_local_keys, positions,
2097
_min_buffer_size=_min_buffer_size))
2099
def assertSplitByPrefix(self, expected_map, expected_prefix_order,
2101
split, prefix_order = KnitVersionedFiles._split_by_prefix(keys)
2102
self.assertEqual(expected_map, split)
2103
self.assertEqual(expected_prefix_order, prefix_order)
2105
def test__group_keys_for_io(self):
2106
ft_detail = ('fulltext', False)
2107
ld_detail = ('line-delta', False)
2115
f_a: (ft_detail, (f_a, 0, 100), None),
2116
f_b: (ld_detail, (f_b, 100, 21), f_a),
2117
f_c: (ld_detail, (f_c, 180, 15), f_b),
2118
g_a: (ft_detail, (g_a, 121, 35), None),
2119
g_b: (ld_detail, (g_b, 156, 12), g_a),
2120
g_c: (ld_detail, (g_c, 195, 13), g_a),
2122
self.assertGroupKeysForIo([([f_a], set())],
2123
[f_a], [], positions)
2124
self.assertGroupKeysForIo([([f_a], set([f_a]))],
2125
[f_a], [f_a], positions)
2126
self.assertGroupKeysForIo([([f_a, f_b], set([]))],
2127
[f_a, f_b], [], positions)
2128
self.assertGroupKeysForIo([([f_a, f_b], set([f_b]))],
2129
[f_a, f_b], [f_b], positions)
2130
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
2131
[f_a, g_a, f_b, g_b], [], positions)
2132
self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
2133
[f_a, g_a, f_b, g_b], [], positions,
2134
_min_buffer_size=150)
2135
self.assertGroupKeysForIo([([f_a, f_b], set()), ([g_a, g_b], set())],
2136
[f_a, g_a, f_b, g_b], [], positions,
2137
_min_buffer_size=100)
2138
self.assertGroupKeysForIo([([f_c], set()), ([g_b], set())],
2139
[f_c, g_b], [], positions,
2140
_min_buffer_size=125)
2141
self.assertGroupKeysForIo([([g_b, f_c], set())],
2142
[g_b, f_c], [], positions,
2143
_min_buffer_size=125)
2145
def test__split_by_prefix(self):
2146
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2147
'g': [('g', 'b'), ('g', 'a')],
2149
[('f', 'a'), ('g', 'b'),
2150
('g', 'a'), ('f', 'b')])
2152
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2153
'g': [('g', 'b'), ('g', 'a')],
2155
[('f', 'a'), ('f', 'b'),
2156
('g', 'b'), ('g', 'a')])
2158
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2159
'g': [('g', 'b'), ('g', 'a')],
2161
[('f', 'a'), ('f', 'b'),
2162
('g', 'b'), ('g', 'a')])
2164
self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
2165
'g': [('g', 'b'), ('g', 'a')],
2166
'': [('a',), ('b',)]
2168
[('f', 'a'), ('g', 'b'),
2170
('g', 'a'), ('f', 'b')])
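# A rough, illustrative reimplementation (an assumption about the behaviour
# asserted above, not KnitVersionedFiles' own code): keys are grouped by
# their first element (with '' for single-element keys), keeping the order
# in which each prefix is first seen and the order of keys within a prefix.
def _split_by_prefix_sketch(keys):
    split = {}
    prefix_order = []
    for key in keys:
        if len(key) > 1:
            prefix = key[0]
        else:
            prefix = ''
        if prefix not in split:
            split[prefix] = []
            prefix_order.append(prefix)
        split[prefix].append(key)
    return split, prefix_order
# e.g. _split_by_prefix_sketch([('f', 'a'), ('g', 'b'), ('g', 'a'), ('f', 'b')])
# -> ({'f': [('f', 'a'), ('f', 'b')], 'g': [('g', 'b'), ('g', 'a')]}, ['f', 'g'])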
2173
class TestStacking(KnitTests):
2175
def get_basis_and_test_knit(self):
2176
basis = self.make_test_knit(name='basis')
2177
basis = RecordingVersionedFilesDecorator(basis)
2178
test = self.make_test_knit(name='test')
2179
test.add_fallback_versioned_files(basis)
2182
def test_add_fallback_versioned_files(self):
2183
basis = self.make_test_knit(name='basis')
2184
test = self.make_test_knit(name='test')
2185
# It must not error; other tests test that the fallback is referred to
2186
# when accessing data.
2187
test.add_fallback_versioned_files(basis)
2189
def test_add_lines(self):
2190
# lines added to the test are not added to the basis
2191
basis, test = self.get_basis_and_test_knit()
2193
key_basis = ('bar',)
2194
key_cross_border = ('quux',)
2195
key_delta = ('zaphod',)
2196
test.add_lines(key, (), ['foo\n'])
2197
self.assertEqual({}, basis.get_parent_map([key]))
2198
# lines added to the test that reference across the stack do a
2200
basis.add_lines(key_basis, (), ['foo\n'])
2202
test.add_lines(key_cross_border, (key_basis,), ['foo\n'])
2203
self.assertEqual('fulltext', test._index.get_method(key_cross_border))
2204
# we don't even need to look at the basis to see that this should be
2205
# stored as a fulltext
2206
self.assertEqual([], basis.calls)
2207
# Subsequent adds do delta.
2209
test.add_lines(key_delta, (key_cross_border,), ['foo\n'])
2210
self.assertEqual('line-delta', test._index.get_method(key_delta))
2211
self.assertEqual([], basis.calls)
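# A minimal sketch (an assumption mirroring the behaviour asserted above,
# not the stacked knit's actual code): a delta can only be compressed
# against a parent held in the same store, so a text whose parents live
# only in a fallback (or which has no parents at all) is expected to be
# stored as a fulltext, while later texts whose parents are local can be
# stored as line-deltas.
def _expected_storage_method_sketch(parent_keys, local_keys):
    if parent_keys and set(parent_keys).issubset(local_keys):
        return 'line-delta'
    return 'fulltext'
# e.g. _expected_storage_method_sketch((key_basis,), set()) == 'fulltext'
# and _expected_storage_method_sketch((key_cross_border,),
#     set([key_cross_border])) == 'line-delta'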
2213
def test_annotate(self):
2214
# annotations from the test knit are answered without asking the basis
2215
basis, test = self.get_basis_and_test_knit()
2217
key_basis = ('bar',)
2218
key_missing = ('missing',)
2219
test.add_lines(key, (), ['foo\n'])
2220
details = test.annotate(key)
2221
self.assertEqual([(key, 'foo\n')], details)
2222
self.assertEqual([], basis.calls)
2223
# But texts that are not in the test knit are looked for in the basis
2225
basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
2227
details = test.annotate(key_basis)
2228
self.assertEqual([(key_basis, 'foo\n'), (key_basis, 'bar\n')], details)
2229
# Not optimised to date:
2230
# self.assertEqual([("annotate", key_basis)], basis.calls)
2231
self.assertEqual([('get_parent_map', set([key_basis])),
2232
('get_parent_map', set([key_basis])),
2233
('get_record_stream', [key_basis], 'topological', True)],
2236
def test_check(self):
2237
# At the moment checking a stacked knit does implicitly check the
2239
basis, test = self.get_basis_and_test_knit()
2242
def test_get_parent_map(self):
2243
# parents in the test knit are answered without asking the basis
2244
basis, test = self.get_basis_and_test_knit()
2246
key_basis = ('bar',)
2247
key_missing = ('missing',)
2248
test.add_lines(key, (), [])
2249
parent_map = test.get_parent_map([key])
2250
self.assertEqual({key: ()}, parent_map)
2251
self.assertEqual([], basis.calls)
2252
# But parents that are not in the test knit are looked for in the basis
2253
basis.add_lines(key_basis, (), [])
2255
parent_map = test.get_parent_map([key, key_basis, key_missing])
2256
self.assertEqual({key: (),
2257
key_basis: ()}, parent_map)
2258
self.assertEqual([("get_parent_map", set([key_basis, key_missing]))],
2261
def test_get_record_stream_unordered_fulltexts(self):
2262
# records from the test knit are answered without asking the basis:
2263
basis, test = self.get_basis_and_test_knit()
2265
key_basis = ('bar',)
2266
key_missing = ('missing',)
2267
test.add_lines(key, (), ['foo\n'])
2268
records = list(test.get_record_stream([key], 'unordered', True))
2269
self.assertEqual(1, len(records))
2270
self.assertEqual([], basis.calls)
2271
# Missing (from test knit) objects are retrieved from the basis:
2272
basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
2274
records = list(test.get_record_stream([key_basis, key_missing],
2276
self.assertEqual(2, len(records))
2277
calls = list(basis.calls)
2278
for record in records:
2279
self.assertSubset([record.key], (key_basis, key_missing))
2280
if record.key == key_missing:
2281
self.assertIsInstance(record, AbsentContentFactory)
2283
reference = list(basis.get_record_stream([key_basis],
2284
'unordered', True))[0]
2285
self.assertEqual(reference.key, record.key)
2286
self.assertEqual(reference.sha1, record.sha1)
2287
self.assertEqual(reference.storage_kind, record.storage_kind)
2288
self.assertEqual(reference.get_bytes_as(reference.storage_kind),
2289
record.get_bytes_as(record.storage_kind))
2290
self.assertEqual(reference.get_bytes_as('fulltext'),
2291
record.get_bytes_as('fulltext'))
2292
# It's not strictly minimal, but it seems reasonable for now for it to
2293
# ask which fallbacks have which parents.
2295
("get_parent_map", set([key_basis, key_missing])),
2296
("get_record_stream", [key_basis], 'unordered', True)],
2299
def test_get_record_stream_ordered_fulltexts(self):
2300
# ordering is preserved down into the fallback store.
2301
basis, test = self.get_basis_and_test_knit()
2303
key_basis = ('bar',)
2304
key_basis_2 = ('quux',)
2305
key_missing = ('missing',)
2306
test.add_lines(key, (key_basis,), ['foo\n'])
2307
# Missing (from test knit) objects are retrieved from the basis:
2308
basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
2309
basis.add_lines(key_basis_2, (), ['quux\n'])
2311
# ask for the keys in non-topological order
2312
records = list(test.get_record_stream(
2313
[key, key_basis, key_missing, key_basis_2], 'topological', True))
2314
self.assertEqual(4, len(records))
2316
for record in records:
2317
self.assertSubset([record.key],
2318
(key_basis, key_missing, key_basis_2, key))
2319
if record.key == key_missing:
2320
self.assertIsInstance(record, AbsentContentFactory)
2322
results.append((record.key, record.sha1, record.storage_kind,
2323
record.get_bytes_as('fulltext')))
2324
calls = list(basis.calls)
2325
order = [record[0] for record in results]
2326
self.assertEqual([key_basis_2, key_basis, key], order)
2327
for result in results:
2328
if result[0] == key:
2332
record = source.get_record_stream([result[0]], 'unordered',
2334
self.assertEqual(record.key, result[0])
2335
self.assertEqual(record.sha1, result[1])
2336
# We used to check that the storage kind matched, but actually it
2337
# depends on whether it was sourced from the basis, or in a single
2338
# group, because asking for full texts returns proxy objects to a
2339
# _ContentMapGenerator object; so checking the kind is unneeded.
2340
self.assertEqual(record.get_bytes_as('fulltext'), result[3])
2341
# It's not strictly minimal, but it seems reasonable for now for it to
2342
# ask which fallbacks have which parents.
2344
("get_parent_map", set([key_basis, key_basis_2, key_missing])),
2345
# topological is requested from the fallback, because that is what
2346
# was requested at the top level.
2347
("get_record_stream", [key_basis_2, key_basis], 'topological', True)],
2350
def test_get_record_stream_unordered_deltas(self):
2351
# records from the test knit are answered without asking the basis:
2352
basis, test = self.get_basis_and_test_knit()
2354
key_basis = ('bar',)
2355
key_missing = ('missing',)
2356
test.add_lines(key, (), ['foo\n'])
2357
records = list(test.get_record_stream([key], 'unordered', False))
2358
self.assertEqual(1, len(records))
2359
self.assertEqual([], basis.calls)
2360
# Missing (from test knit) objects are retrieved from the basis:
2361
basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
2363
records = list(test.get_record_stream([key_basis, key_missing],
2364
'unordered', False))
2365
self.assertEqual(2, len(records))
2366
calls = list(basis.calls)
2367
for record in records:
2368
self.assertSubset([record.key], (key_basis, key_missing))
2369
if record.key == key_missing:
2370
self.assertIsInstance(record, AbsentContentFactory)
2372
reference = list(basis.get_record_stream([key_basis],
2373
'unordered', False))[0]
2374
self.assertEqual(reference.key, record.key)
2375
self.assertEqual(reference.sha1, record.sha1)
2376
self.assertEqual(reference.storage_kind, record.storage_kind)
2377
self.assertEqual(reference.get_bytes_as(reference.storage_kind),
2378
record.get_bytes_as(record.storage_kind))
2379
# It's not strictly minimal, but it seems reasonable for now for it to
2380
# ask which fallbacks have which parents.
2382
("get_parent_map", set([key_basis, key_missing])),
2383
("get_record_stream", [key_basis], 'unordered', False)],
2386
def test_get_record_stream_ordered_deltas(self):
2387
# ordering is preserved down into the fallback store.
2388
basis, test = self.get_basis_and_test_knit()
2390
key_basis = ('bar',)
2391
key_basis_2 = ('quux',)
2392
key_missing = ('missing',)
2393
test.add_lines(key, (key_basis,), ['foo\n'])
2394
# Missing (from test knit) objects are retrieved from the basis:
2395
basis.add_lines(key_basis, (key_basis_2,), ['foo\n', 'bar\n'])
2396
basis.add_lines(key_basis_2, (), ['quux\n'])
2398
# ask for the keys in non-topological order
2399
records = list(test.get_record_stream(
2400
[key, key_basis, key_missing, key_basis_2], 'topological', False))
2401
self.assertEqual(4, len(records))
2403
for record in records:
2404
self.assertSubset([record.key],
2405
(key_basis, key_missing, key_basis_2, key))
2406
if record.key == key_missing:
2407
self.assertIsInstance(record, AbsentContentFactory)
2409
results.append((record.key, record.sha1, record.storage_kind,
2410
record.get_bytes_as(record.storage_kind)))
2411
calls = list(basis.calls)
2412
order = [record[0] for record in results]
2413
self.assertEqual([key_basis_2, key_basis, key], order)
2414
for result in results:
2415
if result[0] == key:
2419
record = source.get_record_stream([result[0]], 'unordered',
2421
self.assertEqual(record.key, result[0])
2422
self.assertEqual(record.sha1, result[1])
2423
self.assertEqual(record.storage_kind, result[2])
2424
self.assertEqual(record.get_bytes_as(record.storage_kind), result[3])
2425
# It's not strictly minimal, but it seems reasonable for now for it to
2426
# ask which fallbacks have which parents.
2428
("get_parent_map", set([key_basis, key_basis_2, key_missing])),
2429
("get_record_stream", [key_basis_2, key_basis], 'topological', False)],
2432
def test_get_sha1s(self):
2433
# sha1's in the test knit are answered without asking the basis
2434
basis, test = self.get_basis_and_test_knit()
2436
key_basis = ('bar',)
2437
key_missing = ('missing',)
2438
test.add_lines(key, (), ['foo\n'])
2439
key_sha1sum = osutils.sha('foo\n').hexdigest()
2440
sha1s = test.get_sha1s([key])
2441
self.assertEqual({key: key_sha1sum}, sha1s)
2442
self.assertEqual([], basis.calls)
2443
# But texts that are not in the test knit are looked for in the basis
2444
# directly (rather than via text reconstruction) so that remote servers
2445
# etc don't have to answer with full content.
2446
basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
2447
basis_sha1sum = osutils.sha('foo\nbar\n').hexdigest()
2449
sha1s = test.get_sha1s([key, key_missing, key_basis])
2450
self.assertEqual({key: key_sha1sum,
2451
key_basis: basis_sha1sum}, sha1s)
2452
self.assertEqual([("get_sha1s", set([key_basis, key_missing]))],
2455
def test_insert_record_stream(self):
2456
# records are inserted as normal; insert_record_stream builds on
2457
# add_lines, so a smoke test should be all that's needed:
2459
key_basis = ('bar',)
2460
key_delta = ('zaphod',)
2461
basis, test = self.get_basis_and_test_knit()
2462
source = self.make_test_knit(name='source')
2463
basis.add_lines(key_basis, (), ['foo\n'])
2465
source.add_lines(key_basis, (), ['foo\n'])
2466
source.add_lines(key_delta, (key_basis,), ['bar\n'])
2467
stream = source.get_record_stream([key_delta], 'unordered', False)
2468
test.insert_record_stream(stream)
2469
# XXX: this does somewhat too many calls in making sure of whether it
2470
# has to recreate the full text.
2471
self.assertEqual([("get_parent_map", set([key_basis])),
2472
('get_parent_map', set([key_basis])),
2473
('get_record_stream', [key_basis], 'unordered', True)],
2475
self.assertEqual({key_delta:(key_basis,)},
2476
test.get_parent_map([key_delta]))
2477
self.assertEqual('bar\n', test.get_record_stream([key_delta],
2478
'unordered', True).next().get_bytes_as('fulltext'))
2480
def test_iter_lines_added_or_present_in_keys(self):
2481
# Lines from the basis are returned, and lines for a given key are only
2485
# all sources are asked for keys:
2486
basis, test = self.get_basis_and_test_knit()
2487
basis.add_lines(key1, (), ["foo"])
2489
lines = list(test.iter_lines_added_or_present_in_keys([key1]))
2490
self.assertEqual([("foo\n", key1)], lines)
2491
self.assertEqual([("iter_lines_added_or_present_in_keys", set([key1]))],
2493
# keys in both are not duplicated:
2494
test.add_lines(key2, (), ["bar\n"])
2495
basis.add_lines(key2, (), ["bar\n"])
2497
lines = list(test.iter_lines_added_or_present_in_keys([key2]))
2498
self.assertEqual([("bar\n", key2)], lines)
2499
self.assertEqual([], basis.calls)
2501
def test_keys(self):
2504
# all sources are asked for keys:
2505
basis, test = self.get_basis_and_test_knit()
2507
self.assertEqual(set(), set(keys))
2508
self.assertEqual([("keys",)], basis.calls)
2509
# keys from a basis are returned:
2510
basis.add_lines(key1, (), [])
2513
self.assertEqual(set([key1]), set(keys))
2514
self.assertEqual([("keys",)], basis.calls)
2515
# keys in both are not duplicated:
2516
test.add_lines(key2, (), [])
2517
basis.add_lines(key2, (), [])
2520
self.assertEqual(2, len(keys))
2521
self.assertEqual(set([key1, key2]), set(keys))
2522
self.assertEqual([("keys",)], basis.calls)
2524
def test_add_mpdiffs(self):
2525
# records are inserted as normal; add_mpdiff builds on
2526
# add_lines, so a smoke test should be all that's needed:
2528
key_basis = ('bar',)
2529
key_delta = ('zaphod',)
2530
basis, test = self.get_basis_and_test_knit()
2531
source = self.make_test_knit(name='source')
2532
basis.add_lines(key_basis, (), ['foo\n'])
2534
source.add_lines(key_basis, (), ['foo\n'])
2535
source.add_lines(key_delta, (key_basis,), ['bar\n'])
2536
diffs = source.make_mpdiffs([key_delta])
2537
test.add_mpdiffs([(key_delta, (key_basis,),
2538
source.get_sha1s([key_delta])[key_delta], diffs[0])])
2539
self.assertEqual([("get_parent_map", set([key_basis])),
2540
('get_record_stream', [key_basis], 'unordered', True),],
2542
self.assertEqual({key_delta:(key_basis,)},
2543
test.get_parent_map([key_delta]))
2544
self.assertEqual('bar\n', test.get_record_stream([key_delta],
2545
'unordered', True).next().get_bytes_as('fulltext'))
2547
def test_make_mpdiffs(self):
2548
# Generating an mpdiff across a stacking boundary should detect parent
2552
key_right = ('zaphod',)
2553
basis, test = self.get_basis_and_test_knit()
2554
basis.add_lines(key_left, (), ['bar\n'])
2555
basis.add_lines(key_right, (), ['zaphod\n'])
2557
test.add_lines(key, (key_left, key_right),
2558
['bar\n', 'foo\n', 'zaphod\n'])
2559
diffs = test.make_mpdiffs([key])
2561
multiparent.MultiParent([multiparent.ParentText(0, 0, 0, 1),
2562
multiparent.NewText(['foo\n']),
2563
multiparent.ParentText(1, 0, 2, 1)])],
2565
self.assertEqual(3, len(basis.calls))
2567
("get_parent_map", set([key_left, key_right])),
2568
("get_parent_map", set([key_left, key_right])),
2571
last_call = basis.calls[-1]
2572
self.assertEqual('get_record_stream', last_call[0])
2573
self.assertEqual(set([key_left, key_right]), set(last_call[1]))
2574
self.assertEqual('topological', last_call[2])
2575
self.assertEqual(True, last_call[3])
2578
class TestNetworkBehaviour(KnitTests):
2579
"""Tests for getting data out of/into knits over the network."""
2581
def test_include_delta_closure_generates_a_knit_delta_closure(self):
2582
vf = self.make_test_knit(name='test')
2583
# put in three texts, giving ft, delta, delta
2584
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2585
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2586
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2587
# But heuristics could interfere, so check what happened:
2588
self.assertEqual(['knit-ft-gz', 'knit-delta-gz', 'knit-delta-gz'],
2589
[record.storage_kind for record in
2590
vf.get_record_stream([('base',), ('d1',), ('d2',)],
2591
'topological', False)])
2592
# generate a stream of just the deltas, with include_delta_closure=True,
2593
# serialise to the network, and check that we get a delta closure on the wire.
2594
stream = vf.get_record_stream([('d1',), ('d2',)], 'topological', True)
2595
netb = [record.get_bytes_as(record.storage_kind) for record in stream]
2596
# The first bytes should be a memo from _ContentMapGenerator, and the
2597
# second bytes should be empty (because it's an API proxy, not something
2598
# for wire serialisation).
2599
self.assertEqual('', netb[1])
2601
kind, line_end = network_bytes_to_kind_and_offset(bytes)
2602
self.assertEqual('knit-delta-closure', kind)
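# Illustrative sketch (an assumption about the framing checked above, not
# necessarily the exact layout bzrlib uses): the network form of a record is
# taken to start with its storage kind terminated by a newline, with the
# payload following, so the kind and the payload offset can be recovered
# from the raw bytes.
def _kind_and_offset_sketch(network_bytes):
    line_end = network_bytes.index('\n')
    return network_bytes[:line_end], line_end + 1
# e.g. _kind_and_offset_sketch('knit-delta-closure\npayload')
# -> ('knit-delta-closure', 19)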
2605
class TestContentMapGenerator(KnitTests):
2606
"""Tests for ContentMapGenerator"""
2608
def test_get_record_stream_gives_records(self):
2609
vf = self.make_test_knit(name='test')
2610
# put in three texts, giving ft, delta, delta
2611
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2612
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2613
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2614
keys = [('d1',), ('d2',)]
2615
generator = _VFContentMapGenerator(vf, keys,
2616
global_map=vf.get_parent_map(keys))
2617
for record in generator.get_record_stream():
2618
if record.key == ('d1',):
2619
self.assertEqual('d1\n', record.get_bytes_as('fulltext'))
2621
self.assertEqual('d2\n', record.get_bytes_as('fulltext'))
2623
def test_get_record_stream_kinds_are_raw(self):
2624
vf = self.make_test_knit(name='test')
2625
# put in three texts, giving ft, delta, delta
2626
vf.add_lines(('base',), (), ['base\n', 'content\n'])
2627
vf.add_lines(('d1',), (('base',),), ['d1\n'])
2628
vf.add_lines(('d2',), (('d1',),), ['d2\n'])
2629
keys = [('base',), ('d1',), ('d2',)]
2630
generator = _VFContentMapGenerator(vf, keys,
2631
global_map=vf.get_parent_map(keys))
2632
kinds = {('base',): 'knit-delta-closure',
2633
('d1',): 'knit-delta-closure-ref',
2634
('d2',): 'knit-delta-closure-ref',
2636
for record in generator.get_record_stream():
2637
self.assertEqual(kinds[record.key], record.storage_kind)