~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/tests/test_knit.py

  • Committer: Andrew Bennetts
  • Date: 2008-07-03 07:56:02 UTC
  • mto: This revision was merged to the branch mainline in revision 3520.
  • Revision ID: andrew.bennetts@canonical.com-20080703075602-8n055qsfkjijcz6i
Better tests for {pre,post}_change_branch_tip hooks.

@@ -1,4 +1,4 @@
-# Copyright (C) 2006-2011 Canonical Ltd
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -12,60 +12,74 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
 """Tests for Knit data structure"""
 
 from cStringIO import StringIO
+import difflib
 import gzip
+import sha
 import sys
 
 from bzrlib import (
     errors,
+    generate_ids,
     knit,
     multiparent,
-    osutils,
     pack,
-    tests,
-    transport,
     )
 from bzrlib.errors import (
+    RevisionAlreadyPresent,
     KnitHeaderError,
+    RevisionNotPresent,
     NoSuchFile,
     )
 from bzrlib.index import *
 from bzrlib.knit import (
     AnnotatedKnitContent,
     KnitContent,
+    KnitSequenceMatcher,
     KnitVersionedFiles,
     PlainKnitContent,
-    _VFContentMapGenerator,
+    _DirectPackAccess,
     _KndxIndex,
     _KnitGraphIndex,
     _KnitKeyAccess,
     make_file_factory,
     )
-from bzrlib.patiencediff import PatienceSequenceMatcher
-from bzrlib.repofmt import (
-    knitpack_repo,
-    pack_repo,
-    )
+from bzrlib.osutils import split_lines
+from bzrlib.symbol_versioning import one_four
 from bzrlib.tests import (
+    Feature,
+    KnownFailure,
     TestCase,
     TestCaseWithMemoryTransport,
     TestCaseWithTransport,
-    TestNotApplicable,
     )
+from bzrlib.transport import get_transport
+from bzrlib.transport.memory import MemoryTransport
+from bzrlib.tuned_gzip import GzipFile
 from bzrlib.versionedfile import (
     AbsentContentFactory,
     ConstantMapper,
-    network_bytes_to_kind_and_offset,
    RecordingVersionedFilesDecorator,
     )
 
 
-compiled_knit_feature = tests.ModuleAvailableFeature(
-                            'bzrlib._knit_load_data_pyx')
+class _CompiledKnitFeature(Feature):
+
+    def _probe(self):
+        try:
+            import bzrlib._knit_load_data_c
+        except ImportError:
+            return False
+        return True
+
+    def feature_name(self):
+        return 'bzrlib._knit_load_data_c'
+
+CompiledKnitFeature = _CompiledKnitFeature()
 
 
 class KnitContentTestsMixin(object):
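The hunk above trades the mainline's generic `tests.ModuleAvailableFeature` probe for a hand-rolled `_CompiledKnitFeature`. Both follow the same test-feature pattern: probe once for an optional compiled module, and let test classes declare the dependency through `_test_needs_features` so the runner can skip them when the probe fails. A minimal sketch of the generic form of that probe (illustrative only, not part of this diff; it assumes `Feature` behaves as the `bzrlib.tests.Feature` imported above):

    class _OptionalModuleFeature(Feature):
        """Probe once for an optional module; dependent tests are skipped
        when the import fails."""

        def __init__(self, module_name):
            super(_OptionalModuleFeature, self).__init__()
            self.module_name = module_name

        def _probe(self):
            try:
                __import__(self.module_name)
            except ImportError:
                return False
            return True

        def feature_name(self):
            return self.module_name

A test class would then declare, say, `_test_needs_features = [_OptionalModuleFeature('bzrlib._knit_load_data_c')]`, which is exactly how `CompiledKnitFeature` is used further down in this file.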
@@ -100,8 +114,8 @@
         line_delta = source_content.line_delta(target_content)
         delta_blocks = list(KnitContent.get_line_delta_blocks(line_delta,
             source_lines, target_lines))
-        matcher = PatienceSequenceMatcher(None, source_lines, target_lines)
-        matcher_blocks = list(matcher.get_matching_blocks())
+        matcher = KnitSequenceMatcher(None, source_lines, target_lines)
+        matcher_blocks = list(list(matcher.get_matching_blocks()))
         self.assertEqual(matcher_blocks, delta_blocks)
 
     def test_get_line_delta_blocks(self):
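This assertion relies on `get_matching_blocks()` returning `(i, j, n)` triples meaning `source_lines[i:i+n] == target_lines[j:j+n]`, terminated by a `(len(a), len(b), 0)` sentinel; `KnitContent.get_line_delta_blocks` is expected to reproduce exactly that sequence from a line delta. A standalone illustration of the block format, using `difflib` (which this revision's side of the diff imports):

    import difflib

    source_lines = ['a\n', 'b\n', 'c\n']
    target_lines = ['a\n', 'x\n', 'c\n']
    matcher = difflib.SequenceMatcher(None, source_lines, target_lines)
    blocks = [tuple(b) for b in matcher.get_matching_blocks()]
    # One matching line at the start, one at the end, then the sentinel.
    assert blocks == [(0, 0, 1), (2, 2, 1), (3, 3, 0)]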
@@ -257,24 +271,6 @@
         return queue_call
 
 
-class MockReadvFailingTransport(MockTransport):
-    """Fail in the middle of a readv() result.
-
-    This Transport will successfully yield the first two requested hunks, but
-    raise NoSuchFile for the rest.
-    """
-
-    def readv(self, relpath, offsets):
-        count = 0
-        for result in MockTransport.readv(self, relpath, offsets):
-            count += 1
-            # we use 2 because the first offset is the pack header, the second
-            # is the first actual content request
-            if count > 2:
-                raise errors.NoSuchFile(relpath)
-            yield result
-
-
 class KnitRecordAccessTestsMixin(object):
     """Tests for getting and putting knit records."""
 
@@ -283,7 +279,7 @@
         access = self.get_access()
         memos = access.add_raw_records([('key', 10)], '1234567890')
         self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
-
+
     def test_add_several_raw_records(self):
         """add_raw_records with many records and read some back."""
         access = self.get_access()
@@ -309,11 +305,7 @@
         mapper = ConstantMapper("foo")
         access = _KnitKeyAccess(self.get_transport(), mapper)
         return access
-
-
-class _TestException(Exception):
-    """Just an exception for local tests to use."""
-
+
 
 class TestPackKnitAccess(TestCaseWithMemoryTransport, KnitRecordAccessTestsMixin):
     """Tests for the pack based access."""
@@ -327,125 +319,10 @@
             transport.append_bytes(packname, bytes)
         writer = pack.ContainerWriter(write_data)
         writer.begin()
-        access = pack_repo._DirectPackAccess({})
+        access = _DirectPackAccess({})
         access.set_writer(writer, index, (transport, packname))
         return access, writer
 
-    def make_pack_file(self):
-        """Create a pack file with 2 records."""
-        access, writer = self._get_access(packname='packname', index='foo')
-        memos = []
-        memos.extend(access.add_raw_records([('key1', 10)], '1234567890'))
-        memos.extend(access.add_raw_records([('key2', 5)], '12345'))
-        writer.end()
-        return memos
-
-    def test_pack_collection_pack_retries(self):
-        """An explicit pack of a pack collection succeeds even when a
-        concurrent pack happens.
-        """
-        builder = self.make_branch_builder('.')
-        builder.start_series()
-        builder.build_snapshot('rev-1', None, [
-            ('add', ('', 'root-id', 'directory', None)),
-            ('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
-            ])
-        builder.build_snapshot('rev-2', ['rev-1'], [
-            ('modify', ('file-id', 'content\nrev 2\n')),
-            ])
-        builder.build_snapshot('rev-3', ['rev-2'], [
-            ('modify', ('file-id', 'content\nrev 3\n')),
-            ])
-        self.addCleanup(builder.finish_series)
-        b = builder.get_branch()
-        self.addCleanup(b.lock_write().unlock)
-        repo = b.repository
-        collection = repo._pack_collection
-        # Concurrently repack the repo.
-        reopened_repo = repo.bzrdir.open_repository()
-        reopened_repo.pack()
-        # Pack the new pack.
-        collection.pack()
-
-    def make_vf_for_retrying(self):
-        """Create 3 packs and a reload function.
-
-        Originally, 2 pack files will have the data, but one will be missing.
-        And then the third will be used in place of the first two if reload()
-        is called.
-
-        :return: (versioned_file, reload_counter)
-            versioned_file  a KnitVersionedFiles using the packs for access
-        """
-        builder = self.make_branch_builder('.', format="1.9")
-        builder.start_series()
-        builder.build_snapshot('rev-1', None, [
-            ('add', ('', 'root-id', 'directory', None)),
-            ('add', ('file', 'file-id', 'file', 'content\nrev 1\n')),
-            ])
-        builder.build_snapshot('rev-2', ['rev-1'], [
-            ('modify', ('file-id', 'content\nrev 2\n')),
-            ])
-        builder.build_snapshot('rev-3', ['rev-2'], [
-            ('modify', ('file-id', 'content\nrev 3\n')),
-            ])
-        builder.finish_series()
-        b = builder.get_branch()
-        b.lock_write()
-        self.addCleanup(b.unlock)
-        # Pack these three revisions into another pack file, but don't remove
-        # the originals
-        repo = b.repository
-        collection = repo._pack_collection
-        collection.ensure_loaded()
-        orig_packs = collection.packs
-        packer = knitpack_repo.KnitPacker(collection, orig_packs, '.testpack')
-        new_pack = packer.pack()
-        # forget about the new pack
-        collection.reset()
-        repo.refresh_data()
-        vf = repo.revisions
-        # Set up a reload() function that switches to using the new pack file
-        new_index = new_pack.revision_index
-        access_tuple = new_pack.access_tuple()
-        reload_counter = [0, 0, 0]
-        def reload():
-            reload_counter[0] += 1
-            if reload_counter[1] > 0:
-                # We already reloaded, nothing more to do
-                reload_counter[2] += 1
-                return False
-            reload_counter[1] += 1
-            vf._index._graph_index._indices[:] = [new_index]
-            vf._access._indices.clear()
-            vf._access._indices[new_index] = access_tuple
-            return True
-        # Delete one of the pack files so the data will need to be reloaded. We
-        # will delete the file with 'rev-2' in it
-        trans, name = orig_packs[1].access_tuple()
-        trans.delete(name)
-        # We don't have the index trigger reloading because we want to test
-        # that we reload when the .pack disappears
-        vf._access._reload_func = reload
-        return vf, reload_counter
-
-    def make_reload_func(self, return_val=True):
-        reload_called = [0]
-        def reload():
-            reload_called[0] += 1
-            return return_val
-        return reload_called, reload
-
-    def make_retry_exception(self):
-        # We raise a real exception so that sys.exc_info() is properly
-        # populated
-        try:
-            raise _TestException('foobar')
-        except _TestException, e:
-            retry_exc = errors.RetryWithNewPacks(None, reload_occurred=False,
-                                                 exc_info=sys.exc_info())
-        return retry_exc
-
     def test_read_from_several_packs(self):
         access, writer = self._get_access()
         memos = []
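The removed helpers above (`make_vf_for_retrying`, `make_reload_func`) share a Python 2 idiom worth noting: a closure cannot rebind a name in its enclosing scope (there is no `nonlocal`), so a one-element list such as `reload_called = [0]` or `reload_counter = [0, 0, 0]` serves as a mutable counter the inner function can update. The idiom in isolation (a sketch, not code from this file):

    def make_counter():
        calls = [0]
        def bump():
            # Mutating the list element works where 'calls += 1' would not.
            calls[0] += 1
        return calls, bump

    calls, bump = make_counter()
    bump(); bump()
    assert calls == [2]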
@@ -458,7 +335,7 @@
         memos.extend(access.add_raw_records([('key', 5)], 'alpha'))
         writer.end()
         transport = self.get_transport()
-        access = pack_repo._DirectPackAccess({"FOO":(transport, 'packfile'),
+        access = _DirectPackAccess({"FOO":(transport, 'packfile'),
             "FOOBAR":(transport, 'pack2'),
             "BAZ":(transport, 'pack3')})
         self.assertEqual(['1234567890', '12345', 'alpha'],
@@ -474,7 +351,7 @@
 
     def test_set_writer(self):
         """The writer should be settable post construction."""
-        access = pack_repo._DirectPackAccess({})
+        access = _DirectPackAccess({})
         transport = self.get_transport()
         packname = 'packfile'
         index = 'foo'
@@ -487,239 +364,6 @@
         writer.end()
         self.assertEqual(['1234567890'], list(access.get_raw_records(memos)))
 
-    def test_missing_index_raises_retry(self):
-        memos = self.make_pack_file()
-        transport = self.get_transport()
-        reload_called, reload_func = self.make_reload_func()
-        # Note that the index key has changed from 'foo' to 'bar'
-        access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')},
-                                   reload_func=reload_func)
-        e = self.assertListRaises(errors.RetryWithNewPacks,
-                                  access.get_raw_records, memos)
-        # Because a key was passed in which does not match our index list, we
-        # assume that the listing was already reloaded
-        self.assertTrue(e.reload_occurred)
-        self.assertIsInstance(e.exc_info, tuple)
-        self.assertIs(e.exc_info[0], KeyError)
-        self.assertIsInstance(e.exc_info[1], KeyError)
-
-    def test_missing_index_raises_key_error_with_no_reload(self):
-        memos = self.make_pack_file()
-        transport = self.get_transport()
-        # Note that the index key has changed from 'foo' to 'bar'
-        access = pack_repo._DirectPackAccess({'bar':(transport, 'packname')})
-        e = self.assertListRaises(KeyError, access.get_raw_records, memos)
-
-    def test_missing_file_raises_retry(self):
-        memos = self.make_pack_file()
-        transport = self.get_transport()
-        reload_called, reload_func = self.make_reload_func()
-        # Note that the 'filename' has been changed to 'different-packname'
-        access = pack_repo._DirectPackAccess(
-            {'foo':(transport, 'different-packname')},
-            reload_func=reload_func)
-        e = self.assertListRaises(errors.RetryWithNewPacks,
-                                  access.get_raw_records, memos)
-        # The file has gone missing, so we assume we need to reload
-        self.assertFalse(e.reload_occurred)
-        self.assertIsInstance(e.exc_info, tuple)
-        self.assertIs(e.exc_info[0], errors.NoSuchFile)
-        self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
-        self.assertEqual('different-packname', e.exc_info[1].path)
-
-    def test_missing_file_raises_no_such_file_with_no_reload(self):
-        memos = self.make_pack_file()
-        transport = self.get_transport()
-        # Note that the 'filename' has been changed to 'different-packname'
-        access = pack_repo._DirectPackAccess(
-            {'foo': (transport, 'different-packname')})
-        e = self.assertListRaises(errors.NoSuchFile,
-                                  access.get_raw_records, memos)
-
-    def test_failing_readv_raises_retry(self):
-        memos = self.make_pack_file()
-        transport = self.get_transport()
-        failing_transport = MockReadvFailingTransport(
-                                [transport.get_bytes('packname')])
-        reload_called, reload_func = self.make_reload_func()
-        access = pack_repo._DirectPackAccess(
-            {'foo': (failing_transport, 'packname')},
-            reload_func=reload_func)
-        # Asking for a single record will not trigger the Mock failure
-        self.assertEqual(['1234567890'],
-            list(access.get_raw_records(memos[:1])))
-        self.assertEqual(['12345'],
-            list(access.get_raw_records(memos[1:2])))
-        # A multiple offset readv() will fail mid-way through
-        e = self.assertListRaises(errors.RetryWithNewPacks,
-                                  access.get_raw_records, memos)
-        # The file has gone missing, so we assume we need to reload
-        self.assertFalse(e.reload_occurred)
-        self.assertIsInstance(e.exc_info, tuple)
-        self.assertIs(e.exc_info[0], errors.NoSuchFile)
-        self.assertIsInstance(e.exc_info[1], errors.NoSuchFile)
-        self.assertEqual('packname', e.exc_info[1].path)
-
-    def test_failing_readv_raises_no_such_file_with_no_reload(self):
-        memos = self.make_pack_file()
-        transport = self.get_transport()
-        failing_transport = MockReadvFailingTransport(
-                                [transport.get_bytes('packname')])
-        reload_called, reload_func = self.make_reload_func()
-        access = pack_repo._DirectPackAccess(
-            {'foo':(failing_transport, 'packname')})
-        # Asking for a single record will not trigger the Mock failure
-        self.assertEqual(['1234567890'],
-            list(access.get_raw_records(memos[:1])))
-        self.assertEqual(['12345'],
-            list(access.get_raw_records(memos[1:2])))
-        # A multiple offset readv() will fail mid-way through
-        e = self.assertListRaises(errors.NoSuchFile,
-                                  access.get_raw_records, memos)
-
-    def test_reload_or_raise_no_reload(self):
-        access = pack_repo._DirectPackAccess({}, reload_func=None)
-        retry_exc = self.make_retry_exception()
-        # Without a reload_func, we will just re-raise the original exception
-        self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
-
-    def test_reload_or_raise_reload_changed(self):
-        reload_called, reload_func = self.make_reload_func(return_val=True)
-        access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
-        retry_exc = self.make_retry_exception()
-        access.reload_or_raise(retry_exc)
-        self.assertEqual([1], reload_called)
-        retry_exc.reload_occurred=True
-        access.reload_or_raise(retry_exc)
-        self.assertEqual([2], reload_called)
-
-    def test_reload_or_raise_reload_no_change(self):
-        reload_called, reload_func = self.make_reload_func(return_val=False)
-        access = pack_repo._DirectPackAccess({}, reload_func=reload_func)
-        retry_exc = self.make_retry_exception()
-        # If reload_occurred is False, then we consider it an error to have
-        # reload_func() return False (no changes).
-        self.assertRaises(_TestException, access.reload_or_raise, retry_exc)
-        self.assertEqual([1], reload_called)
-        retry_exc.reload_occurred=True
-        # If reload_occurred is True, then we assume nothing changed because
-        # it had changed earlier, but didn't change again
-        access.reload_or_raise(retry_exc)
-        self.assertEqual([2], reload_called)
-
-    def test_annotate_retries(self):
-        vf, reload_counter = self.make_vf_for_retrying()
-        # It is a little bit bogus to annotate the Revision VF, but it works,
-        # as we have ancestry stored there
-        key = ('rev-3',)
-        reload_lines = vf.annotate(key)
-        self.assertEqual([1, 1, 0], reload_counter)
-        plain_lines = vf.annotate(key)
-        self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
-        if reload_lines != plain_lines:
-            self.fail('Annotation was not identical with reloading.')
-        # Now delete the packs-in-use, which should trigger another reload, but
-        # this time we just raise an exception because we can't recover
-        for trans, name in vf._access._indices.itervalues():
-            trans.delete(name)
-        self.assertRaises(errors.NoSuchFile, vf.annotate, key)
-        self.assertEqual([2, 1, 1], reload_counter)
-
-    def test__get_record_map_retries(self):
-        vf, reload_counter = self.make_vf_for_retrying()
-        keys = [('rev-1',), ('rev-2',), ('rev-3',)]
-        records = vf._get_record_map(keys)
-        self.assertEqual(keys, sorted(records.keys()))
-        self.assertEqual([1, 1, 0], reload_counter)
-        # Now delete the packs-in-use, which should trigger another reload, but
-        # this time we just raise an exception because we can't recover
-        for trans, name in vf._access._indices.itervalues():
-            trans.delete(name)
-        self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
-        self.assertEqual([2, 1, 1], reload_counter)
-
-    def test_get_record_stream_retries(self):
-        vf, reload_counter = self.make_vf_for_retrying()
-        keys = [('rev-1',), ('rev-2',), ('rev-3',)]
-        record_stream = vf.get_record_stream(keys, 'topological', False)
-        record = record_stream.next()
-        self.assertEqual(('rev-1',), record.key)
-        self.assertEqual([0, 0, 0], reload_counter)
-        record = record_stream.next()
-        self.assertEqual(('rev-2',), record.key)
-        self.assertEqual([1, 1, 0], reload_counter)
-        record = record_stream.next()
-        self.assertEqual(('rev-3',), record.key)
-        self.assertEqual([1, 1, 0], reload_counter)
-        # Now delete all pack files, and see that we raise the right error
-        for trans, name in vf._access._indices.itervalues():
-            trans.delete(name)
-        self.assertListRaises(errors.NoSuchFile,
-            vf.get_record_stream, keys, 'topological', False)
-
-    def test_iter_lines_added_or_present_in_keys_retries(self):
-        vf, reload_counter = self.make_vf_for_retrying()
-        keys = [('rev-1',), ('rev-2',), ('rev-3',)]
-        # Unfortunately, iter_lines_added_or_present_in_keys iterates the
-        # result in random order (determined by the iteration order from a
-        # set()), so we don't have any solid way to trigger whether data is
-        # read before or after. However we tried to delete the middle node to
-        # exercise the code well.
-        # What we care about is that all lines are always yielded, but not
-        # duplicated
-        count = 0
-        reload_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
-        self.assertEqual([1, 1, 0], reload_counter)
-        # Now do it again, to make sure the result is equivalent
-        plain_lines = sorted(vf.iter_lines_added_or_present_in_keys(keys))
-        self.assertEqual([1, 1, 0], reload_counter) # No extra reloading
-        self.assertEqual(plain_lines, reload_lines)
-        self.assertEqual(21, len(plain_lines))
-        # Now delete all pack files, and see that we raise the right error
-        for trans, name in vf._access._indices.itervalues():
-            trans.delete(name)
-        self.assertListRaises(errors.NoSuchFile,
-            vf.iter_lines_added_or_present_in_keys, keys)
-        self.assertEqual([2, 1, 1], reload_counter)
-
-    def test_get_record_stream_yields_disk_sorted_order(self):
-        # if we get 'unordered' pick a semi-optimal order for reading. The
-        # order should be grouped by pack file, and then by position in file
-        repo = self.make_repository('test', format='pack-0.92')
-        repo.lock_write()
-        self.addCleanup(repo.unlock)
-        repo.start_write_group()
-        vf = repo.texts
-        vf.add_lines(('f-id', 'rev-5'), [('f-id', 'rev-4')], ['lines\n'])
-        vf.add_lines(('f-id', 'rev-1'), [], ['lines\n'])
-        vf.add_lines(('f-id', 'rev-2'), [('f-id', 'rev-1')], ['lines\n'])
-        repo.commit_write_group()
-        # We inserted them as rev-5, rev-1, rev-2, we should get them back in
-        # the same order
-        stream = vf.get_record_stream([('f-id', 'rev-1'), ('f-id', 'rev-5'),
-                                       ('f-id', 'rev-2')], 'unordered', False)
-        keys = [r.key for r in stream]
-        self.assertEqual([('f-id', 'rev-5'), ('f-id', 'rev-1'),
-                          ('f-id', 'rev-2')], keys)
-        repo.start_write_group()
-        vf.add_lines(('f-id', 'rev-4'), [('f-id', 'rev-3')], ['lines\n'])
-        vf.add_lines(('f-id', 'rev-3'), [('f-id', 'rev-2')], ['lines\n'])
-        vf.add_lines(('f-id', 'rev-6'), [('f-id', 'rev-5')], ['lines\n'])
-        repo.commit_write_group()
-        # Request in random order, to make sure the output order isn't based on
-        # the request
-        request_keys = set(('f-id', 'rev-%d' % i) for i in range(1, 7))
-        stream = vf.get_record_stream(request_keys, 'unordered', False)
-        keys = [r.key for r in stream]
-        # We want to get the keys back in disk order, but it doesn't matter
-        # which pack we read from first. So this can come back in 2 orders
-        alt1 = [('f-id', 'rev-%d' % i) for i in [4, 3, 6, 5, 1, 2]]
-        alt2 = [('f-id', 'rev-%d' % i) for i in [5, 1, 2, 4, 3, 6]]
-        if keys != alt1 and keys != alt2:
-            self.fail('Returned key order did not match either expected order.'
-                      ' expected %s or %s, not %s'
-                      % (alt1, alt2, keys))
-
 
 class LowLevelKnitDataTests(TestCase):
 
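The removed `test_reload_or_raise_*` and `*_retries` tests above encode the retry protocol around `errors.RetryWithNewPacks`: the exception carries the original `exc_info`, and `reload_or_raise()` either invokes the access object's `reload_func` (so the caller can try again against the repacked files) or re-raises the original error when no reload is possible. The caller side of that protocol, sketched with names from this file (a sketch of the pattern, not code from the diff):

    def read_with_retry(access, memos):
        # Loop until the read succeeds or reload_or_raise() re-raises
        # the underlying error (e.g. NoSuchFile for a vanished pack).
        while True:
            try:
                return list(access.get_raw_records(memos))
            except errors.RetryWithNewPacks, retry_exc:
                access.reload_or_raise(retry_exc)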
@@ -730,28 +374,8 @@
         gz_file.close()
         return sio.getvalue()
 
-    def make_multiple_records(self):
-        """Create the content for multiple records."""
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
-        total_txt = []
-        gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
-                                        'foo\n'
-                                        'bar\n'
-                                        'end rev-id-1\n'
-                                        % (sha1sum,))
-        record_1 = (0, len(gz_txt), sha1sum)
-        total_txt.append(gz_txt)
-        sha1sum = osutils.sha('baz\n').hexdigest()
-        gz_txt = self.create_gz_content('version rev-id-2 1 %s\n'
-                                        'baz\n'
-                                        'end rev-id-2\n'
-                                        % (sha1sum,))
-        record_2 = (record_1[1], len(gz_txt), sha1sum)
-        total_txt.append(gz_txt)
-        return total_txt, record_1, record_2
-
     def test_valid_knit_data(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = sha.new('foo\nbar\n').hexdigest()
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                         'foo\n'
                                         'bar\n'
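`make_multiple_records` (removed above) and `test_valid_knit_data` both build knit data records with the same on-disk layout: a gzip member holding a `version <rev-id> <line-count> <sha1>` header line, the text lines, and an `end <rev-id>` marker. A sketch of a helper producing one such record, using the `sha`, `StringIO` and `GzipFile` imports this revision adds (the elided `create_gz_content` helper, whose tail `gz_file.close()` / `return sio.getvalue()` is visible in the context above, presumably performs the gzip step the same way):

    def make_knit_record(rev_id, lines):
        text = ''.join(lines)
        sha1sum = sha.new(text).hexdigest()
        sio = StringIO()
        gz_file = GzipFile(mode='wb', fileobj=sio)
        # Header: record name, line count, and sha1 of the raw content.
        gz_file.write('version %s %d %s\n' % (rev_id, len(lines), sha1sum))
        gz_file.write(text)
        gz_file.write('end %s\n' % (rev_id,))
        gz_file.close()
        return sio.getvalue()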
@@ -769,26 +393,8 @@
         raw_contents = list(knit._read_records_iter_raw(records))
         self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
 
-    def test_multiple_records_valid(self):
-        total_txt, record_1, record_2 = self.make_multiple_records()
-        transport = MockTransport([''.join(total_txt)])
-        access = _KnitKeyAccess(transport, ConstantMapper('filename'))
-        knit = KnitVersionedFiles(None, access)
-        records = [(('rev-id-1',), (('rev-id-1',), record_1[0], record_1[1])),
-                   (('rev-id-2',), (('rev-id-2',), record_2[0], record_2[1]))]
-
-        contents = list(knit._read_records_iter(records))
-        self.assertEqual([(('rev-id-1',), ['foo\n', 'bar\n'], record_1[2]),
-                          (('rev-id-2',), ['baz\n'], record_2[2])],
-                         contents)
-
-        raw_contents = list(knit._read_records_iter_raw(records))
-        self.assertEqual([(('rev-id-1',), total_txt[0], record_1[2]),
-                          (('rev-id-2',), total_txt[1], record_2[2])],
-                         raw_contents)
-
     def test_not_enough_lines(self):
-        sha1sum = osutils.sha('foo\n').hexdigest()
+        sha1sum = sha.new('foo\n').hexdigest()
         # record says 2 lines data says 1
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                         'foo\n'
@@ -806,7 +412,7 @@
         self.assertEqual([(('rev-id-1',),  gz_txt, sha1sum)], raw_contents)
 
     def test_too_many_lines(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = sha.new('foo\nbar\n').hexdigest()
         # record says 1 lines data says 2
         gz_txt = self.create_gz_content('version rev-id-1 1 %s\n'
                                         'foo\n'
@@ -825,7 +431,7 @@
         self.assertEqual([(('rev-id-1',), gz_txt, sha1sum)], raw_contents)
 
     def test_mismatched_version_id(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = sha.new('foo\nbar\n').hexdigest()
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                         'foo\n'
                                         'bar\n'
@@ -844,7 +450,7 @@
             knit._read_records_iter_raw(records))
 
     def test_uncompressed_data(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = sha.new('foo\nbar\n').hexdigest()
         txt = ('version rev-id-1 2 %s\n'
                'foo\n'
                'bar\n'
@@ -864,7 +470,7 @@
             knit._read_records_iter_raw(records))
 
     def test_corrupted_data(self):
-        sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        sha1sum = sha.new('foo\nbar\n').hexdigest()
         gz_txt = self.create_gz_content('version rev-id-1 2 %s\n'
                                         'foo\n'
                                         'bar\n'
@@ -887,8 +493,12 @@
 
     def get_knit_index(self, transport, name, mode):
         mapper = ConstantMapper(name)
+        orig = knit._load_data
+        def reset():
+            knit._load_data = orig
+        self.addCleanup(reset)
         from bzrlib._knit_load_data_py import _load_data_py
-        self.overrideAttr(knit, '_load_data', _load_data_py)
+        knit._load_data = _load_data_py
         allow_writes = lambda: 'w' in mode
         return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
 
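This hunk swaps the mainline's one-line `self.overrideAttr(knit, '_load_data', _load_data_py)` for a manual save/patch/restore using `addCleanup`. The two are equivalent; an `overrideAttr`-style helper only needs a few lines (a sketch of the pattern, not bzrlib's actual implementation):

    def override_attr(test, obj, attr_name, new):
        orig = getattr(obj, attr_name)
        def restore():
            setattr(obj, attr_name, orig)
        # The cleanup runs even if the test fails, keeping tests isolated.
        test.addCleanup(restore)
        setattr(obj, attr_name, new)
        return orig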
@@ -1110,26 +720,6 @@
             call[1][1].getvalue())
         self.assertEqual({'create_parent_dir': True}, call[2])
 
-    def assertTotalBuildSize(self, size, keys, positions):
-        self.assertEqual(size,
-                         knit._get_total_build_size(None, keys, positions))
-
-    def test__get_total_build_size(self):
-        positions = {
-            ('a',): (('fulltext', False), (('a',), 0, 100), None),
-            ('b',): (('line-delta', False), (('b',), 100, 21), ('a',)),
-            ('c',): (('line-delta', False), (('c',), 121, 35), ('b',)),
-            ('d',): (('line-delta', False), (('d',), 156, 12), ('b',)),
-            }
-        self.assertTotalBuildSize(100, [('a',)], positions)
-        self.assertTotalBuildSize(121, [('b',)], positions)
-        # c needs both a & b
-        self.assertTotalBuildSize(156, [('c',)], positions)
-        # we shouldn't count 'b' twice
-        self.assertTotalBuildSize(156, [('b',), ('c',)], positions)
-        self.assertTotalBuildSize(133, [('d',)], positions)
-        self.assertTotalBuildSize(168, [('c',), ('d',)], positions)
-
     def test_get_position(self):
         transport = MockTransport([
             _KndxIndex.HEADER,
@@ -1276,15 +866,6 @@
             else:
                 raise
 
-    def test_scan_unvalidated_index_not_implemented(self):
-        transport = MockTransport()
-        index = self.get_knit_index(transport, 'filename', 'r')
-        self.assertRaises(
-            NotImplementedError, index.scan_unvalidated_index,
-            'dummy graph_index')
-        self.assertRaises(
-            NotImplementedError, index.get_missing_compression_parents)
-
     def test_short_line(self):
         transport = MockTransport([
             _KndxIndex.HEADER,
@@ -1319,177 +900,18 @@
 
 class LowLevelKnitIndexTests_c(LowLevelKnitIndexTests):
 
-    _test_needs_features = [compiled_knit_feature]
+    _test_needs_features = [CompiledKnitFeature]
 
     def get_knit_index(self, transport, name, mode):
         mapper = ConstantMapper(name)
-        from bzrlib._knit_load_data_pyx import _load_data_c
-        self.overrideAttr(knit, '_load_data', _load_data_c)
+        orig = knit._load_data
+        def reset():
+            knit._load_data = orig
+        self.addCleanup(reset)
+        from bzrlib._knit_load_data_c import _load_data_c
+        knit._load_data = _load_data_c
         allow_writes = lambda: mode == 'w'
-        return _KndxIndex(transport, mapper, lambda:None,
-                          allow_writes, lambda:True)
-
-
-class Test_KnitAnnotator(TestCaseWithMemoryTransport):
-
-    def make_annotator(self):
-        factory = knit.make_pack_factory(True, True, 1)
-        vf = factory(self.get_transport())
-        return knit._KnitAnnotator(vf)
-
-    def test__expand_fulltext(self):
-        ann = self.make_annotator()
-        rev_key = ('rev-id',)
-        ann._num_compression_children[rev_key] = 1
-        res = ann._expand_record(rev_key, (('parent-id',),), None,
-                           ['line1\n', 'line2\n'], ('fulltext', True))
-        # The content object and text lines should be cached appropriately
-        self.assertEqual(['line1\n', 'line2'], res)
-        content_obj = ann._content_objects[rev_key]
-        self.assertEqual(['line1\n', 'line2\n'], content_obj._lines)
-        self.assertEqual(res, content_obj.text())
-        self.assertEqual(res, ann._text_cache[rev_key])
-
-    def test__expand_delta_comp_parent_not_available(self):
-        # Parent isn't available yet, so we return nothing, but queue up this
-        # node for later processing
-        ann = self.make_annotator()
-        rev_key = ('rev-id',)
-        parent_key = ('parent-id',)
-        record = ['0,1,1\n', 'new-line\n']
-        details = ('line-delta', False)
-        res = ann._expand_record(rev_key, (parent_key,), parent_key,
-                                 record, details)
-        self.assertEqual(None, res)
-        self.assertTrue(parent_key in ann._pending_deltas)
-        pending = ann._pending_deltas[parent_key]
-        self.assertEqual(1, len(pending))
-        self.assertEqual((rev_key, (parent_key,), record, details), pending[0])
-
-    def test__expand_record_tracks_num_children(self):
-        ann = self.make_annotator()
-        rev_key = ('rev-id',)
-        rev2_key = ('rev2-id',)
-        parent_key = ('parent-id',)
-        record = ['0,1,1\n', 'new-line\n']
-        details = ('line-delta', False)
-        ann._num_compression_children[parent_key] = 2
-        ann._expand_record(parent_key, (), None, ['line1\n', 'line2\n'],
-                           ('fulltext', False))
-        res = ann._expand_record(rev_key, (parent_key,), parent_key,
-                                 record, details)
-        self.assertEqual({parent_key: 1}, ann._num_compression_children)
-        # Expanding the second child should remove the content object, and the
-        # num_compression_children entry
-        res = ann._expand_record(rev2_key, (parent_key,), parent_key,
-                                 record, details)
-        self.assertFalse(parent_key in ann._content_objects)
-        self.assertEqual({}, ann._num_compression_children)
-        # We should not cache the content_objects for rev2 and rev, because
-        # they do not have compression children of their own.
-        self.assertEqual({}, ann._content_objects)
-
-    def test__expand_delta_records_blocks(self):
-        ann = self.make_annotator()
-        rev_key = ('rev-id',)
-        parent_key = ('parent-id',)
-        record = ['0,1,1\n', 'new-line\n']
-        details = ('line-delta', True)
-        ann._num_compression_children[parent_key] = 2
-        ann._expand_record(parent_key, (), None,
-                           ['line1\n', 'line2\n', 'line3\n'],
-                           ('fulltext', False))
-        ann._expand_record(rev_key, (parent_key,), parent_key, record, details)
-        self.assertEqual({(rev_key, parent_key): [(1, 1, 1), (3, 3, 0)]},
-                         ann._matching_blocks)
-        rev2_key = ('rev2-id',)
-        record = ['0,1,1\n', 'new-line\n']
-        details = ('line-delta', False)
-        ann._expand_record(rev2_key, (parent_key,), parent_key, record, details)
-        self.assertEqual([(1, 1, 2), (3, 3, 0)],
-                         ann._matching_blocks[(rev2_key, parent_key)])
-
-    def test__get_parent_ann_uses_matching_blocks(self):
-        ann = self.make_annotator()
-        rev_key = ('rev-id',)
-        parent_key = ('parent-id',)
-        parent_ann = [(parent_key,)]*3
-        block_key = (rev_key, parent_key)
-        ann._annotations_cache[parent_key] = parent_ann
-        ann._matching_blocks[block_key] = [(0, 1, 1), (3, 3, 0)]
-        # We should not try to access any parent_lines content, because we know
-        # we already have the matching blocks
-        par_ann, blocks = ann._get_parent_annotations_and_matches(rev_key,
-                                        ['1\n', '2\n', '3\n'], parent_key)
-        self.assertEqual(parent_ann, par_ann)
-        self.assertEqual([(0, 1, 1), (3, 3, 0)], blocks)
-        self.assertEqual({}, ann._matching_blocks)
-
-    def test__process_pending(self):
-        ann = self.make_annotator()
-        rev_key = ('rev-id',)
-        p1_key = ('p1-id',)
-        p2_key = ('p2-id',)
-        record = ['0,1,1\n', 'new-line\n']
-        details = ('line-delta', False)
-        p1_record = ['line1\n', 'line2\n']
-        ann._num_compression_children[p1_key] = 1
-        res = ann._expand_record(rev_key, (p1_key,p2_key), p1_key,
-                                 record, details)
-        self.assertEqual(None, res)
-        # self.assertTrue(p1_key in ann._pending_deltas)
-        self.assertEqual({}, ann._pending_annotation)
-        # Now insert p1, and we should be able to expand the delta
-        res = ann._expand_record(p1_key, (), None, p1_record,
-                                 ('fulltext', False))
-        self.assertEqual(p1_record, res)
-        ann._annotations_cache[p1_key] = [(p1_key,)]*2
-        res = ann._process_pending(p1_key)
-        self.assertEqual([], res)
-        self.assertFalse(p1_key in ann._pending_deltas)
-        self.assertTrue(p2_key in ann._pending_annotation)
-        self.assertEqual({p2_key: [(rev_key, (p1_key, p2_key))]},
-                         ann._pending_annotation)
-        # Now fill in parent 2, and pending annotation should be satisfied
-        res = ann._expand_record(p2_key, (), None, [], ('fulltext', False))
-        ann._annotations_cache[p2_key] = []
-        res = ann._process_pending(p2_key)
-        self.assertEqual([rev_key], res)
-        self.assertEqual({}, ann._pending_annotation)
-        self.assertEqual({}, ann._pending_deltas)
-
-    def test_record_delta_removes_basis(self):
-        ann = self.make_annotator()
-        ann._expand_record(('parent-id',), (), None,
-                           ['line1\n', 'line2\n'], ('fulltext', False))
-        ann._num_compression_children['parent-id'] = 2
-
-    def test_annotate_special_text(self):
-        ann = self.make_annotator()
-        vf = ann._vf
-        rev1_key = ('rev-1',)
-        rev2_key = ('rev-2',)
-        rev3_key = ('rev-3',)
-        spec_key = ('special:',)
-        vf.add_lines(rev1_key, [], ['initial content\n'])
-        vf.add_lines(rev2_key, [rev1_key], ['initial content\n',
-                                            'common content\n',
-                                            'content in 2\n'])
-        vf.add_lines(rev3_key, [rev1_key], ['initial content\n',
-                                            'common content\n',
-                                            'content in 3\n'])
-        spec_text = ('initial content\n'
-                     'common content\n'
-                     'content in 2\n'
-                     'content in 3\n')
-        ann.add_special_text(spec_key, [rev2_key, rev3_key], spec_text)
-        anns, lines = ann.annotate(spec_key)
-        self.assertEqual([(rev1_key,),
-                          (rev2_key, rev3_key),
-                          (rev2_key,),
-                          (rev3_key,),
-                         ], anns)
-        self.assertEqualDiff(spec_text, ''.join(lines))
+        return _KndxIndex(transport, mapper, lambda:None, allow_writes, lambda:True)
 
 
 class KnitTests(TestCaseWithTransport):
@@ -1500,33 +922,6 @@
         return make_file_factory(annotate, mapper)(self.get_transport())
 
 
-class TestBadShaError(KnitTests):
-    """Tests for handling of sha errors."""
-
-    def test_sha_exception_has_text(self):
-        # having the failed text included in the error allows for recovery.
-        source = self.make_test_knit()
-        target = self.make_test_knit(name="target")
-        if not source._max_delta_chain:
-            raise TestNotApplicable(
-                "cannot get delta-caused sha failures without deltas.")
-        # create a basis
-        basis = ('basis',)
-        broken = ('broken',)
-        source.add_lines(basis, (), ['foo\n'])
-        source.add_lines(broken, (basis,), ['foo\n', 'bar\n'])
-        # Seed target with a bad basis text
-        target.add_lines(basis, (), ['gam\n'])
-        target.insert_record_stream(
-            source.get_record_stream([broken], 'unordered', False))
-        err = self.assertRaises(errors.KnitCorrupt,
-            target.get_record_stream([broken], 'unordered', True
-            ).next().get_bytes_as, 'chunked')
-        self.assertEqual(['gam\n', 'bar\n'], err.content)
-        # Test for formatting with live data
-        self.assertStartsWith(str(err), "Knit ")
-
-
 class TestKnitIndex(KnitTests):
 
     def test_add_versions_dictionary_compresses(self):
@@ -1604,13 +999,13 @@
         # could leave an empty .kndx file, which bzr would later claim was a
         # corrupted file since the header was not present. In reality, the file
         # just wasn't created, so it should be ignored.
-        t = transport.get_transport('.')
+        t = get_transport('.')
         t.put_bytes('test.kndx', '')
 
         knit = self.make_test_knit()
 
     def test_knit_index_checks_header(self):
-        t = transport.get_transport('.')
+        t = get_transport('.')
         t.put_bytes('test.kndx', '# not really a knit header\n\n')
         k = self.make_test_knit()
         self.assertRaises(KnitHeaderError, k.keys)
@@ -1732,19 +1127,21 @@
             [('parent',)])])
         # but neither should have added data:
         self.assertEqual([[], [], [], []], self.caught_entries)
-
+
     def test_add_version_different_dup(self):
         index = self.two_graph_index(deltas=True, catch_adds=True)
         # change options
         self.assertRaises(errors.KnitCorrupt, index.add_records,
-            [(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
+            [(('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
+        self.assertRaises(errors.KnitCorrupt, index.add_records,
+            [(('tip',), 'line-delta,no-eol', (None, 0, 100), [('parent',)])])
         self.assertRaises(errors.KnitCorrupt, index.add_records,
             [(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
         # parents
         self.assertRaises(errors.KnitCorrupt, index.add_records,
             [(('tip',), 'fulltext,no-eol', (None, 0, 100), [])])
         self.assertEqual([], self.caught_entries)
-
+
     def test_add_versions_nodeltas(self):
         index = self.two_graph_index(catch_adds=True)
         index.add_records([
@@ -1792,12 +1189,14 @@
             [('parent',)])])
         # but neither should have added data.
         self.assertEqual([[], [], [], []], self.caught_entries)
-
+
     def test_add_versions_different_dup(self):
         index = self.two_graph_index(deltas=True, catch_adds=True)
         # change options
         self.assertRaises(errors.KnitCorrupt, index.add_records,
-            [(('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
+            [(('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
+        self.assertRaises(errors.KnitCorrupt, index.add_records,
+            [(('tip',), 'line-delta,no-eol', (None, 0, 100), [('parent',)])])
         self.assertRaises(errors.KnitCorrupt, index.add_records,
             [(('tip',), 'fulltext', (None, 0, 100), [('parent',)])])
         # parents
@@ -1806,114 +1205,9 @@
         # change options in the second record
         self.assertRaises(errors.KnitCorrupt, index.add_records,
             [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)]),
-             (('tip',), 'line-delta', (None, 0, 100), [('parent',)])])
+             (('tip',), 'no-eol,line-delta', (None, 0, 100), [('parent',)])])
         self.assertEqual([], self.caught_entries)
 
-    def make_g_index_missing_compression_parent(self):
-        graph_index = self.make_g_index('missing_comp', 2,
-            [(('tip', ), ' 100 78',
-              ([('missing-parent', ), ('ghost', )], [('missing-parent', )]))])
-        return graph_index
-
-    def make_g_index_missing_parent(self):
-        graph_index = self.make_g_index('missing_parent', 2,
-            [(('parent', ), ' 100 78', ([], [])),
-             (('tip', ), ' 100 78',
-              ([('parent', ), ('missing-parent', )], [('parent', )])),
-              ])
-        return graph_index
-
-    def make_g_index_no_external_refs(self):
-        graph_index = self.make_g_index('no_external_refs', 2,
-            [(('rev', ), ' 100 78',
-              ([('parent', ), ('ghost', )], []))])
-        return graph_index
-
-    def test_add_good_unvalidated_index(self):
-        unvalidated = self.make_g_index_no_external_refs()
-        combined = CombinedGraphIndex([unvalidated])
-        index = _KnitGraphIndex(combined, lambda: True, deltas=True)
-        index.scan_unvalidated_index(unvalidated)
-        self.assertEqual(frozenset(), index.get_missing_compression_parents())
-
-    def test_add_missing_compression_parent_unvalidated_index(self):
-        unvalidated = self.make_g_index_missing_compression_parent()
-        combined = CombinedGraphIndex([unvalidated])
-        index = _KnitGraphIndex(combined, lambda: True, deltas=True)
-        index.scan_unvalidated_index(unvalidated)
-        # This also checks that its only the compression parent that is
-        # examined, otherwise 'ghost' would also be reported as a missing
-        # parent.
-        self.assertEqual(
-            frozenset([('missing-parent',)]),
-            index.get_missing_compression_parents())
-
-    def test_add_missing_noncompression_parent_unvalidated_index(self):
-        unvalidated = self.make_g_index_missing_parent()
-        combined = CombinedGraphIndex([unvalidated])
-        index = _KnitGraphIndex(combined, lambda: True, deltas=True,
-            track_external_parent_refs=True)
-        index.scan_unvalidated_index(unvalidated)
-        self.assertEqual(
-            frozenset([('missing-parent',)]), index.get_missing_parents())
-
-    def test_track_external_parent_refs(self):
-        g_index = self.make_g_index('empty', 2, [])
-        combined = CombinedGraphIndex([g_index])
-        index = _KnitGraphIndex(combined, lambda: True, deltas=True,
-            add_callback=self.catch_add, track_external_parent_refs=True)
-        self.caught_entries = []
-        index.add_records([
-            (('new-key',), 'fulltext,no-eol', (None, 50, 60),
-             [('parent-1',), ('parent-2',)])])
-        self.assertEqual(
-            frozenset([('parent-1',), ('parent-2',)]),
-            index.get_missing_parents())
-
-    def test_add_unvalidated_index_with_present_external_references(self):
-        index = self.two_graph_index(deltas=True)
-        # Ugly hack to get at one of the underlying GraphIndex objects that
-        # two_graph_index built.
-        unvalidated = index._graph_index._indices[1]
-        # 'parent' is an external ref of _indices[1] (unvalidated), but is
-        # present in _indices[0].
-        index.scan_unvalidated_index(unvalidated)
-        self.assertEqual(frozenset(), index.get_missing_compression_parents())
-
-    def make_new_missing_parent_g_index(self, name):
-        missing_parent = name + '-missing-parent'
-        graph_index = self.make_g_index(name, 2,
-            [((name + 'tip', ), ' 100 78',
-              ([(missing_parent, ), ('ghost', )], [(missing_parent, )]))])
-        return graph_index
-
-    def test_add_mulitiple_unvalidated_indices_with_missing_parents(self):
-        g_index_1 = self.make_new_missing_parent_g_index('one')
-        g_index_2 = self.make_new_missing_parent_g_index('two')
-        combined = CombinedGraphIndex([g_index_1, g_index_2])
-        index = _KnitGraphIndex(combined, lambda: True, deltas=True)
-        index.scan_unvalidated_index(g_index_1)
-        index.scan_unvalidated_index(g_index_2)
-        self.assertEqual(
-            frozenset([('one-missing-parent',), ('two-missing-parent',)]),
-            index.get_missing_compression_parents())
-
-    def test_add_mulitiple_unvalidated_indices_with_mutual_dependencies(self):
-        graph_index_a = self.make_g_index('one', 2,
-            [(('parent-one', ), ' 100 78', ([('non-compression-parent',)], [])),
-             (('child-of-two', ), ' 100 78',
-              ([('parent-two',)], [('parent-two',)]))])
-        graph_index_b = self.make_g_index('two', 2,
-            [(('parent-two', ), ' 100 78', ([('non-compression-parent',)], [])),
-             (('child-of-one', ), ' 100 78',
-              ([('parent-one',)], [('parent-one',)]))])
-        combined = CombinedGraphIndex([graph_index_a, graph_index_b])
-        index = _KnitGraphIndex(combined, lambda: True, deltas=True)
-        index.scan_unvalidated_index(graph_index_a)
-        index.scan_unvalidated_index(graph_index_b)
-        self.assertEqual(
-            frozenset([]), index.get_missing_compression_parents())
-
 
 class TestNoParentsGraphIndexKnit(KnitTests):
     """Tests for knits using _KnitGraphIndex with no parents."""
@@ -1927,14 +1221,6 @@
         size = trans.put_file(name, stream)
         return GraphIndex(trans, name, size)
 
-    def test_add_good_unvalidated_index(self):
-        unvalidated = self.make_g_index('unvalidated')
-        combined = CombinedGraphIndex([unvalidated])
-        index = _KnitGraphIndex(combined, lambda: True, parents=False)
-        index.scan_unvalidated_index(unvalidated)
-        self.assertEqual(frozenset(),
-            index.get_missing_compression_parents())
-
     def test_parents_deltas_incompatible(self):
         index = CombinedGraphIndex([])
         self.assertRaises(errors.KnitError, _KnitGraphIndex, lambda:True,
@@ -2021,7 +1307,7 @@
         index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
         # but neither should have added data.
         self.assertEqual([[], [], [], []], self.caught_entries)
-
+
     def test_add_version_different_dup(self):
         index = self.two_graph_index(catch_adds=True)
         # change options
@@ -2035,7 +1321,7 @@
         self.assertRaises(errors.KnitCorrupt, index.add_records,
             [(('tip',), 'fulltext,no-eol', (None, 0, 100), [('parent',)])])
         self.assertEqual([], self.caught_entries)
-
+
     def test_add_versions(self):
         index = self.two_graph_index(catch_adds=True)
         index.add_records([
@@ -2073,7 +1359,7 @@
         index.add_records([(('tip',), 'fulltext,no-eol', (None, 0, 1000), [])])
         # but neither should have added data.
         self.assertEqual([[], [], [], []], self.caught_entries)
-
+
     def test_add_versions_different_dup(self):
         index = self.two_graph_index(catch_adds=True)
         # change options
@@ -2093 +1379 @@
         self.assertEqual([], self.caught_entries)
 
 
-class TestKnitVersionedFiles(KnitTests):
-
-    def assertGroupKeysForIo(self, exp_groups, keys, non_local_keys,
-                             positions, _min_buffer_size=None):
-        kvf = self.make_test_knit()
-        if _min_buffer_size is None:
-            _min_buffer_size = knit._STREAM_MIN_BUFFER_SIZE
-        self.assertEqual(exp_groups, kvf._group_keys_for_io(keys,
-                                        non_local_keys, positions,
-                                        _min_buffer_size=_min_buffer_size))
-
-    def assertSplitByPrefix(self, expected_map, expected_prefix_order,
-                            keys):
-        split, prefix_order = KnitVersionedFiles._split_by_prefix(keys)
-        self.assertEqual(expected_map, split)
-        self.assertEqual(expected_prefix_order, prefix_order)
-
-    def test__group_keys_for_io(self):
-        ft_detail = ('fulltext', False)
-        ld_detail = ('line-delta', False)
-        f_a = ('f', 'a')
-        f_b = ('f', 'b')
-        f_c = ('f', 'c')
-        g_a = ('g', 'a')
-        g_b = ('g', 'b')
-        g_c = ('g', 'c')
-        positions = {
-            f_a: (ft_detail, (f_a, 0, 100), None),
-            f_b: (ld_detail, (f_b, 100, 21), f_a),
-            f_c: (ld_detail, (f_c, 180, 15), f_b),
-            g_a: (ft_detail, (g_a, 121, 35), None),
-            g_b: (ld_detail, (g_b, 156, 12), g_a),
-            g_c: (ld_detail, (g_c, 195, 13), g_a),
-            }
-        self.assertGroupKeysForIo([([f_a], set())],
-                                  [f_a], [], positions)
-        self.assertGroupKeysForIo([([f_a], set([f_a]))],
-                                  [f_a], [f_a], positions)
-        self.assertGroupKeysForIo([([f_a, f_b], set([]))],
-                                  [f_a, f_b], [], positions)
-        self.assertGroupKeysForIo([([f_a, f_b], set([f_b]))],
-                                  [f_a, f_b], [f_b], positions)
-        self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
-                                  [f_a, g_a, f_b, g_b], [], positions)
-        self.assertGroupKeysForIo([([f_a, f_b, g_a, g_b], set())],
-                                  [f_a, g_a, f_b, g_b], [], positions,
-                                  _min_buffer_size=150)
-        self.assertGroupKeysForIo([([f_a, f_b], set()), ([g_a, g_b], set())],
-                                  [f_a, g_a, f_b, g_b], [], positions,
-                                  _min_buffer_size=100)
-        self.assertGroupKeysForIo([([f_c], set()), ([g_b], set())],
-                                  [f_c, g_b], [], positions,
-                                  _min_buffer_size=125)
-        self.assertGroupKeysForIo([([g_b, f_c], set())],
-                                  [g_b, f_c], [], positions,
-                                  _min_buffer_size=125)
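These assertions describe how _group_keys_for_io batches reads: keys are bucketed by prefix, each bucket is costed at the size of the full compression chains it needs (a line-delta is unreadable without its parents), and buckets accumulate into one I/O group until the running total exceeds _min_buffer_size. A simplified re-implementation that reproduces the expectations above, ignoring the non-local-key bookkeeping:

    def group_keys_for_io(prefix_batches, positions, min_buffer_size):
        # positions[key] -> (details, (key, offset, length), compression_parent)
        def chain_size(batch, seen):
            # Cost each key at its whole delta chain, deduplicated
            # within the current group.
            size = 0
            for key in batch:
                while key is not None and key not in seen:
                    seen.add(key)
                    size += positions[key][1][2]
                    key = positions[key][2]
            return size
        groups, cur_keys, cur_size, seen = [], [], 0, set()
        for batch in prefix_batches:
            cur_keys.extend(batch)
            cur_size += chain_size(batch, seen)
            if cur_size > min_buffer_size:
                groups.append(cur_keys)
                cur_keys, cur_size, seen = [], 0, set()
        if cur_keys:
            groups.append(cur_keys)
        return groups

With the positions table above, [f_c] alone costs 136 bytes (f_c plus its parents f_b and f_a), which is why a 125-byte budget splits [f_c, g_b] into two groups but keeps [g_b, f_c] together: g_b's 47-byte chain fits first, and the group is only flushed after f_c pushes it over the threshold.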
-
-    def test__split_by_prefix(self):
-        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
-                                  'g': [('g', 'b'), ('g', 'a')],
-                                 }, ['f', 'g'],
-                                 [('f', 'a'), ('g', 'b'),
-                                  ('g', 'a'), ('f', 'b')])
-
-        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
-                                  'g': [('g', 'b'), ('g', 'a')],
-                                 }, ['f', 'g'],
-                                 [('f', 'a'), ('f', 'b'),
-                                  ('g', 'b'), ('g', 'a')])
-
-        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
-                                  'g': [('g', 'b'), ('g', 'a')],
-                                 }, ['f', 'g'],
-                                 [('f', 'a'), ('f', 'b'),
-                                  ('g', 'b'), ('g', 'a')])
-
-        self.assertSplitByPrefix({'f': [('f', 'a'), ('f', 'b')],
-                                  'g': [('g', 'b'), ('g', 'a')],
-                                  '': [('a',), ('b',)]
-                                 }, ['f', 'g', ''],
-                                 [('f', 'a'), ('g', 'b'),
-                                  ('a',), ('b',),
-                                  ('g', 'a'), ('f', 'b')])
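The behaviour pinned above is reproducible in a dozen lines: bucket each key by its first element, with length-1 keys falling into a '' bucket, remember the order in which prefixes first appear, and keep the incoming order within each bucket. A sketch:

    def split_by_prefix(keys):
        # Returns (dict of prefix -> keys, prefixes in first-seen order).
        split = {}
        prefix_order = []
        for key in keys:
            if len(key) == 1:
                prefix = ''
            else:
                prefix = key[0]
            if prefix not in split:
                split[prefix] = []
                prefix_order.append(prefix)
            split[prefix].append(key)
        return split, prefix_order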
-
-
 class TestStacking(KnitTests):
 
     def get_basis_and_test_knit(self):
@@ -2209 +1410 @@
         basis.calls = []
         test.add_lines(key_cross_border, (key_basis,), ['foo\n'])
         self.assertEqual('fulltext', test._index.get_method(key_cross_border))
-        # we don't even need to look at the basis to see that this should be
-        # stored as a fulltext
-        self.assertEqual([], basis.calls)
+        self.assertEqual([("get_parent_map", set([key_basis]))], basis.calls)
         # Subsequent adds do delta.
         basis.calls = []
         test.add_lines(key_delta, (key_cross_border,), ['foo\n'])
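Both sides of this hunk agree on the cross-border behaviour itself: a knit delta can only be applied against a record in the same store, so once the compression parent lives only in the fallback, the new text has to be stored as a fulltext. They disagree only on whether deciding that requires asking the basis for parents at all. The decision itself is tiny (hypothetical sketch, not the real add_lines logic):

    def choose_compression(parent_keys, local_keys):
        # Delta against the first parent only if it is held locally;
        # anything reachable only via a fallback forces a fulltext.
        if parent_keys and parent_keys[0] in local_keys:
            return 'line-delta'
        return 'fulltext'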
@@ -2232 +1431 @@
         # directly.
         basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
         basis.calls = []
+        self.assertRaises(RevisionNotPresent, test.annotate, key_basis)
+        raise KnownFailure("Annotation on stacked knits currently fails.")
         details = test.annotate(key_basis)
         self.assertEqual([(key_basis, 'foo\n'), (key_basis, 'bar\n')], details)
-        # Not optimised to date:
-        # self.assertEqual([("annotate", key_basis)], basis.calls)
-        self.assertEqual([('get_parent_map', set([key_basis])),
-            ('get_parent_map', set([key_basis])),
-            ('get_record_stream', [key_basis], 'topological', True)],
-            basis.calls)
+        self.assertEqual([("annotate", key_basis)], basis.calls)
 
     def test_check(self):
-        # At the moment checking a stacked knit does implicitly check the
-        # fallback files.
+        # check() must not check the fallback files, it's none of its business.
         basis, test = self.get_basis_and_test_knit()
+        basis.check = None
         test.check()
 
     def test_get_parent_map(self):
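The annotate test on the added side uses the standard KnownFailure idiom: assert the behaviour the code currently has, then raise KnownFailure so the runner records the bug instead of failing the suite; the unreached code after the raise documents the behaviour to restore. Schematically:

    def test_documents_a_known_bug(self):
        # Pin today's (wrong) behaviour so a change in either direction
        # is noticed:
        self.assertRaises(RevisionNotPresent, test.annotate, key_basis)
        raise KnownFailure("Annotation on stacked knits currently fails.")
        # Unreached: the intended behaviour, kept as documentation.
        details = test.annotate(key_basis)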
@@ -2341 +1537 @@
                 True).next()
             self.assertEqual(record.key, result[0])
             self.assertEqual(record.sha1, result[1])
-            # We used to check that the storage kind matched, but actually it
-            # depends on whether it was sourced from the basis, or in a single
-            # group, because asking for full texts returns proxy objects to a
-            # _ContentMapGenerator object; so checking the kind is unneeded.
+            self.assertEqual(record.storage_kind, result[2])
             self.assertEqual(record.get_bytes_as('fulltext'), result[3])
         # It's not strictly minimal, but it seems reasonable for now for it to
         # ask which fallbacks have which parents.
         self.assertEqual([
             ("get_parent_map", set([key_basis, key_basis_2, key_missing])),
-            # topological is requested from the fallback, because that is what
-            # was requested at the top level.
-            ("get_record_stream", [key_basis_2, key_basis], 'topological', True)],
+            # unordered is asked for by the underlying worker as it still
+            # buffers everything while answering - which is a problem!
+            ("get_record_stream", [key_basis_2, key_basis], 'unordered', True)],
             calls)
 
     def test_get_record_stream_unordered_deltas(self):
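The two sides disagree about the ordering forwarded to the fallback: 'topological' propagated from the top-level request versus 'unordered' because the worker buffers everything anyway. 'topological' here simply means no record is yielded before its parents; a minimal ordering over a get_parent_map-style dict (sketch):

    def topological_order(keys, parent_map):
        # Emit parents before children; keys absent from parent_map are
        # treated as ghosts and skipped.
        order = []
        emitted = set()
        def visit(key):
            if key in emitted or key not in parent_map:
                return
            emitted.add(key)
            for parent in parent_map[key]:
                visit(parent)
            order.append(key)
        for key in keys:
            visit(key)
        return order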
@@ -2444 +1637 @@
         key_basis = ('bar',)
         key_missing = ('missing',)
         test.add_lines(key, (), ['foo\n'])
-        key_sha1sum = osutils.sha('foo\n').hexdigest()
+        key_sha1sum = sha.new('foo\n').hexdigest()
         sha1s = test.get_sha1s([key])
         self.assertEqual({key: key_sha1sum}, sha1s)
         self.assertEqual([], basis.calls)
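The only difference between the two sides here is where SHA-1 comes from: the stdlib sha module was deprecated in Python 2.5 in favour of hashlib, and the newer side goes through bzrlib's osutils.sha wrapper instead. The plain stdlib spelling of the same digests (Python 2, where str is bytes):

    import hashlib

    key_sha1sum = hashlib.sha1('foo\n').hexdigest()
    basis_sha1sum = hashlib.sha1('foo\nbar\n').hexdigest()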
@@ -2452 +1645 @@
         # directly (rather than via text reconstruction) so that remote servers
         # etc don't have to answer with full content.
         basis.add_lines(key_basis, (), ['foo\n', 'bar\n'])
-        basis_sha1sum = osutils.sha('foo\nbar\n').hexdigest()
+        basis_sha1sum = sha.new('foo\nbar\n').hexdigest()
         basis.calls = []
         sha1s = test.get_sha1s([key, key_missing, key_basis])
         self.assertEqual({key: key_sha1sum,
@@ -2474 +1667 @@
         source.add_lines(key_delta, (key_basis,), ['bar\n'])
         stream = source.get_record_stream([key_delta], 'unordered', False)
         test.insert_record_stream(stream)
-        # XXX: this does somewhat too many calls in making sure of whether it
-        # has to recreate the full text.
-        self.assertEqual([("get_parent_map", set([key_basis])),
-             ('get_parent_map', set([key_basis])),
-             ('get_record_stream', [key_basis], 'unordered', True)],
+        self.assertEqual([("get_parent_map", set([key_basis]))],
             basis.calls)
         self.assertEqual({key_delta:(key_basis,)},
             test.get_parent_map([key_delta]))
@@ -2487 +1676 @@
 
     def test_iter_lines_added_or_present_in_keys(self):
         # Lines from the basis are returned, and lines for a given key are only
-        # returned once.
+        # returned once. 
         key1 = ('foo1',)
         key2 = ('foo2',)
         # all sources are asked for keys:
@@ -2545 +1734 @@
         test.add_mpdiffs([(key_delta, (key_basis,),
             source.get_sha1s([key_delta])[key_delta], diffs[0])])
         self.assertEqual([("get_parent_map", set([key_basis])),
-            ('get_record_stream', [key_basis], 'unordered', True),],
+            ('get_record_stream', [key_basis], 'unordered', True),
+            ('get_parent_map', set([key_basis]))],
             basis.calls)
         self.assertEqual({key_delta:(key_basis,)},
             test.get_parent_map([key_delta]))
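The hunks asserted in these mpdiff tests come in two kinds: multiparent.NewText carries literal lines, and multiparent.ParentText(parent, parent_pos, child_pos, num_lines) copies a run of lines out of the numbered parent text. Reconstructing a text is just replaying the hunks in order; a sketch assuming those attribute names:

    def apply_mpdiff_hunks(hunks, parent_texts):
        # parent_texts: list of parent line-lists, indexed by hunk.parent.
        lines = []
        for hunk in hunks:
            if isinstance(hunk, multiparent.NewText):
                lines.extend(hunk.lines)
            else:
                source = parent_texts[hunk.parent]
                lines.extend(source[hunk.parent_pos:
                                    hunk.parent_pos + hunk.num_lines])
        return lines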
@@ -2570 +1760 @@
                 multiparent.NewText(['foo\n']),
                 multiparent.ParentText(1, 0, 2, 1)])],
             diffs)
-        self.assertEqual(3, len(basis.calls))
+        self.assertEqual(4, len(basis.calls))
         self.assertEqual([
             ("get_parent_map", set([key_left, key_right])),
             ("get_parent_map", set([key_left, key_right])),
+            ("get_parent_map", set([key_left, key_right])),
             ],
-            basis.calls[:-1])
-        last_call = basis.calls[-1]
+            basis.calls[:3])
+        last_call = basis.calls[3]
         self.assertEqual('get_record_stream', last_call[0])
         self.assertEqual(set([key_left, key_right]), set(last_call[1]))
-        self.assertEqual('topological', last_call[2])
+        self.assertEqual('unordered', last_call[2])
         self.assertEqual(True, last_call[3])
-
-
-class TestNetworkBehaviour(KnitTests):
-    """Tests for getting data out of/into knits over the network."""
-
-    def test_include_delta_closure_generates_a_knit_delta_closure(self):
-        vf = self.make_test_knit(name='test')
-        # put in three texts, giving ft, delta, delta
-        vf.add_lines(('base',), (), ['base\n', 'content\n'])
-        vf.add_lines(('d1',), (('base',),), ['d1\n'])
-        vf.add_lines(('d2',), (('d1',),), ['d2\n'])
-        # But heuristics could interfere, so check what happened:
-        self.assertEqual(['knit-ft-gz', 'knit-delta-gz', 'knit-delta-gz'],
-            [record.storage_kind for record in
-             vf.get_record_stream([('base',), ('d1',), ('d2',)],
-                'topological', False)])
-        # generate a stream of just the deltas with include_delta_closure=True,
-        # serialise to the network, and check that we get a delta closure on the wire.
-        stream = vf.get_record_stream([('d1',), ('d2',)], 'topological', True)
-        netb = [record.get_bytes_as(record.storage_kind) for record in stream]
-        # The first bytes should be a memo from _ContentMapGenerator, and the
-        # second bytes should be empty (because it's an API proxy, not something
-        # for wire serialisation).
-        self.assertEqual('', netb[1])
-        bytes = netb[0]
-        kind, line_end = network_bytes_to_kind_and_offset(bytes)
-        self.assertEqual('knit-delta-closure', kind)
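network_bytes_to_kind_and_offset relies on the framing these network records use: the storage kind is the first line of the byte string, and the kind-specific payload begins right after the newline. A decode sketch matching what the assertion above requires:

    def network_bytes_to_kind_and_offset(network_bytes):
        # '<storage-kind>\n<payload>' -> ('<storage-kind>', payload offset)
        line_end = network_bytes.index('\n')
        return network_bytes[:line_end], line_end + 1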
-
-
-class TestContentMapGenerator(KnitTests):
-    """Tests for ContentMapGenerator"""
-
-    def test_get_record_stream_gives_records(self):
-        vf = self.make_test_knit(name='test')
-        # put in three texts, giving ft, delta, delta
-        vf.add_lines(('base',), (), ['base\n', 'content\n'])
-        vf.add_lines(('d1',), (('base',),), ['d1\n'])
-        vf.add_lines(('d2',), (('d1',),), ['d2\n'])
-        keys = [('d1',), ('d2',)]
-        generator = _VFContentMapGenerator(vf, keys,
-            global_map=vf.get_parent_map(keys))
-        for record in generator.get_record_stream():
-            if record.key == ('d1',):
-                self.assertEqual('d1\n', record.get_bytes_as('fulltext'))
-            else:
-                self.assertEqual('d2\n', record.get_bytes_as('fulltext'))
-
-    def test_get_record_stream_kinds_are_raw(self):
-        vf = self.make_test_knit(name='test')
-        # put in three texts, giving ft, delta, delta
-        vf.add_lines(('base',), (), ['base\n', 'content\n'])
-        vf.add_lines(('d1',), (('base',),), ['d1\n'])
-        vf.add_lines(('d2',), (('d1',),), ['d2\n'])
-        keys = [('base',), ('d1',), ('d2',)]
-        generator = _VFContentMapGenerator(vf, keys,
-            global_map=vf.get_parent_map(keys))
-        kinds = {('base',): 'knit-delta-closure',
-            ('d1',): 'knit-delta-closure-ref',
-            ('d2',): 'knit-delta-closure-ref',
-            }
-        for record in generator.get_record_stream():
-            self.assertEqual(kinds[record.key], record.storage_kind)