~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/test_groupcompress.py

  • Committer: Danny van Heumen
  • Date: 2010-03-09 16:38:10 UTC
  • mto: (4634.139.5 2.0)
  • mto: This revision was merged to the branch mainline in revision 5160.
  • Revision ID: danny@dannyvanheumen.nl-20100309163810-ujn8hcx08f75nlf1

Refined the test to use locking hooks and to validate that the lock is truly a checkout lock.

=== modified file 'bzrlib/tests/test_groupcompress.py'
@@ -1,4 +1,4 @@
-# Copyright (C) 2008, 2009, 2010 Canonical Ltd
+# Copyright (C) 2008, 2009 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,7 +29,7 @@
     versionedfile,
     )
 from bzrlib.osutils import sha_string
-from bzrlib.tests.test__groupcompress import compiled_groupcompress_feature
+from bzrlib.tests.test__groupcompress import CompiledGroupCompressFeature
 
 
 def load_tests(standard_tests, module, loader):
@@ -39,7 +39,7 @@
     scenarios = [
         ('python', {'compressor': groupcompress.PythonGroupCompressor}),
         ]
-    if compiled_groupcompress_feature.available():
+    if CompiledGroupCompressFeature.available():
         scenarios.append(('C',
             {'compressor': groupcompress.PyrexGroupCompressor}))
     return tests.multiply_tests(to_adapt, scenarios, result)
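
For context, `tests.multiply_tests` runs every test once per scenario by cloning each test and attaching the scenario's attributes (here, which `compressor` class the test should use). A rough stand-in for what it does, illustrative only; bzrlib's real helper also rewrites the test ids:

import copy

def multiply_tests_sketch(tests_to_adapt, scenarios, result):
    # For each (name, attrs) scenario, clone every test and apply the
    # scenario attributes, e.g. clone.compressor = PythonGroupCompressor.
    for test in tests_to_adapt:
        for name, attrs in scenarios:
            clone = copy.deepcopy(test)
            for attr, value in attrs.items():
                setattr(clone, attr, value)
            result.addTest(clone)
    return result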
@@ -135,7 +135,7 @@
 
 class TestPyrexGroupCompressor(TestGroupCompressor):
 
-    _test_needs_features = [compiled_groupcompress_feature]
+    _test_needs_features = [CompiledGroupCompressFeature]
     compressor = groupcompress.PyrexGroupCompressor
 
     def test_stats(self):
@@ -418,12 +418,8 @@
         # And the decompressor is finalized
         self.assertIs(None, block._z_content_decompressor)
 
-    def test__ensure_all_content(self):
+    def test_partial_decomp_no_known_length(self):
         content_chunks = []
-        # We need a sufficient amount of data so that zlib.decompress has
-        # partial decompression to work with. Most auto-generated data
-        # compresses a bit too well, we want a combination, so we combine a sha
-        # hash with compressible data.
         for i in xrange(2048):
             next_content = '%d\nThis is a bit of duplicate text\n' % (i,)
             content_chunks.append(next_content)
@@ -437,13 +433,30 @@
         block._z_content = z_content
         block._z_content_length = len(z_content)
         block._compressor_name = 'zlib'
-        block._content_length = 158634
+        block._content_length = None # Don't tell the decompressed length
         self.assertIs(None, block._content)
-        # The first _ensure_content got all of the required data
-        block._ensure_content(158634)
+        block._ensure_content(100)
+        self.assertIsNot(None, block._content)
+        # We have decompressed at least 100 bytes
+        self.assertTrue(len(block._content) >= 100)
+        # We have not decompressed the whole content
+        self.assertTrue(len(block._content) < 158634)
+        self.assertEqualDiff(content[:len(block._content)], block._content)
+        # ensuring content that we already have shouldn't cause any more data
+        # to be extracted
+        cur_len = len(block._content)
+        block._ensure_content(cur_len - 10)
+        self.assertEqual(cur_len, len(block._content))
+        # Now we want a bit more content
+        cur_len += 10
+        block._ensure_content(cur_len)
+        self.assertTrue(len(block._content) >= cur_len)
+        self.assertTrue(len(block._content) < 158634)
+        self.assertEqualDiff(content[:len(block._content)], block._content)
+        # And now lets finish
+        block._ensure_content()
         self.assertEqualDiff(content, block._content)
-        # And we should have released the _z_content_decompressor since it was
-        # fully consumed
+        # And the decompressor is finalized
         self.assertIs(None, block._z_content_decompressor)
 
     def test__dump(self):
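
The replacement test drives `_ensure_content` without a known decompressed length, relying on zlib's ability to stop after producing a requested number of plain bytes. A minimal sketch of that pattern using only the standard `zlib` API; bzrlib's `GroupCompressBlock._ensure_content` additionally caches the decompressor and the partial content on the block between calls:

import zlib

def ensure_content_sketch(z_content, min_length):
    # Ask the decompressor for at most min_length plain bytes; compressed
    # input it did not need is parked in decomp.unconsumed_tail.
    decomp = zlib.decompressobj()
    content = decomp.decompress(z_content, min_length)
    while len(content) < min_length and decomp.unconsumed_tail:
        content += decomp.decompress(decomp.unconsumed_tail,
                                     min_length - len(content))
    return content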
@@ -459,8 +472,7 @@
                          ], block._dump())
 
 
-class TestCaseWithGroupCompressVersionedFiles(
-        tests.TestCaseWithMemoryTransport):
+class TestCaseWithGroupCompressVersionedFiles(tests.TestCaseWithTransport):
 
     def make_test_vf(self, create_graph, keylength=1, do_cleanup=True,
                      dir='.', inconsistency_fatal=True):
@@ -733,122 +745,6 @@
                               " \('b',\) \('42 32 0 8', \(\(\),\)\) \('74 32"
                               " 0 8', \(\(\('a',\),\),\)\)")
 
-    def test_clear_cache(self):
-        vf = self.make_source_with_b(True, 'source')
-        vf.writer.end()
-        for record in vf.get_record_stream([('a',), ('b',)], 'unordered',
-                                           True):
-            pass
-        self.assertTrue(len(vf._group_cache) > 0)
-        vf.clear_cache()
-        self.assertEqual(0, len(vf._group_cache))
-
-
-
-class StubGCVF(object):
-    def __init__(self, canned_get_blocks=None):
-        self._group_cache = {}
-        self._canned_get_blocks = canned_get_blocks or []
-    def _get_blocks(self, read_memos):
-        return iter(self._canned_get_blocks)
-
-
-class Test_BatchingBlockFetcher(TestCaseWithGroupCompressVersionedFiles):
-    """Simple whitebox unit tests for _BatchingBlockFetcher."""
-
-    def test_add_key_new_read_memo(self):
-        """Adding a key with an uncached read_memo new to this batch adds that
-        read_memo to the list of memos to fetch.
-        """
-        # locations are: index_memo, ignored, parents, ignored
-        # where index_memo is: (idx, offset, len, factory_start, factory_end)
-        # and (idx, offset, size) is known as the 'read_memo', identifying the
-        # raw bytes needed.
-        read_memo = ('fake index', 100, 50)
-        locations = {
-            ('key',): (read_memo + (None, None), None, None, None)}
-        batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locations)
-        total_size = batcher.add_key(('key',))
-        self.assertEqual(50, total_size)
-        self.assertEqual([('key',)], batcher.keys)
-        self.assertEqual([read_memo], batcher.memos_to_get)
-
-    def test_add_key_duplicate_read_memo(self):
-        """read_memos that occur multiple times in a batch will only be fetched
-        once.
-        """
-        read_memo = ('fake index', 100, 50)
-        # Two keys, both sharing the same read memo (but different overall
-        # index_memos).
-        locations = {
-            ('key1',): (read_memo + (0, 1), None, None, None),
-            ('key2',): (read_memo + (1, 2), None, None, None)}
-        batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locations)
-        total_size = batcher.add_key(('key1',))
-        total_size = batcher.add_key(('key2',))
-        self.assertEqual(50, total_size)
-        self.assertEqual([('key1',), ('key2',)], batcher.keys)
-        self.assertEqual([read_memo], batcher.memos_to_get)
-
-    def test_add_key_cached_read_memo(self):
-        """Adding a key with a cached read_memo will not cause that read_memo
-        to be added to the list to fetch.
-        """
-        read_memo = ('fake index', 100, 50)
-        gcvf = StubGCVF()
-        gcvf._group_cache[read_memo] = 'fake block'
-        locations = {
-            ('key',): (read_memo + (None, None), None, None, None)}
-        batcher = groupcompress._BatchingBlockFetcher(gcvf, locations)
-        total_size = batcher.add_key(('key',))
-        self.assertEqual(0, total_size)
-        self.assertEqual([('key',)], batcher.keys)
-        self.assertEqual([], batcher.memos_to_get)
-
-    def test_yield_factories_empty(self):
-        """An empty batch yields no factories."""
-        batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), {})
-        self.assertEqual([], list(batcher.yield_factories()))
-
-    def test_yield_factories_calls_get_blocks(self):
-        """Uncached memos are retrieved via get_blocks."""
-        read_memo1 = ('fake index', 100, 50)
-        read_memo2 = ('fake index', 150, 40)
-        gcvf = StubGCVF(
-            canned_get_blocks=[
-                (read_memo1, groupcompress.GroupCompressBlock()),
-                (read_memo2, groupcompress.GroupCompressBlock())])
-        locations = {
-            ('key1',): (read_memo1 + (None, None), None, None, None),
-            ('key2',): (read_memo2 + (None, None), None, None, None)}
-        batcher = groupcompress._BatchingBlockFetcher(gcvf, locations)
-        batcher.add_key(('key1',))
-        batcher.add_key(('key2',))
-        factories = list(batcher.yield_factories(full_flush=True))
-        self.assertLength(2, factories)
-        keys = [f.key for f in factories]
-        kinds = [f.storage_kind for f in factories]
-        self.assertEqual([('key1',), ('key2',)], keys)
-        self.assertEqual(['groupcompress-block', 'groupcompress-block'], kinds)
-
-    def test_yield_factories_flushing(self):
-        """yield_factories holds back on yielding results from the final block
-        unless passed full_flush=True.
-        """
-        fake_block = groupcompress.GroupCompressBlock()
-        read_memo = ('fake index', 100, 50)
-        gcvf = StubGCVF()
-        gcvf._group_cache[read_memo] = fake_block
-        locations = {
-            ('key',): (read_memo + (None, None), None, None, None)}
-        batcher = groupcompress._BatchingBlockFetcher(gcvf, locations)
-        batcher.add_key(('key',))
-        self.assertEqual([], list(batcher.yield_factories()))
-        factories = list(batcher.yield_factories(full_flush=True))
-        self.assertLength(1, factories)
-        self.assertEqual(('key',), factories[0].key)
-        self.assertEqual('groupcompress-block', factories[0].storage_kind)
-
 
 class TestLazyGroupCompress(tests.TestCaseWithTransport):
 
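
The removed tests pin down the batching contract: `add_key` records every key, but a read_memo is queued for fetching only once per batch, and not at all when its block is already in the group cache (in which case it contributes nothing to the running byte total). A rough model of that bookkeeping, with illustrative names rather than bzrlib's actual implementation:

class BatchingFetcherSketch(object):
    """Models _BatchingBlockFetcher.add_key as the tests describe it."""

    def __init__(self, gcvf, locations):
        self._gcvf = gcvf            # supplies ._group_cache
        self._locations = locations  # key -> (index_memo, ...)
        self.keys = []
        self.memos_to_get = []
        self.total_bytes = 0

    def add_key(self, key):
        self.keys.append(key)
        index_memo = self._locations[key][0]
        read_memo = index_memo[0:3]  # (index, offset, size)
        if read_memo in self._gcvf._group_cache:
            return self.total_bytes  # already cached: nothing to fetch
        if read_memo not in self.memos_to_get:
            self.memos_to_get.append(read_memo)
            self.total_bytes += read_memo[2]  # the size component
        return self.total_bytes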
@@ -1066,24 +962,3 @@
         # consumption
         self.add_key_to_manager(('key4',), locations, block, manager)
         self.assertTrue(manager.check_is_well_utilized())
-
-
-class Test_GCBuildDetails(tests.TestCase):
-
-    def test_acts_like_tuple(self):
-        # _GCBuildDetails inlines some of the data that used to be spread out
-        # across a bunch of tuples
-        bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
-            ('INDEX', 10, 20, 0, 5))
-        self.assertEqual(4, len(bd))
-        self.assertEqual(('INDEX', 10, 20, 0, 5), bd[0])
-        self.assertEqual(None, bd[1]) # Compression Parent is always None
-        self.assertEqual((('parent1',), ('parent2',)), bd[2])
-        self.assertEqual(('group', None), bd[3]) # Record details
-
-    def test__repr__(self):
-        bd = groupcompress._GCBuildDetails((('parent1',), ('parent2',)),
-            ('INDEX', 10, 20, 0, 5))
-        self.assertEqual("_GCBuildDetails(('INDEX', 10, 20, 0, 5),"
-                         " (('parent1',), ('parent2',)))",
-                         repr(bd))
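
The removed `Test_GCBuildDetails` tests describe an object that stores only the parents and an index_memo yet indexes like the old four-element build-details tuple. A sketch of the shape those assertions imply, not bzrlib's actual code:

class GCBuildDetailsSketch(object):
    """Acts like the tuple (index_memo, None, parents, ('group', None))."""

    __slots__ = ('_parents', '_index_memo')

    def __init__(self, parents, index_memo):
        self._parents = parents
        self._index_memo = index_memo

    def __len__(self):
        return 4

    def __getitem__(self, offset):
        # The compression parent is always None for groupcompress, and
        # the record details are always ('group', None).
        return (self._index_memo, None, self._parents,
                ('group', None))[offset]

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__,
                               self._index_memo, self._parents)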