~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/tests/test_groupcompress.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2009-08-28 07:42:03 UTC
  • mfrom: (4634.3.22 gc-batching)
  • Revision ID: pqm@pqm.ubuntu.com-20090828074203-nmpg23fc16tqzbxf
(andrew) Add some direct unit tests of _BatchingBlockFetcher.

Show diffs side-by-side

added added

removed removed

Lines of Context:
702
702
                              " 0 8', \(\(\('a',\),\),\)\)")
703
703
 
704
704
 
 
705
class StubGCVF(object):
    """Minimal stand-in for a GroupCompressVersionedFiles object.

    It exposes only what _BatchingBlockFetcher touches: an (initially
    empty) ``_group_cache`` dict and a ``_get_blocks`` method that
    replays results canned at construction time.
    """

    def __init__(self, canned_get_blocks=None):
        # Cache of read_memo -> block; tests populate it directly.
        self._group_cache = {}
        # Pairs to be replayed by _get_blocks; default to no results.
        self._canned_get_blocks = canned_get_blocks if canned_get_blocks else []

    def _get_blocks(self, read_memos):
        # The requested read_memos are ignored: whatever was canned at
        # construction is replayed verbatim.
        return iter(self._canned_get_blocks)
713
class Test_BatchingBlockFetcher(TestCaseWithGroupCompressVersionedFiles):
    """Simple whitebox unit tests for _BatchingBlockFetcher."""

    def test_add_key_new_read_memo(self):
        """A key whose read_memo is uncached and unseen in this batch queues
        that read_memo for fetching.
        """
        # locations maps key -> (index_memo, ignored, parents, ignored),
        # where index_memo is (idx, offset, len, factory_start, factory_end).
        # Its first three fields (idx, offset, size) form the 'read_memo'
        # identifying the raw bytes needed.
        memo = ('fake index', 100, 50)
        locs = {('key',): (memo + (None, None), None, None, None)}
        fetcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locs)
        size = fetcher.add_key(('key',))
        self.assertEqual(50, size)
        self.assertEqual([('key',)], fetcher.keys)
        self.assertEqual([memo], fetcher.memos_to_get)

    def test_add_key_duplicate_read_memo(self):
        """A read_memo appearing several times in one batch is fetched only
        once.
        """
        memo = ('fake index', 100, 50)
        # Two keys sharing one read_memo but with distinct overall
        # index_memos (different factory start/end offsets).
        locs = {
            ('key1',): (memo + (0, 1), None, None, None),
            ('key2',): (memo + (1, 2), None, None, None)}
        fetcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locs)
        fetcher.add_key(('key1',))
        size = fetcher.add_key(('key2',))
        self.assertEqual(50, size)
        self.assertEqual([('key1',), ('key2',)], fetcher.keys)
        self.assertEqual([memo], fetcher.memos_to_get)

    def test_add_key_cached_read_memo(self):
        """A key whose read_memo is already in the group cache does not add
        that read_memo to the fetch list.
        """
        memo = ('fake index', 100, 50)
        stub = StubGCVF()
        stub._group_cache[memo] = 'fake block'
        locs = {('key',): (memo + (None, None), None, None, None)}
        fetcher = groupcompress._BatchingBlockFetcher(stub, locs)
        size = fetcher.add_key(('key',))
        self.assertEqual(0, size)
        self.assertEqual([('key',)], fetcher.keys)
        self.assertEqual([], fetcher.memos_to_get)

    def test_yield_factories_empty(self):
        """An empty batch yields no factories."""
        fetcher = groupcompress._BatchingBlockFetcher(StubGCVF(), {})
        self.assertEqual([], list(fetcher.yield_factories()))

    def test_yield_factories_calls_get_blocks(self):
        """Uncached memos are retrieved via get_blocks."""
        memo1 = ('fake index', 100, 50)
        memo2 = ('fake index', 150, 40)
        stub = StubGCVF(
            canned_get_blocks=[
                (memo1, groupcompress.GroupCompressBlock()),
                (memo2, groupcompress.GroupCompressBlock())])
        locs = {
            ('key1',): (memo1 + (None, None), None, None, None),
            ('key2',): (memo2 + (None, None), None, None, None)}
        fetcher = groupcompress._BatchingBlockFetcher(stub, locs)
        fetcher.add_key(('key1',))
        fetcher.add_key(('key2',))
        results = list(fetcher.yield_factories(full_flush=True))
        self.assertLength(2, results)
        self.assertEqual([('key1',), ('key2',)],
                         [factory.key for factory in results])
        self.assertEqual(['groupcompress-block', 'groupcompress-block'],
                         [factory.storage_kind for factory in results])

    def test_yield_factories_flushing(self):
        """yield_factories holds back on yielding results from the final block
        unless passed full_flush=True.
        """
        block = groupcompress.GroupCompressBlock()
        memo = ('fake index', 100, 50)
        stub = StubGCVF()
        stub._group_cache[memo] = block
        locs = {('key',): (memo + (None, None), None, None, None)}
        fetcher = groupcompress._BatchingBlockFetcher(stub, locs)
        fetcher.add_key(('key',))
        # Without full_flush the final (only) block is withheld.
        self.assertEqual([], list(fetcher.yield_factories()))
        # With full_flush the pending factory is released.
        results = list(fetcher.yield_factories(full_flush=True))
        self.assertLength(1, results)
        self.assertEqual(('key',), results[0].key)
        self.assertEqual('groupcompress-block', results[0].storage_kind)
705
810
class TestLazyGroupCompress(tests.TestCaseWithTransport):
706
811
 
707
812
    _texts = {