~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/test_groupcompress.py

  • Committer: John Arbash Meinel
  • Date: 2009-03-17 18:29:06 UTC
  • mto: (3735.2.156 brisbane-core)
  • mto: This revision was merged to the branch mainline in revision 4280.
  • Revision ID: john@arbash-meinel.com-20090317182906-s7ynapnrcxj9i99s
We now have a 'reuse_blocks=False' flag for autopack et al.
We need to be careful that insert_record_stream() is a simple function,
but _insert_record_stream() is a generator.
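
The point about the two APIs is the usual wrapper-around-a-generator shape: the public insert_record_stream() has to behave like an ordinary function and finish all its work before returning, while the private _insert_record_stream() is a generator that only runs as it is consumed (which is why the new test wraps it in list()). A minimal sketch of that shape, using a simplified stand-in class; the generator body is a placeholder, not bzrlib's real implementation:

    class SketchGroupCompressVF(object):
        # Illustrative stand-in only; not the real GroupCompressVersionedFiles.

        def insert_record_stream(self, stream):
            # Public entry point: a plain function. Drain the private
            # generator so every record is inserted before returning.
            for _ in self._insert_record_stream(stream):
                pass

        def _insert_record_stream(self, stream, reuse_blocks=True):
            # Private generator: does nothing until iterated. Callers such
            # as autopack can pass reuse_blocks=False and must consume it,
            # e.g. list(vf._insert_record_stream(stream, reuse_blocks=False)).
            for record in stream:
                # (compression and block-reuse details omitted in this sketch)
                yield record.key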


@@ -446,8 +446,7 @@
     def test_get_record_stream_as_requested(self):
         # Consider promoting 'as-requested' to general availability, and
         # make this a VF interface test
-        vf = self.make_test_vf(False, do_cleanup=False,
-                               dir='source')
+        vf = self.make_test_vf(False, dir='source')
         vf.add_lines(('a',), (), ['lines\n'])
         vf.add_lines(('b',), (), ['lines\n'])
         vf.add_lines(('c',), (), ['lines\n'])
@@ -461,8 +460,6 @@
                     [('b',), ('a',), ('d',), ('c',)],
                     'as-requested', False)]
         self.assertEqual([('b',), ('a',), ('d',), ('c',)], keys)
-        # We have to cleanup manually, because we create a second VF
-        groupcompress.cleanup_pack_group(vf)
 
         # It should work even after being repacked into another VF
         vf2 = self.make_test_vf(False, dir='target')
@@ -479,8 +476,8 @@
                     'as-requested', False)]
         self.assertEqual([('b',), ('a',), ('d',), ('c',)], keys)
 
-    def test_get_record_stream_block(self):
-        vf = self.make_test_vf(True, do_cleanup=False, dir='source')
+    def test_insert_record_stream_re_uses_blocks(self):
+        vf = self.make_test_vf(True, dir='source')
         def grouped_stream(revision_ids, first_parents=()):
             parents = first_parents
             for revision_id in revision_ids:
@@ -500,13 +497,16 @@
         block_bytes = {}
         stream = vf.get_record_stream([(r,) for r in 'abcdefgh'],
                                       'unordered', False)
+        num_records = 0
         for record in stream:
             if record.key in [('a',), ('e',)]:
                 self.assertEqual('groupcompress-block', record.storage_kind)
             else:
                 self.assertEqual('groupcompress-block-ref',
                                  record.storage_kind)
             block_bytes[record.key] = record._manager._block._z_content
+            num_records += 1
+        self.assertEqual(8, num_records)
         for r in 'abcd':
             key = (r,)
             self.assertIs(block_bytes[key], block_bytes[('a',)])
@@ -522,12 +522,57 @@
         # the target vf, but the groups themselves should not be disturbed.
         vf2.insert_record_stream(vf.get_record_stream(
             [(r,) for r in 'abcdefgh'], 'groupcompress', False))
-        groupcompress.cleanup_pack_group(vf)
         stream = vf2.get_record_stream([(r,) for r in 'abcdefgh'],
                                        'groupcompress', False)
         vf2.writer.end()
+        num_records = 0
         for record in stream:
+            num_records += 1
             self.assertEqual(block_bytes[record.key],
                              record._manager._block._z_content)
+        self.assertEqual(8, num_records)
+
+    def test__insert_record_stream_no_reuse_block(self):
+        vf = self.make_test_vf(True, dir='source')
+        def grouped_stream(revision_ids, first_parents=()):
+            parents = first_parents
+            for revision_id in revision_ids:
+                key = (revision_id,)
+                record = versionedfile.FulltextContentFactory(
+                    key, parents, None,
+                    'some content that is\n'
+                    'identical except for\n'
+                    'revision_id:%s\n' % (revision_id,))
+                yield record
+                parents = (key,)
+        # One group, a-d
+        vf.insert_record_stream(grouped_stream(['a', 'b', 'c', 'd']))
+        # Second group, e-h
+        vf.insert_record_stream(grouped_stream(['e', 'f', 'g', 'h'],
+                                               first_parents=(('d',),)))
+        vf.writer.end()
+        self.assertEqual(8, len(list(vf.get_record_stream(
+                                        [(r,) for r in 'abcdefgh'],
+                                        'unordered', False))))
+        # Now copy the blocks into another vf, and ensure that the blocks are
+        # preserved without creating new entries
+        vf2 = self.make_test_vf(True, dir='target')
+        # ordering in 'groupcompress' order, should actually swap the groups in
+        # the target vf, but the groups themselves should not be disturbed.
+        list(vf2._insert_record_stream(vf.get_record_stream(
+            [(r,) for r in 'abcdefgh'], 'groupcompress', False),
+            reuse_blocks=False))
+        vf2.writer.end()
+        # After inserting with reuse_blocks=False, we should have everything in
+        # a single new block.
+        stream = vf2.get_record_stream([(r,) for r in 'abcdefgh'],
+                                       'groupcompress', False)
+        block = None
+        for record in stream:
+            if block is None:
+                block = record._manager._block
+            else:
+                self.assertIs(block, record._manager._block)
+
 
 class TestLazyGroupCompress(tests.TestCaseWithTransport):
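
Taken together, the two new tests pin down the contract of the flag: the default public path may carry the source's compressed blocks over unchanged, while calling the private generator with reuse_blocks=False forces every record to be recompressed into a fresh block. A hedged usage sketch of the two paths, with vf_source and vf_target standing in for the make_test_vf() objects used in the tests:

    keys = [(r,) for r in 'abcdefgh']

    # Default path (test_insert_record_stream_re_uses_blocks): existing
    # groupcompress blocks may be transmitted byte-for-byte.
    vf_target.insert_record_stream(
        vf_source.get_record_stream(keys, 'groupcompress', False))

    # Autopack-style path (test__insert_record_stream_no_reuse_block):
    # force recompression into new blocks. The private method is a
    # generator, so it has to be consumed, e.g. with list().
    list(vf_target._insert_record_stream(
        vf_source.get_record_stream(keys, 'groupcompress', False),
        reuse_blocks=False))
    vf_target.writer.end()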