~bzr-pqm/bzr/bzr.dev

Viewing changes to bzrlib/tests/test_groupcompress.py

  • Committer: John Arbash Meinel
  • Date: 2009-03-17 17:46:17 UTC
  • mto: (3735.2.156 brisbane-core)
  • mto: This revision was merged to the branch mainline in revision 4280.
  • Revision ID: john@arbash-meinel.com-20090317174617-osa5ia09no26xm1w
groupcompress now copies the blocks exactly as they were given.

One major concern here is that 'topo_sort' is not particularly stable. For example,
given the two independent histories a=>b=>c=>d and e=>f=>g=>h, it can easily group
the contents as h,a,b,c,d,e,f,g, which interleaves unrelated histories.
This will actually cause the e-h group to be transmitted twice, producing effective
'bloat'.
We can still tell 'get_record_stream' to remove some of this.
Also, autopack still needs to be told to *not* re-use blocks.
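
To make the instability concrete, here is a minimal sketch (hypothetical; not
bzrlib's actual topo_sort) of a plain depth-first topological sort whose output
order, and therefore the resulting group boundaries, depends entirely on which
node happens to be visited first:

# Hypothetical sketch: the graph maps each revision to its parents,
# modelling the two unrelated histories a=>b=>c=>d and e=>f=>g=>h.
graph = {
    'a': (), 'b': ('a',), 'c': ('b',), 'd': ('c',),
    'e': (), 'f': ('e',), 'g': ('f',), 'h': ('g',),
}

def naive_topo_sort(graph, tie_break):
    # Depth-first: emit every parent before its child, visiting
    # start nodes in 'tie_break' order.
    result = []
    seen = set()
    def visit(node):
        if node in seen:
            return
        seen.add(node)
        for parent in graph[node]:
            visit(parent)
        result.append(node)
    for node in tie_break:
        visit(node)
    return result

# Both results are valid topological orders, but the grouping of the
# two histories differs completely depending on the tie-break:
naive_topo_sort(graph, 'habcdefg')  # -> e, f, g, h, a, b, c, d
naive_topo_sort(graph, 'abcdefgh')  # -> a, b, c, d, e, f, g, h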

=== modified file 'bzrlib/tests/test_groupcompress.py'
@@ -23,6 +23,7 @@
     errors,
     osutils,
     tests,
+    versionedfile,
     )
 from bzrlib.osutils import sha_string
 from bzrlib.tests import (
@@ -429,8 +430,10 @@
 
 class TestCaseWithGroupCompressVersionedFiles(tests.TestCaseWithTransport):
 
-    def make_test_vf(self, create_graph, keylength=1, do_cleanup=True):
-        t = self.get_transport()
+    def make_test_vf(self, create_graph, keylength=1, do_cleanup=True,
+                     dir='.'):
+        t = self.get_transport(dir)
+        t.ensure_base()
         vf = groupcompress.make_pack_factory(graph=create_graph,
             delta=False, keylength=keylength)(t)
         if do_cleanup:
@@ -443,7 +446,8 @@
     def test_get_record_stream_as_requested(self):
         # Consider promoting 'as-requested' to general availability, and
         # make this a VF interface test
-        vf = self.make_test_vf(False, do_cleanup=False)
+        vf = self.make_test_vf(False, do_cleanup=False,
+                               dir='source')
         vf.add_lines(('a',), (), ['lines\n'])
         vf.add_lines(('b',), (), ['lines\n'])
         vf.add_lines(('c',), (), ['lines\n'])
@@ -461,7 +465,7 @@
         groupcompress.cleanup_pack_group(vf)
 
         # It should work even after being repacked into another VF
-        vf2 = self.make_test_vf(False)
+        vf2 = self.make_test_vf(False, dir='target')
         vf2.insert_record_stream(vf.get_record_stream(
                     [('b',), ('a',), ('d',), ('c',)], 'as-requested', False))
         vf2.writer.end()
@@ -475,5 +479,55 @@
                     'as-requested', False)]
         self.assertEqual([('b',), ('a',), ('d',), ('c',)], keys)
 
+    def test_get_record_stream_block(self):
+        vf = self.make_test_vf(True, do_cleanup=False, dir='source')
+        def grouped_stream(revision_ids, first_parents=()):
+            parents = first_parents
+            for revision_id in revision_ids:
+                key = (revision_id,)
+                record = versionedfile.FulltextContentFactory(
+                    key, parents, None,
+                    'some content that is\n'
+                    'identical except for\n'
+                    'revision_id:%s\n' % (revision_id,))
+                yield record
+                parents = (key,)
+        # One group, a-d
+        vf.insert_record_stream(grouped_stream(['a', 'b', 'c', 'd']))
+        # Second group, e-h
+        vf.insert_record_stream(grouped_stream(['e', 'f', 'g', 'h'],
+                                               first_parents=(('d',),)))
+        block_bytes = {}
+        stream = vf.get_record_stream([(r,) for r in 'abcdefgh'],
+                                      'unordered', False)
+        for record in stream:
+            if record.key in [('a',), ('e',)]:
+                self.assertEqual('groupcompress-block', record.storage_kind)
+            else:
+                self.assertEqual('groupcompress-block-ref',
+                                 record.storage_kind)
+            block_bytes[record.key] = record._manager._block._z_content
+        for r in 'abcd':
+            key = (r,)
+            self.assertIs(block_bytes[key], block_bytes[('a',)])
+            self.assertNotEqual(block_bytes[key], block_bytes[('e',)])
+        for r in 'efgh':
+            key = (r,)
+            self.assertIs(block_bytes[key], block_bytes[('e',)])
+            self.assertNotEqual(block_bytes[key], block_bytes[('a',)])
+        # Now copy the blocks into another vf, and ensure that the blocks are
+        # preserved without creating new entries
+        vf2 = self.make_test_vf(True, dir='target')
+        # ordering in 'groupcompress' order, should actually swap the groups in
+        # the target vf, but the groups themselves should not be disturbed.
+        vf2.insert_record_stream(vf.get_record_stream(
+            [(r,) for r in 'abcdefgh'], 'groupcompress', False))
+        groupcompress.cleanup_pack_group(vf)
+        stream = vf2.get_record_stream([(r,) for r in 'abcdefgh'],
+                                       'groupcompress', False)
+        vf2.writer.end()
+        for record in stream:
+            self.assertEqual(block_bytes[record.key],
+                             record._manager._block._z_content)
 
 class TestLazyGroupCompress(tests.TestCaseWithTransport):