~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/groupcompress.py

  • Committer: Andrew Bennetts
  • Date: 2009-08-26 05:39:00 UTC
  • mto: This revision was merged to the branch mainline in revision 4657.
  • Revision ID: andrew.bennetts@canonical.com-20090826053900-fhgikaxozhrvsfmj
Make BATCH_SIZE a global.

@@ -44,12 +44,15 @@
     VersionedFiles,
     )
 
+# Minimum number of uncompressed bytes to try fetch at once when retrieving
+# groupcompress blocks.
+BATCH_SIZE = 2**16
+
 _USE_LZMA = False and (pylzma is not None)
 
 # osutils.sha_string('')
 _null_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
 
-
 def sort_gc_optimal(parent_map):
     """Sort and group the keys in parent_map into groupcompress order.
 
@@ -1455,9 +1458,8 @@
         # Batch up as many keys as we can until either:
         #  - we encounter an unadded ref, or
         #  - we run out of keys, or
-        #  - the total bytes to retrieve for this batch > 256k
+        #  - the total bytes to retrieve for this batch > BATCH_SIZE
         batcher = _BatchingBlockFetcher(self, locations)
-        BATCH_SIZE = 2**18
         for source, keys in source_keys:
             if source is self:
                 for key in keys:
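
For context, the second hunk drives an accumulate-then-flush loop: keys are batched until their combined uncompressed size exceeds the new module-level BATCH_SIZE, instead of a per-call local constant. The sketch below is a minimal illustration of that pattern only, not the actual _BatchingBlockFetcher code from bzrlib; the size_of(key) callable is a hypothetical stand-in for however the byte count of a key is looked up.

BATCH_SIZE = 2**16  # minimum uncompressed bytes to try to fetch at once

def iter_batches(keys, size_of):
    # Accumulate keys until the batch covers more than BATCH_SIZE bytes,
    # then flush; any trailing partial batch is flushed at the end.
    batch, total_bytes = [], 0
    for key in keys:
        batch.append(key)
        total_bytes += size_of(key)
        if total_bytes > BATCH_SIZE:
            yield batch
            batch, total_bytes = [], 0
    if batch:
        yield batch

# Example (hypothetical sizes):
#   sizes = {'a': 40000, 'b': 40000, 'c': 10}
#   list(iter_batches(['a', 'b', 'c'], sizes.get)) -> [['a', 'b'], ['c']]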