~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to bzrlib/groupcompress.py

  • Committer: John Arbash Meinel
  • Date: 2009-03-17 18:29:06 UTC
  • mto: (3735.2.156 brisbane-core)
  • mto: This revision was merged to the branch mainline in revision 4280.
  • Revision ID: john@arbash-meinel.com-20090317182906-s7ynapnrcxj9i99s
We now have a 'reuse_blocks=False' flag for autopack et al.
We need to be careful here: insert_record_stream() is a simple function,
while _insert_record_stream() is a generator.

Show diffs side-by-side

added

removed

Lines of Context:
1295
1295
        for _ in self._insert_record_stream(stream):
1296
1296
            pass
1297
1297
 
1298
 
    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None):
 
1298
    def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
 
1299
                              reuse_blocks=True):
1299
1300
        """Internal core to insert a record stream into this container.
1300
1301
 
1301
1302
        This helper function has a different interface than insert_record_stream
1304
1305
        :param stream: A stream of records to insert.
1305
1306
        :param nostore_sha: If the sha1 of a given text matches nostore_sha,
1306
1307
            raise ExistingContent, rather than committing the new text.
 
1308
        :param reuse_blocks: If the source is streaming from
 
1309
            groupcompress-blocks, just insert the blocks as-is, rather than
 
1310
            expanding the texts and inserting again.
1307
1311
        :return: An iterator over the sha1 of the inserted records.
1308
1312
        :seealso insert_record_stream:
1309
1313
        :seealso add_lines:
1346
1350
            # Raise an error when a record is missing.
1347
1351
            if record.storage_kind == 'absent':
1348
1352
                raise errors.RevisionNotPresent(record.key, self)
1349
 
            if record.storage_kind == 'groupcompress-block':
1350
 
                # Insert the raw block into the target repo
1351
 
                insert_manager = record._manager
1352
 
                bytes = record._manager._block.to_bytes()
1353
 
                _, start, length = self._access.add_raw_records(
1354
 
                    [(None, len(bytes))], bytes)[0]
1355
 
                del bytes
1356
 
                block_start = start
1357
 
                block_length = length
1358
 
            if record.storage_kind in ('groupcompress-block',
1359
 
                                       'groupcompress-block-ref'):
1360
 
                assert insert_manager is not None
1361
 
                assert record._manager is insert_manager
1362
 
                value = "%d %d %d %d" % (block_start, block_length,
1363
 
                                         record._start, record._end)
1364
 
                nodes = [(record.key, value, (record.parents,))]
1365
 
                self._index.add_records(nodes, random_id=random_id)
1366
 
                continue
 
1353
            if reuse_blocks:
 
1354
                # If the reuse_blocks flag is set, check to see if we can just
 
1355
                # copy a groupcompress block as-is.
 
1356
                if record.storage_kind == 'groupcompress-block':
 
1357
                    # Insert the raw block into the target repo
 
1358
                    insert_manager = record._manager
 
1359
                    bytes = record._manager._block.to_bytes()
 
1360
                    _, start, length = self._access.add_raw_records(
 
1361
                        [(None, len(bytes))], bytes)[0]
 
1362
                    del bytes
 
1363
                    block_start = start
 
1364
                    block_length = length
 
1365
                if record.storage_kind in ('groupcompress-block',
 
1366
                                           'groupcompress-block-ref'):
 
1367
                    assert insert_manager is not None
 
1368
                    assert record._manager is insert_manager
 
1369
                    value = "%d %d %d %d" % (block_start, block_length,
 
1370
                                             record._start, record._end)
 
1371
                    nodes = [(record.key, value, (record.parents,))]
 
1372
                    self._index.add_records(nodes, random_id=random_id)
 
1373
                    continue
1367
1374
            try:
1368
1375
                bytes = record.get_bytes_as('fulltext')
1369
1376
            except errors.UnavailableRepresentation: