~bzr-pqm/bzr/bzr.dev

« back to all changes in this revision

Viewing changes to groupcompress.py

  • Committer: John Arbash Meinel
  • Date: 2009-03-05 18:40:29 UTC
  • mto: (0.17.44 groupcompress)
  • mto: This revision was merged to the branch mainline in revision 4280.
  • Revision ID: john@arbash-meinel.com-20090305184029-05aqk336dekq5h7z
Prototype using LZMA as the secondary compressor, rather than zlib.

Show diffs side-by-side

added added

removed removed

Lines of Context:
21
21
from cStringIO import StringIO
22
22
import struct
23
23
import zlib
 
24
import pylzma
24
25
 
25
26
from bzrlib import (
26
27
    annotate,
167
168
            assert header_length == 0
168
169
            zcontent = bytes[pos2+1:]
169
170
            if zcontent:
170
 
                out._content = zlib.decompress(zcontent)
 
171
                out._content = pylzma.decompress(zcontent)
171
172
                out._size = len(out._content)
172
173
            return out
173
174
        pos = pos2 + 1
174
175
        pos2 = pos + z_header_length
175
176
        z_header_bytes = bytes[pos:pos2]
176
177
        assert len(z_header_bytes) == z_header_length
177
 
        d = zlib.decompressobj()
178
 
        header_bytes = d.decompress(z_header_bytes)
 
178
        header_bytes = pylzma.decompress(z_header_bytes)
179
179
        assert len(header_bytes) == header_length
180
180
        del z_header_bytes
181
181
        lines = header_bytes.split('\n')
199
199
            info_dict[key] = value
200
200
        zcontent = bytes[pos2:]
201
201
        if zcontent:
202
 
            out._content = d.decompress(zcontent)
203
 
            assert d.flush() == ''
 
202
            out._content = pylzma.decompress(zcontent)
204
203
            out._size = header_len + len(out._content)
205
204
        return out
206
205
 
233
232
            elif entry.type == 'delta':
234
233
                assert c == 'd'
235
234
        content_len, len_len = decode_base128_int(
236
 
                                self._content[start + 1:start + 11])
 
235
                            self._content[entry.start + 1:entry.start + 11])
237
236
        assert entry.length == content_len + 1 + len_len
238
 
        content_start = start + 1 + len_len
 
237
        content_start = entry.start + 1 + len_len
239
238
        end = entry.start + entry.length
240
239
        content = self._content[content_start:end]
241
240
        if c == 'f':
281
280
            chunks.append(chunk)
282
281
        bytes = ''.join(chunks)
283
282
        info_len = len(bytes)
284
 
        c = zlib.compressobj()
285
283
        z_bytes = []
286
 
        z_bytes.append(c.compress(bytes))
 
284
        z_bytes.append(pylzma.compress(bytes))
287
285
        del bytes
288
286
        # TODO: we may want to have the header compressed in the same chain
289
287
        #       as the data, or we may not, evaluate it
292
290
        #       label in the header is duplicated in the text.
293
291
        #       For chk pages and real bytes, I would guess this is not
294
292
        #       true.
295
 
        z_bytes.append(c.flush(zlib.Z_SYNC_FLUSH))
296
293
        z_len = sum(map(len, z_bytes))
297
294
        c_len = len(content)
298
295
        if _NO_LABELS:
299
296
            z_bytes = []
300
297
            z_len = 0
301
298
            info_len = 0
302
 
            c = zlib.compressobj()
303
 
        z_bytes.append(c.compress(content))
304
 
        z_bytes.append(c.flush())
 
299
        z_bytes.append(pylzma.compress(content))
305
300
        chunks = [self.GCB_HEADER,
306
301
                  '%d\n' % (z_len,),
307
302
                  '%d\n' % (info_len,),