# Copyright (C) 2005, 2006 Canonical Ltd
# Written by Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Bzrlib specific gzip tunings. We plan to feed these to the upstream gzip."""

from cStringIO import StringIO

# make GzipFile faster:
import gzip
from gzip import U32, LOWU32, FEXTRA, FCOMMENT, FNAME, FHCRC
import errno
import struct
import sys
import zlib

# we want a \n preserved, break on \n only splitlines.
import bzrlib

__all__ = ["GzipFile", "bytes_to_gzip"]

def bytes_to_gzip(bytes, factory=zlib.compressobj,
    level=zlib.Z_DEFAULT_COMPRESSION, method=zlib.DEFLATED,
    width=-zlib.MAX_WBITS, mem=zlib.DEF_MEM_LEVEL,
    crc32=zlib.crc32):
    """Create a gzip file containing bytes and return its content."""
    result = [
        '\037\213'  # self.fileobj.write('\037\213')  # magic header
        '\010'      # self.fileobj.write('\010')      # compression method
                    # fname = self.filename[:-3]
                    # flags = 0
                    # if fname:
                    #     flags = FNAME
        '\x00'      # self.fileobj.write(chr(flags))
        '\0\0\0\0'  # write32u(self.fileobj, long(time.time()))
        '\002'      # self.fileobj.write('\002')
        '\377'      # self.fileobj.write('\377')
                    # if fname:
        ''          #     self.fileobj.write(fname + '\000')
        ]
    # using a compressobj avoids a small header and trailer that the compress()
    # utility function adds.
    compress = factory(level, method, width, mem, 0)
    result.append(compress.compress(bytes))
    result.append(compress.flush())
    result.append(struct.pack("<L", LOWU32(crc32(bytes))))
    # size may exceed 2GB, or even 4GB
    result.append(struct.pack("<L", LOWU32(len(bytes))))
    return ''.join(result)
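
# A round-trip sketch of bytes_to_gzip (an illustration added here, not part
# of the original module): the output is a complete gzip stream, so it should
# be readable by the stock gzip module. The payload is made up.
#
#   data = 'hello world\n'
#   gzipped = bytes_to_gzip(data)
#   assert gzip.GzipFile(fileobj=StringIO(gzipped), mode='rb').read() == data
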

class GzipFile(gzip.GzipFile):
    """Knit tuned version of GzipFile.

    This is based on the following lsprof stats:
    python 2.4 stock GzipFile write:
    58971      0   5644.3090   2721.4730   gzip:193(write)
    +58971     0   1159.5530   1159.5530   +<built-in method compress>
    +176913    0    987.0320    987.0320   +<len>
    +58971     0    423.1450    423.1450   +<zlib.crc32>
    +58971     0    353.1060    353.1060   +<method 'write' of 'cStringIO.
                                            StringO' objects>
    tuned GzipFile write:
    58971      0   4477.2590   2103.1120   bzrlib.knit:1250(write)
    +58971     0   1297.7620   1297.7620   +<built-in method compress>
    +58971     0    406.2160    406.2160   +<zlib.crc32>
    +58971     0    341.9020    341.9020   +<method 'write' of 'cStringIO.
                                            StringO' objects>
    +58971     0    328.2670    328.2670   +<len>

    Yes, it's only 1.6 seconds, but they add up.
    """

    def _add_read_data(self, data):
        # keep a temp var for len(data) and switch to +='s.
        len_data = len(data)
        self.crc = zlib.crc32(data, self.crc)
        self.extrabuf += data
        self.extrasize += len_data
        self.size += len_data

    def _write_gzip_header(self):
        """A tuned version of gzip._write_gzip_header.

        We have some extra constraints that plain Gzip does not:
        1) We want to write the whole blob at once, rather than multiple
           calls to fileobj.write().
        2) We never have a filename.
        3) We don't care about the time.
        """
        self.fileobj.write(
            '\037\213'  # self.fileobj.write('\037\213')  # magic header
            '\010'      # self.fileobj.write('\010')      # compression method
                        # fname = self.filename[:-3]
                        # flags = 0
                        # if fname:
                        #     flags = FNAME
            '\x00'      # self.fileobj.write(chr(flags))
            '\0\0\0\0'  # write32u(self.fileobj, long(time.time()))
            '\002'      # self.fileobj.write('\002')
            '\377'      # self.fileobj.write('\377')
                        # if fname:
            ''          #     self.fileobj.write(fname + '\000')
            )
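
        # Added annotation (not in the original): the adjacent string
        # literals above concatenate into a single constant 10-byte header.
        # Per RFC 1952 the layout is:
        #
        #   '\037\213'  magic           '\010'      deflate method
        #   '\x00'      flags (none)    '\0\0\0\0'  mtime (zeroed)
        #   '\002'      xfl             '\377'      OS (255 = unknown)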

    def _read(self, size=1024):
        # various optimisations:
        # reduces lsprof count from 2500 to
        # 8337 calls in 1272, 365 internal
        if self.fileobj is None:
            raise EOFError, "Reached EOF"

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            next_header_bytes = self.fileobj.read(10)
            if next_header_bytes == '':
                raise EOFError, "Reached EOF"

            self._init_read()
            self._read_gzip_header(next_header_bytes)
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.
        if buf == "":
            self._add_read_data(self.decompress.flush())
            assert len(self.decompress.unused_data) >= 8, "what does flush do?"
            self._gzip_tail = self.decompress.unused_data[0:8]
            self._read_eof()
            # tell the driving read() call we have stuffed all the data
            # in self.extrabuf
            raise EOFError, 'Reached EOF'

        self._add_read_data(self.decompress.decompress(buf))

        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the data for the next member, which
            # is the length of the decompress object's unused data - the first
            # 8 bytes for the end crc and size records.
            #
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because those 8 bytes are part of this member.)
            seek_length = len(self.decompress.unused_data) - 8
            if seek_length > 0:
                # we read too much data
                self.fileobj.seek(-seek_length, 1)
                self._gzip_tail = self.decompress.unused_data[0:8]
            elif seek_length < 0:
                # we haven't read enough to check the checksum.
                assert -8 < seek_length, "too great a seek."
                buf = self.fileobj.read(-seek_length)
                self._gzip_tail = self.decompress.unused_data + buf
            else:
                self._gzip_tail = self.decompress.unused_data

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call.
            self._read_eof()
            self._new_member = True
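
        # Illustrative note (added, not original code): the member handling
        # above means two gzip streams concatenated back-to-back read out as
        # one continuous stream, e.g.:
        #
        #   both = bytes_to_gzip('a\n') + bytes_to_gzip('b\n')
        #   f = GzipFile(fileobj=StringIO(both), mode='rb')
        #   assert f.read() == 'a\nb\n'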

    def _read_eof(self):
        """Tuned to reduce function calls and eliminate file seeking:
        reduces the lsprof count from 800 to 288; avoids a U32 call by
        using struct format L.
        """
        # We've read to the end of the file, so we should have 8 bytes of
        # unused data in the decompressor. If we don't, the file is corrupt.
        # We use these 8 bytes to calculate the CRC and the recorded file
        # size.  We then check that the computed CRC and size of the
        # uncompressed data matches the stored values.  Note that the size
        # stored is the true file size mod 2**32.
        assert len(self._gzip_tail) == 8, "gzip trailer is incorrect length."
        crc32, isize = struct.unpack("<LL", self._gzip_tail)
        # note that isize is unsigned - it can exceed 2GB
        if crc32 != U32(self.crc):
            raise IOError, "CRC check failed %d %d" % (crc32, U32(self.crc))
        elif isize != LOWU32(self.size):
            raise IOError, "Incorrect length of data produced"

    def _read_gzip_header(self, bytes=None):
        """Supply bytes if the minimum header size is already read.

        :param bytes: 10 bytes of header data.

        starting cost: 300 in 3998
        15998 reads from 3998 calls
        """
        if bytes is None:
            bytes = self.fileobj.read(10)
        magic = bytes[0:2]
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord(bytes[2:3])
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord(bytes[3:4])
        # modtime = self.fileobj.read(4) (bytes [4:8])
        # extraflag = self.fileobj.read(1) (bytes[8:9])
        # os = self.fileobj.read(1) (bytes[9:10])
        # self.fileobj.read(6)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s == '\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s == '\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)    # Read & discard the 16-bit header CRC

    def readline(self, size=-1):
        """Tuned to remove buffer length calls in _unread and...

        also removes multiple len(c) calls, inlines _unread,
        total savings - lsprof 5800 to 5300
        8176 calls to read() in 1684
        changing the min chunk size to 200 halved all the cache misses
        leading to a drop to:
        4168 calls to read() in 1646
        - i.e. just reduced the function call overhead. May be worth
          keeping.
        """
        if size < 0: size = sys.maxint
        bufs = []
        readsize = min(200, size)    # Read from the file in small chunks
        while True:
            if size == 0:
                return "".join(bufs)    # Return resulting line

            c = self.read(readsize)
            # number of bytes read
            len_c = len(c)
            i = c.find('\n')
            # We set i=size to break out of the loop under two
            # conditions: 1) there's no newline, and the chunk is
            # larger than size, or 2) there is a newline, but the
            # resulting line would be longer than 'size'.
            if i == -1 and len_c > size: i = size - 1
            elif size <= i: i = size - 1

            if i >= 0 or c == '':
                # if i >= 0 we have a newline, or we triggered the size
                # condition above.
                # if c == '' it's EOF.
                bufs.append(c[:i+1])    # Add portion of last chunk
                # -- inlined self._unread --
                # The rest of the chunk, c[i+1:], is len_c - i - 1 bytes long.
                ## self._unread(c[i+1:], len_c - i - 1) # Push back rest of chunk
                self.extrabuf = c[i+1:] + self.extrabuf
                self.extrasize = len_c - i - 1 + self.extrasize
                self.offset -= len_c - i - 1
                # -- end inlined self._unread --
                return ''.join(bufs)    # Return resulting line

            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len_c
            readsize = min(size, readsize * 2)
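
        # Behaviour sketch (added example, not original): with a size cap,
        # readline returns at most `size` bytes and buffers the rest for the
        # next call:
        #
        #   f = GzipFile(fileobj=StringIO(bytes_to_gzip('abcdef\n')),
        #                mode='rb')
        #   assert f.readline(3) == 'abc'
        #   assert f.readline() == 'def\n'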

    def readlines(self, sizehint=0):
        # optimise to avoid all the buffer manipulation
        # lsprof before this change: 4168 calls in 5472,
        # with 32000 calls to readline()
        # Negative numbers result in reading all the lines
        # python's gzip routine uses sizehint. This is a more efficient way
        # than python uses to honor it. But it is even more efficient to
        # just read the entire thing and use cStringIO to split into lines.
        # content = self.read(sizehint)
        # return bzrlib.osutils.split_lines(content)
        content = StringIO(self.read(-1))
        return content.readlines()

    def _unread(self, buf, len_buf=None):
        """Tuned to remove unneeded len calls.

        Because this is such an inner routine in readline, and readline is
        in many inner loops, this has been inlined into readline().

        The len_buf parameter combined with the reduction in len calls
        dropped the lsprof ms count for this routine on my test data from
        800 to 200 - a 75% saving.
        """
        if len_buf is None:
            len_buf = len(buf)
        self.extrabuf = buf + self.extrabuf
        self.extrasize = len_buf + self.extrasize
        self.offset -= len_buf

    def write(self, data):
        if self.mode != gzip.WRITE:
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")

        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"

        data_len = len(data)
        if data_len > 0:
            self.size = self.size + data_len
            self.crc = zlib.crc32(data, self.crc)
            self.fileobj.write(self.compress.compress(data))
            self.offset += data_len

    def writelines(self, lines):
        # profiling indicated a significant overhead
        # calling write for each line.
        # this batch call is a lot faster :).
        # (4 seconds to 1 second for the sample upgrades I was testing).
        self.write(''.join(lines))
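

# A minimal self-check sketch (added here for illustration; not part of the
# original module): write through the tuned GzipFile, then read the stream
# back with readlines().
if __name__ == '__main__':
    out = StringIO()
    f = GzipFile(mode='wb', fileobj=out)
    f.writelines(['one\n', 'two\n'])
    f.close()
    g = GzipFile(mode='rb', fileobj=StringIO(out.getvalue()))
    assert g.readlines() == ['one\n', 'two\n']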