# Copyright (C) 2005, 2006 Canonical Ltd
# Written by Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Bzrlib specific gzip tunings. We plan to feed these to the upstream gzip."""

from cStringIO import StringIO
import errno
import struct
import sys
import zlib

# make GzipFile faster:
import gzip
from gzip import U32, LOWU32, FEXTRA, FCOMMENT, FNAME, FHCRC

# we want a \n preserved, break on \n only splitlines.
import bzrlib

__all__ = ["GzipFile", "bytes_to_gzip"]


def bytes_to_gzip(bytes, factory=zlib.compressobj,
    level=zlib.Z_DEFAULT_COMPRESSION, method=zlib.DEFLATED,
    width=-zlib.MAX_WBITS, mem=zlib.DEF_MEM_LEVEL,
    crc32=zlib.crc32):
    """Create a gzip file containing bytes and return its content."""
    result = [
        '\037\213'  # self.fileobj.write('\037\213')  # magic header
        '\010'      # self.fileobj.write('\010')      # compression method
                    # fname = self.filename[:-3]
                    # flags = 0
                    # if fname:
                    #     flags = FNAME
        '\x00'      # self.fileobj.write(chr(flags))
        '\0\0\0\0'  # write32u(self.fileobj, long(time.time()))
        '\002'      # self.fileobj.write('\002')
        '\377'      # self.fileobj.write('\377')
                    # if fname:
        ''          #     self.fileobj.write(fname + '\000')
        ]
    # using a compressobj avoids a small header and trailer that the compress()
    # utility function adds.
    compress = factory(level, method, width, mem, 0)
    result.append(compress.compress(bytes))
    result.append(compress.flush())
    result.append(struct.pack("<L", LOWU32(crc32(bytes))))
    # size may exceed 2GB, or even 4GB
    result.append(struct.pack("<L", LOWU32(len(bytes))))
    return ''.join(result)
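

# Illustrative check (annotation added here; the sample data is made up):
# the string returned by bytes_to_gzip is a complete single-member gzip
# stream, so the stdlib module can read it straight back:
#
#   data = 'hello world\n'
#   assert gzip.GzipFile(fileobj=StringIO(bytes_to_gzip(data))).read() == data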


class GzipFile(gzip.GzipFile):
    """Knit tuned version of GzipFile.

    This is based on the following lsprof stats:
    python 2.4 stock GzipFile write:
    58971      0   5644.3090   2721.4730   gzip:193(write)
    +58971     0   1159.5530   1159.5530   +<built-in method compress>
    +176913    0    987.0320    987.0320   +<len>
    +58971     0    423.1450    423.1450   +<zlib.crc32>
    +58971     0    353.1060    353.1060   +<method 'write' of 'cStringIO.
                                            StringO' objects>
    tuned GzipFile write:
    58971      0   4477.2590   2103.1120   bzrlib.knit:1250(write)
    +58971     0   1297.7620   1297.7620   +<built-in method compress>
    +58971     0    406.2160    406.2160   +<zlib.crc32>
    +58971     0    341.9020    341.9020   +<method 'write' of 'cStringIO.
                                            StringO' objects>
    +58971     0    328.2670    328.2670   +<len>

    Yes, it's only 1.6 seconds, but they add up.
    """

    def _add_read_data(self, data):
        # temp var for len(data) and switch to +='s.
        len_data = len(data)
        self.crc = zlib.crc32(data, self.crc)
        self.extrabuf += data
        self.extrasize += len_data
        self.size += len_data

    def _write_gzip_header(self):
        """A tuned version of gzip._write_gzip_header

        We have some extra constraints that plain Gzip does not.
        1) We want to write the whole blob at once, rather than multiple
           calls to fileobj.write().
        2) We never have a filename
        3) We don't care about the time
        """
        self.fileobj.write(
            '\037\213'  # self.fileobj.write('\037\213')  # magic header
            '\010'      # self.fileobj.write('\010')      # compression method
                        # fname = self.filename[:-3]
                        # flags = 0
                        # if fname:
                        #     flags = FNAME
            '\x00'      # self.fileobj.write(chr(flags))
            '\0\0\0\0'  # write32u(self.fileobj, long(time.time()))
            '\002'      # self.fileobj.write('\002')
            '\377'      # self.fileobj.write('\377')
                        # if fname:
            ''          #     self.fileobj.write(fname + '\000')
            )
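
        # For reference (annotation added here, not part of stock gzip):
        # the ten fixed header bytes written above decode, per RFC 1952, as
        #   ID1=0x1f ID2=0x8b  ('\037\213', the gzip magic)
        #   CM=8               ('\010', deflate)
        #   FLG=0              ('\x00', no FNAME/FEXTRA/FCOMMENT/FHCRC)
        #   MTIME=0            ('\0\0\0\0', we never write a timestamp)
        #   XFL=2              ('\002', maximum-compression hint)
        #   OS=255             ('\377', unknown operating system)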

    def _read(self, size=1024):
        # various optimisations:
        # reduces lsprof count from 2500 to
        # 8337 calls in 1272, 365 internal
        if self.fileobj is None:
            raise EOFError, "Reached EOF"

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            next_header_bytes = self.fileobj.read(10)
            if next_header_bytes == '':
                raise EOFError, "Reached EOF"

            self._init_read()
            self._read_gzip_header(next_header_bytes)
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.
        if buf == "":
            self._add_read_data(self.decompress.flush())
            if len(self.decompress.unused_data) < 8:
                raise AssertionError("what does flush do?")
            self._gzip_tail = self.decompress.unused_data[0:8]
            self._read_eof()
            # tell the driving read() call we have stuffed all the data
            # in self.extrabuf
            raise EOFError, 'Reached EOF'

        self._add_read_data(self.decompress.decompress(buf))

        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the data for the next member which
            # is the length of the decompress object's unused data - the first
            # 8 bytes for the end crc and size records.
            #
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because those 8 bytes are part of this member.)
            seek_length = len(self.decompress.unused_data) - 8
            if seek_length > 0:
                # we read too much data
                self.fileobj.seek(-seek_length, 1)
                self._gzip_tail = self.decompress.unused_data[0:8]
            elif seek_length < 0:
                # we haven't read enough to check the checksum.
                if not (-8 < seek_length):
                    raise AssertionError("too great a seek")
                buf = self.fileobj.read(-seek_length)
                self._gzip_tail = self.decompress.unused_data + buf
            else:
                self._gzip_tail = self.decompress.unused_data

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True
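
        # Worked example (annotation added here): if unused_data held 20
        # bytes, the first 8 would be this member's CRC/ISIZE trailer, so
        # seek_length = 20 - 8 = 12 and we seek back 12 bytes, leaving the
        # stream positioned at the start of the next member's header.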

    def _read_eof(self):
        """Tuned to reduce function calls and eliminate file seeking:

        reduces lsprof count from 800 to 288
        avoid U32 call by using struct format L
        """
        # We've read to the end of the file, so we should have 8 bytes of
        # unused data in the decompressor. If we don't, there is a corrupt file.
        # We use these 8 bytes to calculate the CRC and the recorded file size.
        # We then check that the computed CRC and size of the
        # uncompressed data matches the stored values.  Note that the size
        # stored is the true file size mod 2**32.
        if not (len(self._gzip_tail) == 8):
            raise AssertionError("gzip trailer is incorrect length.")
        crc32, isize = struct.unpack("<LL", self._gzip_tail)
        # note that isize is unsigned - it can exceed 2GB
        if crc32 != U32(self.crc):
            raise IOError, "CRC check failed %d %d" % (crc32, U32(self.crc))
        elif isize != LOWU32(self.size):
            raise IOError, "Incorrect length of data produced"
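
        # For reference (annotation added here): struct "<LL" unpacks the
        # trailer as two little-endian unsigned 32-bit fields defined by
        # RFC 1952: the CRC32 of the uncompressed data, then ISIZE, the
        # uncompressed length mod 2**32 - which is why LOWU32(self.size)
        # is compared rather than self.size itself.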

    def _read_gzip_header(self, bytes=None):
        """Supply bytes if the minimum header size is already read.

        :param bytes: 10 bytes of header data.

        starting cost: 300 in 3998
        15998 reads from 3998 calls
        """
        if bytes is None:
            bytes = self.fileobj.read(10)
        magic = bytes[0:2]
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord(bytes[2:3])
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord(bytes[3:4])
        # modtime = self.fileobj.read(4) (bytes [4:8])
        # extraflag = self.fileobj.read(1) (bytes[8:9])
        # os = self.fileobj.read(1) (bytes[9:10])
        # self.fileobj.read(6)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC

    def readline(self, size=-1):
        """Tuned to remove buffer length calls in _unread and...

        also removes multiple len(c) calls, inlines _unread,
        total savings - lsprof 5800 to 5300
        8176 calls to read() in 1684
        changing the min chunk size to 200 halved all the cache misses
        leading to a drop to:
        4168 calls to read() in 1646
        - i.e. just reduced the function call overhead. May be worth
          keeping.
        """
        if size < 0: size = sys.maxint
        bufs = []
        readsize = min(200, size)    # Read from the file in small chunks
        while True:
            if size == 0:
                return "".join(bufs) # Return resulting line

            # c is the chunk
            c = self.read(readsize)
            # number of bytes read
            len_c = len(c)
            i = c.find('\n')
            if size is not None:
                # We set i=size to break out of the loop under two
                # conditions: 1) there's no newline, and the chunk is
                # larger than size, or 2) there is a newline, but the
                # resulting line would be longer than 'size'.
                if i==-1 and len_c > size: i=size-1
                elif size <= i: i = size -1

            if i >= 0 or c == '':
                # if i >= 0 we have a newline or have triggered the above
                # 'if size is not None' condition.
                # if c == '' it's EOF.
                bufs.append(c[:i+1])    # Add portion of last chunk
                # -- inlined self._unread --
                ## self._unread(c[i+1:], len_c - i) # Push back rest of chunk
                self.extrabuf = c[i+1:] + self.extrabuf
                self.extrasize = len_c - i + self.extrasize
                self.offset -= len_c - i
                # -- end inlined self._unread --
                return ''.join(bufs)    # Return resulting line

            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len_c
            readsize = min(size, readsize * 2)
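
        # Note (annotation added here): readsize doubles on each pass
        # (200, 400, 800, ...), so scanning a long line costs O(log n)
        # read() calls; 200 is the minimum chunk size that the docstring
        # above reports as halving the cache misses.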

    def readlines(self, sizehint=0):
        # optimise to avoid all the buffer manipulation
        # lsprof for the readline()-based version showed:
        # 4168 calls in 5472 with 32000 calls to readline()
        # Negative numbers result in reading all the lines

        # python's gzip routine uses sizehint. This is a more efficient way
        # than python uses to honor it. But it is even more efficient to
        # just read the entire thing and use cStringIO to split into lines.
        # content = self.read(sizehint)
        # return bzrlib.osutils.split_lines(content)
        content = StringIO(self.read(-1))
        return content.readlines()
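
    # Illustrative note (annotation added here): cStringIO.StringIO.readlines
    # splits on '\n' only and keeps the newline on each line, e.g.
    # StringIO('a\nb\n').readlines() == ['a\n', 'b\n'], which is the
    # "\n preserved" behaviour wanted at the top of this module.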

    def _unread(self, buf, len_buf=None):
        """Tuned to remove unneeded len calls.

        Because this is such an inner routine in readline, and readline is
        in many inner loops, this has been inlined into readline().

        The len_buf parameter combined with the reduction in len calls dropped
        the lsprof ms count for this routine on my test data from 800 to 200 -
        a 75% saving.
        """
        if len_buf is None:
            len_buf = len(buf)
        self.extrabuf = buf + self.extrabuf
        self.extrasize = len_buf + self.extrasize
        self.offset -= len_buf

    def write(self, data):
        if self.mode != gzip.WRITE:
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")

        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"
        data_len = len(data)
        if data_len > 0:
            self.size = self.size + data_len
            self.crc = zlib.crc32(data, self.crc)
            self.fileobj.write(self.compress.compress(data))
            self.offset += data_len

    def writelines(self, lines):
        # profiling indicated a significant overhead
        # calling write for each line.
        # this batch call is a lot faster :).
        # (4 seconds to 1 second for the sample upgrades I was testing).
        self.write(''.join(lines))
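

# Illustrative round-trip (annotation added here; the sample data is made
# up). GzipFile wraps any file-like object, so an in-memory buffer works:
#
#   sio = StringIO()
#   f = GzipFile(mode='wb', fileobj=sio)
#   f.writelines(['line one\n', 'line two\n'])
#   f.close()
#   sio.seek(0)
#   assert GzipFile(mode='rb', fileobj=sio).readlines() == \
#       ['line one\n', 'line two\n']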