# Copyright (C) 2005, 2006 by Canonical Ltd
# Written by Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
"""Bzrlib specific gzip tunings. We plan to feed these to the upstream gzip."""
20
from cStringIO import StringIO
22
# make GzipFile faster:
24
from gzip import U32, LOWU32, FEXTRA, FCOMMENT, FNAME, FHCRC
29
# we want a \n preserved, break on \n only splitlines.
32
__all__ = ["GzipFile"]


class GzipFile(gzip.GzipFile):
    """Knit tuned version of GzipFile.

    This is based on the following lsprof stats:
    python 2.4 stock GzipFile write:
    58971      0   5644.3090   2721.4730   gzip:193(write)
    +58971     0   1159.5530   1159.5530   +<built-in method compress>
    +176913    0    987.0320    987.0320   +<len>
    +58971     0    423.1450    423.1450   +<zlib.crc32>
    +58971     0    353.1060    353.1060   +<method 'write' of 'cStringIO.
                                             StringO' objects>
    tuned GzipFile write:
    58971      0   4477.2590   2103.1120   bzrlib.knit:1250(write)
    +58971     0   1297.7620   1297.7620   +<built-in method compress>
    +58971     0    406.2160    406.2160   +<zlib.crc32>
    +58971     0    341.9020    341.9020   +<method 'write' of 'cStringIO.
                                             StringO' objects>
    +58971     0    328.2670    328.2670   +<len>

    Yes, it's only 1.6 seconds, but they add up.
    """

    def _add_read_data(self, data):
        # temp var for len(data) and switch to +='s removes
        # thousands of calls to len() and +=
        len_data = len(data)
        self.crc = zlib.crc32(data, self.crc)
        self.extrabuf += data
        self.extrasize += len_data
        self.size += len_data

    def _write_gzip_header(self):
        """A tuned version of gzip._write_gzip_header

        We have some extra constraints that plain gzip does not.
        1) We want to write the whole blob at once, rather than multiple
           calls to fileobj.write().
        2) We never have a filename
        3) We don't care about the time
        """
        self.fileobj.write(
            '\037\213'   # self.fileobj.write('\037\213')  # magic header
            '\010'       # self.fileobj.write('\010')      # compression method
                         # fname = self.filename[:-3]
                         # flags = 0
                         # if fname:
                         #     flags = FNAME
            '\x00'       # self.fileobj.write(chr(flags))
            '\0\0\0\0'   # write32u(self.fileobj, long(time.time()))
            '\002'       # self.fileobj.write('\002')
            '\377'       # self.fileobj.write('\377')
                         # if fname:
            ''           #     self.fileobj.write(fname + '\000')
            )
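
        # For reference, a sketch (not used by the code): the 10-byte blob
        # above is byte-identical to
        #   struct.pack('<2sBBIBB', '\037\213', 8, 0, 0, 2, 255)
        # i.e. magic, deflate method, no flags, zero mtime, the
        # max-compression XFL marker (2), and 'unknown' OS (255).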

    def _read(self, size=1024):
        # various optimisations:
        # reduces lsprof count from 2500 to
        # 8337 calls in 1272, 365 internal
        if self.fileobj is None:
            raise EOFError, "Reached EOF"

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            next_header_bytes = self.fileobj.read(10)
            if next_header_bytes == '':
                raise EOFError, "Reached EOF"

            self._init_read()
            self._read_gzip_header(next_header_bytes)
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.
        if buf == "":
            self._add_read_data(self.decompress.flush())
            assert len(self.decompress.unused_data) >= 8, "what does flush do?"
            self._gzip_tail = self.decompress.unused_data[0:8]
            self._read_eof()
            # tell the driving read() call we have stuffed all the data
            # in self.extrabuf
            raise EOFError, 'Reached EOF'

        self._add_read_data(self.decompress.decompress(buf))

        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the data for the next member, which
            # is the length of the decompress object's unused data minus the
            # first 8 bytes for the end crc and size records.
            #
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because those 8 bytes are part of this member.)
            seek_length = len(self.decompress.unused_data) - 8
            if seek_length > 0:
                # we read too much data
                self.fileobj.seek(-seek_length, 1)
                self._gzip_tail = self.decompress.unused_data[0:8]
            elif seek_length < 0:
                # we haven't read enough to check the checksum.
                assert -8 < seek_length, "too great a seek."
                buf = self.fileobj.read(-seek_length)
                self._gzip_tail = self.decompress.unused_data + buf
            else:
                self._gzip_tail = self.decompress.unused_data

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True

    def _read_eof(self):
        """tuned to reduce function calls and eliminate file seeking:

        reduces lsprof count from 800 to 288
        avoids the U32 call by using struct format L
        """
        # We've read to the end of the file, so we should have 8 bytes of
        # unused data in the decompressor. If we don't, there is a corrupt file.
        # We use these 8 bytes to calculate the CRC and the recorded file size.
        # We then check that the computed CRC and size of the
        # uncompressed data matches the stored values.  Note that the size
        # stored is the true file size mod 2**32.
        assert len(self._gzip_tail) == 8, "gzip trailer is incorrect length."
        crc32, isize = struct.unpack("<LL", self._gzip_tail)
        # note that isize is unsigned - it can exceed 2GB
        if crc32 != U32(self.crc):
            raise IOError, "CRC check failed %d %d" % (crc32, U32(self.crc))
        elif isize != LOWU32(self.size):
            raise IOError, "Incorrect length of data produced"
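
        # Worked example (a sketch, not executed here): for a one-byte body
        # 'a', a compliant writer emits the trailer
        #   struct.pack('<LL', zlib.crc32('a') & 0xffffffffL, 1)
        # and unpacking it with "<LL" above yields that CRC and isize == 1.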

    def _read_gzip_header(self, bytes=None):
        """Supply bytes if the minimum header size is already read.

        :param bytes: 10 bytes of header data.
        """
        # starting cost: 300 in 3998
        # 15998 reads from 3998 calls
        if bytes is None:
            bytes = self.fileobj.read(10)
        magic = bytes[0:2]
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord(bytes[2:3])
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord(bytes[3:4])
        # modtime = self.fileobj.read(4) (bytes[4:8])
        # extraflag = self.fileobj.read(1) (bytes[8:9])
        # os = self.fileobj.read(1) (bytes[9:10])
        # self.fileobj.read(6)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s == '\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s == '\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC
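
        # Fast-path sketch (illustrative): for the minimal header written by
        # _write_gzip_header, bytes[0:2] == '\037\213', ord(bytes[2:3]) == 8
        # and ord(bytes[3:4]) == 0, so every optional-field branch above is
        # skipped and no extra reads from fileobj happen.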

    def readline(self, size=-1):
        """Tuned to remove buffer length calls in _unread and...

        also removes multiple len(c) calls, inlines _unread,
        total savings - lsprof 5800 to 5300
        8176 calls to read() in 1684
        changing the min chunk size to 200 halved all the cache misses
        leading to a drop to:
        4168 calls to read() in 1646
        - i.e. just reduced the function call overhead. May be worth
          keeping.
        """
        if size < 0: size = sys.maxint
        bufs = []
        readsize = min(200, size)    # Read from the file in small chunks
        while True:
            if size == 0:
                return "".join(bufs) # Return resulting line

            # c is the chunk
            c = self.read(readsize)
            # number of bytes read
            len_c = len(c)
            i = c.find('\n')
            if size is not None:
                # We set i=size to break out of the loop under two
                # conditions: 1) there's no newline, and the chunk is
                # larger than size, or 2) there is a newline, but the
                # resulting line would be longer than 'size'.
                if i == -1 and len_c > size: i = size - 1
                elif size <= i: i = size - 1

            if i >= 0 or c == '':
                # if i >= 0 we have a newline or have triggered the above
                # 'size is not None' condition.
                # if c == '' it's EOF.
                bufs.append(c[:i+1])    # Add portion of last chunk
                # -- inlined self._unread --
                ## self._unread(c[i+1:], len_c - i)   # Push back rest of chunk
                self.extrabuf = c[i+1:] + self.extrabuf
                self.extrasize = len_c - i + self.extrasize
                self.offset -= len_c - i
                # -- end inlined self._unread --
                return ''.join(bufs)    # Return resulting line

            # Append chunk to list, decrease 'size',
            # and grow 'readsize' for the next pass.
            bufs.append(c)
            size = size - len_c
            readsize = min(size, readsize * 2)

    def readlines(self, sizehint=0):
        # optimise to avoid all the buffer manipulation
        # lsprof changed from:
        # 4168 calls in 5472 with 32000 calls to readline()
        # to:
        # 4168 calls in 417.
        # Negative numbers result in reading all the lines

        # python's gzip routine uses sizehint. This is a more efficient way
        # than python uses to honor it. But it is even more efficient to
        # just read the entire thing and use cStringIO to split into lines.
        # if sizehint <= 0:
        #     sizehint = -1
        # content = self.read(sizehint)
        # return bzrlib.osutils.split_lines(content)
        content = StringIO(self.read(-1))
        return content.readlines()
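
        # Trade-off note (a sketch of the reasoning): honoring sizehint would
        # return after roughly that many bytes of lines; reading everything
        # and letting cStringIO split costs one read() plus one readlines()
        # call in total.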

    def _unread(self, buf, len_buf=None):
        """tuned to remove unneeded len calls.

        because this is such an inner routine in readline, and readline is
        in many inner loops, this has been inlined into readline().

        The len_buf parameter combined with the reduction in len calls dropped
        the lsprof ms count for this routine on my test data from 800 to 200 -
        a 75% saving.
        """
        if len_buf is None:
            len_buf = len(buf)
        self.extrabuf = buf + self.extrabuf
        self.extrasize = len_buf + self.extrasize
        self.offset -= len_buf
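
        # Effect sketch: after _unread('abc', 3) the next read() returns
        # 'abc' before any newly decompressed data, because GzipFile.read
        # drains self.extrabuf first.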

    def write(self, data):
        if self.mode != gzip.WRITE:
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")

        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"
        data_len = len(data)
        if data_len > 0:
            self.size = self.size + data_len
            self.crc = zlib.crc32(data, self.crc)
            self.fileobj.write(self.compress.compress(data))
            self.offset += data_len

    def writelines(self, lines):
        # profiling indicated a significant overhead
        # calling write for each line.
        # this batch call is a lot faster :).
        # (4 seconds to 1 second for the sample upgrades I was testing).
        self.write(''.join(lines))
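

# A minimal round-trip sketch (illustrative only, not part of the library
# API): exercises the tuned write/readlines paths against an in-memory
# buffer. The names 'sio', 'f' and 'g' exist only in this example.
if __name__ == '__main__':
    sio = StringIO()
    f = GzipFile(mode='wb', fileobj=sio)
    f.writelines(['hello\n', 'world\n'])
    f.close()
    sio.seek(0)
    g = GzipFile(mode='rb', fileobj=sio)
    assert g.readlines() == ['hello\n', 'world\n']
    g.close()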