# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Bzrlib specific gzip tunings. We plan to feed these to the upstream gzip."""

from __future__ import absolute_import

from cStringIO import StringIO

# make GzipFile faster:
import gzip
from gzip import FEXTRA, FCOMMENT, FNAME, FHCRC
import sys
import struct
import zlib

# we want a \n preserved, break on \n only splitlines.
from bzrlib import symbol_versioning

__all__ = ["GzipFile", "bytes_to_gzip"]


def U32(i):
    """Return i as an unsigned integer, assuming it fits in 32 bits.

    If it's >= 2GB when viewed as a 32-bit unsigned int, return a long.
    """
    if i < 0:
        i += 1L << 32
    return i


def LOWU32(i):
    """Return the low-order 32 bits of an int, as a non-negative int."""
    return i & 0xFFFFFFFFL
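
# Illustrative note (not from the original file): on Python 2, zlib.crc32 can
# return a negative int, while the gzip trailer stores an unsigned value.
# U32/LOWU32 bridge the two representations; for any value that fits in 32
# bits they agree:
#   >>> crc = zlib.crc32('some bytes')   # may be negative on Python 2
#   >>> U32(crc) == LOWU32(crc)
#   True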


def bytes_to_gzip(bytes, factory=zlib.compressobj,
    level=zlib.Z_DEFAULT_COMPRESSION, method=zlib.DEFLATED,
    width=-zlib.MAX_WBITS, mem=zlib.DEF_MEM_LEVEL,
    crc32=zlib.crc32):
    """Create a gzip file containing bytes and return its content."""
    return chunks_to_gzip([bytes])
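
# A hedged usage sketch (not part of the original file): the result should be
# a complete gzip stream, readable by the stdlib gzip module.
#   >>> data = bytes_to_gzip('hello world')
#   >>> gzip.GzipFile(fileobj=StringIO(data)).read()
#   'hello world'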


def chunks_to_gzip(chunks, factory=zlib.compressobj,
    level=zlib.Z_DEFAULT_COMPRESSION, method=zlib.DEFLATED,
    width=-zlib.MAX_WBITS, mem=zlib.DEF_MEM_LEVEL,
    crc32=zlib.crc32):
    """Create a gzip file containing chunks and return its content.

    :param chunks: An iterable of strings. Each string can have arbitrary
        layout.
    """
    result = [
        '\037\213'  # self.fileobj.write('\037\213')  # magic header
        '\010'      # self.fileobj.write('\010')      # compression method
                    # fname = self.filename[:-3]
                    # flags = 0
                    # if fname:
                    #     flags = FNAME
        '\x00'      # self.fileobj.write(chr(flags))
        '\0\0\0\0'  # write32u(self.fileobj, long(time.time()))
        '\002'      # self.fileobj.write('\002')
        '\377'      # self.fileobj.write('\377')
                    # if fname:
        ''          # self.fileobj.write(fname + '\000')
        ]
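    # A minimal sketch of how the rest of this function assembles the stream,
    # assuming the parameter names above (deflate each chunk, then append the
    # 8-byte CRC32/ISIZE trailer); kept as comments since the body is not
    # shown in this extract:
    #   compress = factory(level, method, width, mem, 0)
    #   crc = 0
    #   total_len = 0
    #   for chunk in chunks:
    #       crc = crc32(chunk, crc)
    #       total_len += len(chunk)
    #       zbytes = compress.compress(chunk)
    #       if zbytes:
    #           result.append(zbytes)
    #   result.append(compress.flush())
    #   result.append(struct.pack("<LL", LOWU32(crc), LOWU32(total_len)))
    #   return ''.join(result)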


class GzipFile(gzip.GzipFile):
    """Knit tuned version of GzipFile.

    This is based on lsprof stats comparing the stock GzipFile write path
    with this tuned version. Yes, it's only 1.6 seconds, but they add up.
    """

    def __init__(self, *args, **kwargs):
        symbol_versioning.warn(
            symbol_versioning.deprecated_in((2, 3, 0))
            % 'bzrlib.tuned_gzip.GzipFile',
            DeprecationWarning, stacklevel=2)
        gzip.GzipFile.__init__(self, *args, **kwargs)

    if sys.version_info >= (2, 7, 4):
        def _add_read_data(self, data):
            # temp var for len(data) and switch to +='s.
            len_data = len(data)
            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
            offset = self.offset - self.extrastart
            self.extrabuf = self.extrabuf[offset:] + data
            self.extrasize = self.extrasize + len_data
            self.extrastart = self.offset
            self.size = self.size + len_data
    else:
        def _add_read_data(self, data):
            # temp var for len(data) and switch to +='s.
            len_data = len(data)
            self.crc = zlib.crc32(data, self.crc)
            self.extrabuf += data
            self.extrasize += len_data
            self.size += len_data
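
    # Illustrative note (not in the original): the two variants mirror a
    # change in the stdlib - from Python 2.7.4, gzip.GzipFile keeps self.crc
    # unsigned (hence the & 0xffffffffL) and tracks extrastart/offset for its
    # read buffer, so the override must maintain the same attribute layout;
    # earlier gzips store the raw (possibly negative) crc and only append to
    # extrabuf.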

    def _write_gzip_header(self):
        """A tuned version of gzip._write_gzip_header

        We have some extra constraints that plain Gzip does not.
        1) We want to write the whole blob at once, rather than multiple
           calls to fileobj.write().
        2) We never have a filename
        3) We don't care about the time
        """
        self.fileobj.write(
            '\037\213'  # magic header, as in chunks_to_gzip above
            '\010'      # compression method
            '\x00'      # flags: zero, we never have a filename (no FNAME)
            '\0\0\0\0'  # mtime: zero, we don't care about the time
            '\002'      # xfl
            '\377'      # os
            ''          # self.fileobj.write(fname + '\000')
            )
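
    # Layout note (illustrative, not in the original): the fixed part of a
    # gzip header is exactly 10 bytes - magic (2) + method (1) + flags (1) +
    # mtime (4) + xfl (1) + os (1) - which is why _read() below probes for
    # the next member with self.fileobj.read(10):
    #   >>> len('\037\213' '\010' '\x00' '\0\0\0\0' '\002' '\377')
    #   10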

    if sys.version_info < (2, 7, 4):
        def _read(self, size=1024):
            # various optimisations:
            # reduces lsprof count from 2500 to
            # 8337 calls in 1272, 365 internal
            if self.fileobj is None:
                raise EOFError, "Reached EOF"

            if self._new_member:
                # If the _new_member flag is set, we have to
                # jump to the next member, if there is one.
                #
                # First, check if we're at the end of the file;
                # if so, it's time to stop; no more members to read.
                next_header_bytes = self.fileobj.read(10)
                if next_header_bytes == '':
                    raise EOFError, "Reached EOF"

                self._init_read()
                self._read_gzip_header(next_header_bytes)
                self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
                self._new_member = False

            # Read a chunk of data from the file
            buf = self.fileobj.read(size)

            # If the EOF has been reached, flush the decompression object
            # and mark this object as finished.
            if buf == "":
                self._add_read_data(self.decompress.flush())
                if len(self.decompress.unused_data) < 8:
                    raise AssertionError("what does flush do?")
                self._gzip_tail = self.decompress.unused_data[0:8]
                self._read_eof()
                # tell the driving read() call we have stuffed all the data
                # in self.extrabuf
                raise EOFError, 'Reached EOF'

            self._add_read_data(self.decompress.decompress(buf))

            if self.decompress.unused_data != "":
                # Ending case: we've come to the end of a member in the file,
                # so seek back to the start of the data for the next member,
                # which is the length of the decompress object's unused data
                # minus the first 8 bytes for the end crc and size records.
                #
                # So seek back to the start of the unused data, finish up
                # this member, and read a new gzip header.
                # (The number of bytes to seek back is the length of the
                # unused data, minus 8 because those 8 bytes are part of this
                # member.)
                seek_length = len(self.decompress.unused_data) - 8
                if seek_length > 0:
                    # we read too much data
                    self.fileobj.seek(-seek_length, 1)
                    self._gzip_tail = self.decompress.unused_data[0:8]
                elif seek_length < 0:
                    # we haven't read enough to check the checksum.
                    if not (-8 < seek_length):
                        raise AssertionError("too great a seek")
                    buf = self.fileobj.read(-seek_length)
                    self._gzip_tail = self.decompress.unused_data + buf
                else:
                    self._gzip_tail = self.decompress.unused_data

                # Check the CRC and file size, and set the flag so we read
                # a new member on the next call
                self._read_eof()
                self._new_member = True
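
        # Multi-member note (illustrative, not in the original): a gzip
        # stream may contain several members back to back; the seek_length
        # handling above leaves fileobj positioned at the next member's
        # 10-byte header, so concatenated streams decompress back to back:
        #   >>> twice = bytes_to_gzip('a') + bytes_to_gzip('b')
        #   >>> gzip.GzipFile(fileobj=StringIO(twice)).read()
        #   'ab'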

        def _read_eof(self):
            """tuned to reduce function calls and eliminate file seeking:
            reduces lsprof count from 800 to 288
            avoid U32 call by using struct format L
            """
            # We've read to the end of the file, so we should have 8 bytes of
            # unused data in the decompressor. If we don't, there is a corrupt
            # file. We use these 8 bytes to calculate the CRC and the recorded
            # file size. We then check that the computed CRC and size of the
            # uncompressed data match the stored values. Note that the size
            # stored is the true file size mod 2**32.
            if not (len(self._gzip_tail) == 8):
                raise AssertionError("gzip trailer is incorrect length.")
            crc32, isize = struct.unpack("<LL", self._gzip_tail)
            # note that isize is unsigned - it can exceed 2GB
            if crc32 != U32(self.crc):
                raise IOError, "CRC check failed %d %d" % (crc32, U32(self.crc))
            elif isize != LOWU32(self.size):
                raise IOError, "Incorrect length of data produced"
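
        # Trailer note (illustrative, not in the original): the 8-byte gzip
        # tail is CRC32 then ISIZE, both little-endian unsigned 32-bit, which
        # is exactly what the "<LL" struct format above unpacks:
        #   >>> struct.pack("<LL", 0, 11)
        #   '\x00\x00\x00\x00\x0b\x00\x00\x00'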

    def _read_gzip_header(self, bytes=None):
        """Supply bytes if the minimum header size is already read.

        :param bytes: 10 bytes of header data.
        """
        """starting cost: 300 in 3998