3
3
# I made one modification to profile so that it returns a pair
4
4
# instead of just the Stats object
6
from __future__ import absolute_import

import cPickle
import os
import sys
import thread
import threading

from _lsprof import Profiler, profiler_entry

from bzrlib import errors

__all__ = ['profile', 'Stats']
16
def _thread_profile(f, *args, **kwds):
    """sys.setprofile hook installed for threads started while profiling.

    We lose the first profile point for a new thread in order to
    trampoline a new Profiler object into place.

    NOTE(review): this module-level hook records into the module global
    _g_threadmap; BzrProfiler._thread_profile is the per-instance
    equivalent — confirm which one current callers install.
    """
    thr = thread.get_ident()
    _g_threadmap[thr] = p = Profiler()
    # this overrides our sys.setprofile hook:
    p.enable(subcalls=True, builtins=True)
26
19
def profile(f, *args, **kwds):
    """Run a function profile.

    Exceptions are not caught: If you need stats even when exceptions are to be
    raised, pass in a closure that will catch the exceptions and transform them
    appropriately for your driver function.

    Important caveat: only one profile can execute at a time. See BzrProfiler
    for details.

    :return: The functions return value and a stats object.
    """
    profiler = BzrProfiler()
    profiler.start()
    try:
        ret = f(*args, **kwds)
    finally:
        # Stop even if f raised, so the global profiler lock is released
        # and threading.setprofile is unhooked.
        stats = profiler.stop()
    return ret, stats
40
class BzrProfiler(object):
    """Bzr utility wrapper around Profiler.

    For most uses the module level 'profile()' function will be suitable.
    However profiling when a simple wrapped function isn't available may
    be easier to accomplish using this class.

    To use it, create a BzrProfiler and call start() on it. Some arbitrary
    time later call stop() to stop profiling and retrieve the statistics
    from the code executed in the interim.

    Note that profiling involves a threading.Lock around the actual profiling.
    This is needed because profiling involves global manipulation of the python
    interpreter state. As such you cannot perform multiple profiles at once.
    Trying to do so will lock out the second profiler unless the global
    bzrlib.lsprof.BzrProfiler.profiler_block is set to 0. Setting it to 0 will
    cause profiling to fail rather than blocking.
    """

    profiler_block = 1
    """Serialise rather than failing to profile concurrent profile requests."""

    profiler_lock = threading.Lock()
    """Global lock used to serialise profiles."""

    def start(self):
        """Start profiling.

        This hooks into threading and will record all calls made until
        stop() is called.

        :raises errors.InternalBzrError: if another profile is already
            running and profiler_block is 0.
        """
        self._g_threadmap = {}
        self.p = Profiler()
        permitted = self.__class__.profiler_lock.acquire(
            self.__class__.profiler_block)
        if not permitted:
            raise errors.InternalBzrError(msg="Already profiling something")
        try:
            self.p.enable(subcalls=True)
            threading.setprofile(self._thread_profile)
        except:
            # Undo the lock acquisition on any failure so a later profile
            # attempt is not deadlocked, then propagate the error.
            self.__class__.profiler_lock.release()
            raise

    def stop(self):
        """Stop profiling.

        This unhooks from threading and cleans up the profiler, returning
        the gathered Stats object.

        :return: A bzrlib.lsprof.Stats object.
        """
        try:
            self.p.disable()
            for pp in self._g_threadmap.values():
                pp.disable()
            threading.setprofile(None)
            p = self.p
            self.p = None
            threads = {}
            for tid, pp in self._g_threadmap.items():
                threads[tid] = Stats(pp.getstats(), {})
            self._g_threadmap = None
            return Stats(p.getstats(), threads)
        finally:
            # The lock is held from start(); always release it.
            self.__class__.profiler_lock.release()

    def _thread_profile(self, f, *args, **kwds):
        # we lose the first profile point for a new thread in order to
        # trampoline a new Profile object into place
        thr = thread.get_ident()
        self._g_threadmap[thr] = p = Profiler()
        # this overrides our sys.setprofile hook:
        p.enable(subcalls=True, builtins=True)
47
116
class Stats(object):
    """Wrapper around the collected data.

    A Stats instance is created when the profiler finishes. Normal
    usage is to use save() to write out the data to a file, or pprint()
    to write human-readable information to the command line.
    """

    def __init__(self, data, threads):
        """Store the raw profile results.

        :param data: list of profiler entry records, as returned by
            Profiler.getstats().
        :param threads: dict mapping thread id -> Stats for calls made
            on that thread while profiling was active.
        """
        self.data = data
        self.threads = threads
54
128
def sort(self, crit="inlinetime"):
    """Sort the data (and each entry's sub-calls) by the supplied criteria.

    Sorting is in descending order of the chosen attribute.

    :param crit: the data attribute used as the sort key.
    :raises ValueError: if crit is not an attribute of a profiler entry.
    """
    if crit not in profiler_entry.__dict__:
        # py3-compatible raise form (was: raise ValueError, "...")
        raise ValueError("Can't sort by %s" % crit)
    # Equivalent to the old cmp-based descending sort: key + reverse=True
    # is stable and keeps equal elements in their original order.
    key_func = lambda entry: getattr(entry, crit)
    self.data.sort(key=key_func, reverse=True)
    for e in self.data:
        if e.calls:
            e.calls.sort(key=key_func, reverse=True)
def calltree(self, file):
    """Output profiling data in calltree format (for KCacheGrind).

    :param file: a writable file-like object to receive the output.
    """
    # NOTE(review): the def line was lost in the garbled source; the
    # signature is reconstructed from save()'s call self.calltree(outfile).
    _CallTreeFilter(self.data).output(file)
188
def save(self, filename, format=None):
    """Save profiling data to a file.

    :param filename: the name of the output file
    :param format: 'txt' for a text representation;
        'callgrind' for calltree format;
        otherwise a pickled Python object. A format of None indicates
        that the format to use is to be found from the filename. If
        the name starts with callgrind.out, callgrind format is used
        otherwise the format is given by the filename extension.
    """
    if format is None:
        basename = os.path.basename(filename)
        if basename.startswith('callgrind.out'):
            # KCacheGrind's conventional output file naming scheme.
            format = "callgrind"
        else:
            ext = os.path.splitext(filename)[1]
            if len(ext) > 1:
                format = ext[1:]
    outfile = open(filename, 'wb')
    try:
        if format == "callgrind":
            self.calltree(outfile)
        elif format == "txt":
            self.pprint(file=outfile)
        else:
            # Pickling requires code objects to be replaced first.
            self.freeze()
            cPickle.dump(self, outfile, 2)
    finally:
        # Close the handle even if formatting/pickling raises.
        outfile.close()
108
220
class _CallTreeFilter(object):
    """Converter of a Stats object to input suitable for KCacheGrind.

    This code is taken from http://ddaa.net/blog/python/lsprof-calltree
    with the changes made by J.P. Calderone and Itamar applied. Note that
    isinstance(code, str) needs to be used at times to determine if the code
    object is actually an external code object (with a filename, etc.) or
    a Python code object.
    """

    def __init__(self, data):
        # data: list of profiler entries; out_file is set by output().
        self.data = data
        self.out_file = None

    def output(self, out_file):
        """Write the whole profile to out_file in calltree format.

        :param out_file: a writable file-like object.
        """
        self.out_file = out_file
        out_file.write('events: Ticks\n')
        self._print_summary()
        for entry in self.data:
            self._entry(entry)

    def _print_summary(self):
        # The summary is the largest single total time, in integer
        # milliseconds ("Ticks").
        max_cost = 0
        for entry in self.data:
            totaltime = int(entry.totaltime * 1000)
            max_cost = max(max_cost, totaltime)
        self.out_file.write('summary: %d\n' % (max_cost,))

    def _entry(self, entry):
        out_file = self.out_file
        code = entry.code
        inlinetime = int(entry.inlinetime * 1000)
        #out_file.write('ob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            # Built-in / file-less code: calltree uses "~" as placeholder.
            out_file.write('fi=~\n')
        else:
            out_file.write('fi=%s\n' % (code.co_filename,))
        out_file.write('fn=%s\n' % (label(code, True),))
        if isinstance(code, str):
            out_file.write('0 %s\n' % (inlinetime,))
        else:
            out_file.write('%d %d\n' % (code.co_firstlineno, inlinetime))
        # recursive calls are counted in entry.calls
        if entry.calls:
            calls = entry.calls
        else:
            calls = []
        if isinstance(code, str):
            lineno = 0
        else:
            lineno = code.co_firstlineno
        for subentry in calls:
            self._subentry(lineno, subentry)
        # Blank line terminates the entry record.
        out_file.write('\n')

    def _subentry(self, lineno, subentry):
        out_file = self.out_file
        code = subentry.code
        totaltime = int(subentry.totaltime * 1000)
        #out_file.write('cob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            out_file.write('cfi=~\n')
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d 0\n' % (subentry.callcount,))
        else:
            out_file.write('cfi=%s\n' % (code.co_filename,))
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d %d\n' % (
                subentry.callcount, code.co_firstlineno))
        out_file.write('%d %d\n' % (lineno, totaltime))