3
3
# I made one modification to profile so that it returns a pair
4
4
# instead of just the Stats object
6
from __future__ import absolute_import
13
9
from _lsprof import Profiler, profiler_entry
15
from bzrlib import errors
17
11
__all__ = ['profile', 'Stats']
16
def _thread_profile(f, *args, **kwds):
    """sys.setprofile hook installed for newly created threads.

    NOTE(review): legacy module-level hook; the same logic also exists as
    BzrProfiler._thread_profile. It relies on a module-global _g_threadmap
    (not visible in this chunk) — confirm it is still defined before keeping
    this function.
    """
    # we lose the first profile point for a new thread in order to trampoline
    # a new Profile object into place
    thr = thread.get_ident()
    _g_threadmap[thr] = p = Profiler()
    # this overrides our sys.setprofile hook:
    p.enable(subcalls=True, builtins=True)
def profile(f, *args, **kwds):
    """Run a function profile.

    Exceptions are not caught: If you need stats even when exceptions are to be
    raised, pass in a closure that will catch the exceptions and transform them
    appropriately for your driver function.

    Important caveat: only one profile can execute at a time. See BzrProfiler
    for details.

    :return: The functions return value and a stats object.
    """
    profiler = BzrProfiler()
    profiler.start()
    try:
        ret = f(*args, **kwds)
    finally:
        # stop() always runs so the global profiler lock is released even
        # when f raises; exceptions themselves propagate to the caller.
        stats = profiler.stop()
    return ret, stats
class BzrProfiler(object):
    """Bzr utility wrapper around Profiler.

    For most uses the module level 'profile()' function will be suitable.
    However profiling when a simple wrapped function isn't available may
    be easier to accomplish using this class.

    To use it, create a BzrProfiler and call start() on it. Some arbitrary
    time later call stop() to stop profiling and retrieve the statistics
    from the code executed in the interim.

    Note that profiling involves a threading.Lock around the actual profiling.
    This is needed because profiling involves global manipulation of the python
    interpreter state. As such you cannot perform multiple profiles at once.
    Trying to do so will lock out the second profiler unless the global
    bzrlib.lsprof.BzrProfiler.profiler_block is set to 0. Setting it to 0 will
    cause profiling to fail rather than blocking.
    """

    profiler_block = 1
    """Serialise rather than failing to profile concurrent profile requests."""

    profiler_lock = threading.Lock()
    """Global lock used to serialise profiles."""

    def start(self):
        """Start profiling.

        This hooks into threading and will record all calls made until
        stop() is called.

        :raises errors.InternalBzrError: if another profile is already
            running and profiler_block is 0 (non-blocking mode).
        """
        self._g_threadmap = {}
        self.p = Profiler()
        # acquire(blocking) — with profiler_block=1 this waits for any other
        # profiler to finish; with 0 it returns False immediately.
        permitted = self.__class__.profiler_lock.acquire(
            self.__class__.profiler_block)
        if not permitted:
            raise errors.InternalBzrError(msg="Already profiling something")
        try:
            self.p.enable(subcalls=True)
            threading.setprofile(self._thread_profile)
        except:
            # Deliberately broad: whatever failed, release the global lock
            # before re-raising so later profiles are not deadlocked.
            self.__class__.profiler_lock.release()
            raise

    def stop(self):
        """Stop profiling.

        This unhooks from threading and cleans up the profiler, returning
        the gathered Stats object.

        :return: A bzrlib.lsprof.Stats object.
        """
        try:
            self.p.disable()
            for pp in self._g_threadmap.values():
                pp.disable()
            threading.setprofile(None)
            p = self.p
            self.p = None
            threads = {}
            for tid, pp in self._g_threadmap.items():
                threads[tid] = Stats(pp.getstats(), {})
            self._g_threadmap = None
            return Stats(p.getstats(), threads)
        finally:
            # Always release, even if disable()/getstats() raised.
            self.__class__.profiler_lock.release()

    def _thread_profile(self, f, *args, **kwds):
        # we lose the first profile point for a new thread in order to
        # trampoline a new Profile object into place
        thr = thread.get_ident()
        self._g_threadmap[thr] = p = Profiler()
        # this overrides our sys.setprofile hook:
        p.enable(subcalls=True, builtins=True)
class Stats(object):
    """Wrapper around the collected data.

    A Stats instance is created when the profiler finishes. Normal
    usage is to use save() to write out the data to a file, or pprint()
    to write human-readable information to the command line.
    """

    def __init__(self, data, threads):
        # data: list of profiler_entry records; threads: dict mapping
        # thread id -> Stats for calls made on that thread.
        self.data = data
        self.threads = threads

    def sort(self, crit="inlinetime"):
        """Sort the data by the supplied critera.

        :param crit: the data attribute used as the sort key.
        :raises ValueError: if crit is not a profiler_entry attribute.
        """
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        # Descending order: biggest cost first, for both the top-level
        # entries and each entry's subcall list.
        key_func = lambda entry: getattr(entry, crit)
        self.data.sort(key=key_func, reverse=True)
        for e in self.data:
            if e.calls:
                e.calls.sort(key=key_func, reverse=True)

    def calltree(self, file):
        """Output profiling data in calltree format (for KCacheGrind)."""
        _CallTreeFilter(self.data).output(file)

    def save(self, filename, format=None):
        """Save profiling data to a file.

        :param filename: the name of the output file
        :param format: 'txt' for a text representation;
            'callgrind' for calltree format;
            otherwise a pickled Python object. A format of None indicates
            that the format to use is to be found from the filename. If
            the name starts with callgrind.out, callgrind format is used
            otherwise the format is given by the filename extension.
        """
        if format is None:
            basename = os.path.basename(filename)
            if basename.startswith('callgrind.out'):
                format = "callgrind"
            else:
                ext = os.path.splitext(filename)[1]
                if len(ext) > 1:
                    format = ext[1:]
        outfile = open(filename, 'wb')
        try:
            if format == "callgrind":
                self.calltree(outfile)
            elif format == "txt":
                # pprint is defined on the full class; not visible in this
                # chunk of the file.
                self.pprint(file=outfile)
            else:
                # NOTE(review): upstream calls self.freeze() before pickling
                # so the C-level entries become picklable — that method is
                # not visible in this chunk; confirm before relying on this.
                cPickle.dump(self, outfile, 2)
        finally:
            # Close the file even if serialisation raises.
            outfile.close()
class _CallTreeFilter(object):
    """Converter of a Stats object to input suitable for KCacheGrind.

    This code is taken from http://ddaa.net/blog/python/lsprof-calltree
    with the changes made by J.P. Calderone and Itamar applied. Note that
    isinstance(code, str) needs to be used at times to determine if the code
    object is actually an external code object (with a filename, etc.) or
    a built-in code object.
    """

    def __init__(self, data):
        # data: list of profiler_entry records to convert.
        self.data = data
        self.out_file = None

    def output(self, out_file):
        """Write self.data to out_file in callgrind format."""
        self.out_file = out_file
        out_file.write('events: Ticks\n')
        self._print_summary()
        for entry in self.data:
            self._entry(entry)

    def _print_summary(self):
        # 'summary' is the maximum per-entry total time, in ticks
        # (milliseconds, given the *1000 scaling used throughout).
        max_cost = 0
        for entry in self.data:
            totaltime = int(entry.totaltime * 1000)
            max_cost = max(max_cost, totaltime)
        self.out_file.write('summary: %d\n' % (max_cost,))

    def _entry(self, entry):
        """Emit one function entry plus its subcall records."""
        out_file = self.out_file
        code = entry.code
        inlinetime = int(entry.inlinetime * 1000)
        #out_file.write('ob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            # Built-in: there is no source file to point at.
            out_file.write('fi=~\n')
        else:
            out_file.write('fi=%s\n' % (code.co_filename,))
        out_file.write('fn=%s\n' % (label(code, True),))
        if isinstance(code, str):
            out_file.write('0 %s\n' % (inlinetime,))
        else:
            out_file.write('%d %d\n' % (code.co_firstlineno, inlinetime))
        # recursive calls are counted in entry.calls
        if entry.calls:
            calls = entry.calls
        else:
            calls = []
        if isinstance(code, str):
            lineno = 0
        else:
            lineno = code.co_firstlineno
        for subentry in calls:
            self._subentry(lineno, subentry)

    def _subentry(self, lineno, subentry):
        """Emit one subcall record attributed to caller line *lineno*."""
        out_file = self.out_file
        code = subentry.code
        totaltime = int(subentry.totaltime * 1000)
        #out_file.write('cob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            out_file.write('cfi=~\n')
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d 0\n' % (subentry.callcount,))
        else:
            out_file.write('cfi=%s\n' % (code.co_filename,))
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d %d\n' % (
                subentry.callcount, code.co_firstlineno))
        out_file.write('%d %d\n' % (lineno, totaltime))