3
3
# I made one modification to profile so that it returns a pair
4
4
# instead of just the Stats object
9
11
from _lsprof import Profiler, profiler_entry
13
from bzrlib import errors
11
15
__all__ = ['profile', 'Stats']
16
def _thread_profile(f, *args, **kwds):
    """Profile hook installed into new threads via threading.setprofile().

    We lose the first profile point for a new thread in order to
    trampoline a new Profiler object into place for that thread.

    :param f: the frame/event arguments supplied by the profiling hook;
        they are ignored, this call is only used as the trampoline trigger.
    """
    # NOTE(review): `thread` and `_g_threadmap` are module-level names
    # defined outside this chunk — confirm they are in scope.
    thr = thread.get_ident()
    _g_threadmap[thr] = p = Profiler()
    # this overrides our sys.setprofile hook:
    p.enable(subcalls=True, builtins=True)
def profile(f, *args, **kwds):
    """Run a function profile.

    Exceptions are not caught: If you need stats even when exceptions are to be
    raised, pass in a closure that will catch the exceptions and transform them
    appropriately for your driver function.

    Important caveat: only one profile can execute at a time. See BzrProfiler
    for details.

    :return: The functions return value and a stats object.
    """
    profiler = BzrProfiler()
    profiler.start()
    # stop() must run even if f raises, so the global profiler lock and
    # the sys/threading profile hooks are always released.
    try:
        ret = f(*args, **kwds)
    finally:
        stats = profiler.stop()
    return ret, stats
class BzrProfiler(object):
    """Bzr utility wrapper around Profiler.

    For most uses the module level 'profile()' function will be suitable.
    However profiling when a simple wrapped function isn't available may
    be easier to accomplish using this class.

    To use it, create a BzrProfiler and call start() on it. Some arbitrary
    time later call stop() to stop profiling and retrieve the statistics
    from the code executed in the interim.

    Note that profiling involves a threading.Lock around the actual profiling.
    This is needed because profiling involves global manipulation of the python
    interpreter state. As such you cannot perform multiple profiles at once.
    Trying to do so will lock out the second profiler unless the global
    bzrlib.lsprof.BzrProfiler.profiler_block is set to 0. Setting it to 0 will
    cause profiling to fail rather than blocking.
    """

    # Serialise rather than failing to profile concurrent profile requests.
    profiler_block = 1

    # Global lock used to serialise profiles.
    profiler_lock = threading.Lock()

    def start(self):
        """Start profiling.

        This hooks into threading and will record all calls made until
        stop() is called.

        :raises errors.InternalBzrError: when another profile is already
            running and profiler_block is 0.
        """
        self._g_threadmap = {}
        self.p = Profiler()
        # A blocking acquire when profiler_block is truthy, a non-blocking
        # attempt when it is 0.
        permitted = self.__class__.profiler_lock.acquire(
            self.__class__.profiler_block)
        if not permitted:
            raise errors.InternalBzrError(msg="Already profiling something")
        try:
            self.p.enable(subcalls=True)
            threading.setprofile(self._thread_profile)
        except:
            # Never leave the global lock held if hooking up failed.
            self.__class__.profiler_lock.release()
            raise

    def stop(self):
        """Stop profiling.

        This unhooks from threading and cleans up the profiler, returning
        the gathered Stats object.

        :return: A bzrlib.lsprof.Stats object.
        """
        try:
            self.p.disable()
            for pp in self._g_threadmap.values():
                pp.disable()
            threading.setprofile(None)
            p = self.p
            self.p = None
            threads = {}
            for tid, pp in self._g_threadmap.items():
                threads[tid] = Stats(pp.getstats(), {})
            self._g_threadmap = None
            return Stats(p.getstats(), threads)
        finally:
            # Release the serialisation lock even if teardown raised.
            self.__class__.profiler_lock.release()

    def _thread_profile(self, f, *args, **kwds):
        # we lose the first profile point for a new thread in order to
        # trampoline a new Profile object into place
        thr = thread.get_ident()
        self._g_threadmap[thr] = p = Profiler()
        # this overrides our sys.setprofile hook:
        p.enable(subcalls=True, builtins=True)
class Stats(object):
    """Wrapper around the collected data.

    A Stats instance is created when the profiler finishes. Normal
    usage is to use save() to write out the data to a file, or pprint()
    to write human-readable information to the command line.
    """

    def __init__(self, data, threads):
        """Create a Stats object.

        :param data: sequence of profiler_entry records.
        :param threads: mapping of thread id -> Stats for that thread.
        """
        self.data = data
        self.threads = threads

    def sort(self, crit="inlinetime"):
        """Sort the data by the supplied critera.

        :param crit: the data attribute used as the sort key.
        :raises ValueError: if crit is not a profiler_entry attribute.
        """
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        # Descending sort on the chosen attribute, applied to the top-level
        # entries and to each entry's recorded subcalls.
        key_func = lambda entry: getattr(entry, crit)
        self.data.sort(key=key_func, reverse=True)
        for e in self.data:
            if e.calls:
                e.calls.sort(key=key_func, reverse=True)

    def calltree(self, file):
        """Output profiling data in calltree format (for KCacheGrind)."""
        _CallTreeFilter(self.data).output(file)

    def save(self, filename, format=None):
        """Save profiling data to a file.

        :param filename: the name of the output file
        :param format: 'txt' for a text representation;
            'callgrind' for calltree format;
            otherwise a pickled Python object. A format of None indicates
            that the format to use is to be found from the filename. If
            the name starts with callgrind.out, callgrind format is used
            otherwise the format is given by the filename extension.
        """
        if format is None:
            basename = os.path.basename(filename)
            if basename.startswith('callgrind.out'):
                format = "callgrind"
            else:
                ext = os.path.splitext(filename)[1]
                if len(ext) > 1:
                    format = ext[1:]
        outfile = open(filename, 'wb')
        # Close the file even if formatting/pickling raises.
        try:
            if format == "callgrind":
                self.calltree(outfile)
            elif format == "txt":
                # NOTE(review): pprint() is referenced here but defined
                # outside this chunk — confirm it exists on this class.
                self.pprint(file=outfile)
            else:
                # NOTE(review): upstream lsprof freezes code objects
                # (self.freeze()) before pickling; confirm whether this
                # module defines freeze() and call it here if so.
                cPickle.dump(self, outfile, 2)
        finally:
            outfile.close()
class _CallTreeFilter(object):
    """Converter of a Stats object to input suitable for KCacheGrind.

    This code is taken from http://ddaa.net/blog/python/lsprof-calltree
    with the changes made by J.P. Calderone and Itamar applied. Note that
    isinstance(code, str) needs to be used at times to determine if the code
    object is actually an external code object (with a filename, etc.) or
    a Python builtin represented by a bare string.
    """

    def __init__(self, data):
        """:param data: sequence of profiler entries to convert."""
        self.data = data
        self.out_file = None

    def output(self, out_file):
        """Write the whole calltree report to out_file."""
        self.out_file = out_file
        out_file.write('events: Ticks\n')
        self._print_summary()
        for entry in self.data:
            self._entry(entry)

    def _print_summary(self):
        # The summary line carries the largest total cost seen; KCacheGrind
        # uses it to scale its displays.
        max_cost = 0
        for entry in self.data:
            totaltime = int(entry.totaltime * 1000)
            max_cost = max(max_cost, totaltime)
        self.out_file.write('summary: %d\n' % (max_cost,))

    def _entry(self, entry):
        # Emit one 'fn' record: position lines, inline cost, then subcalls.
        out_file = self.out_file
        code = entry.code
        inlinetime = int(entry.inlinetime * 1000)
        #out_file.write('ob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            # Builtins have no source file; '~' is the calltree placeholder.
            out_file.write('fi=~\n')
        else:
            out_file.write('fi=%s\n' % (code.co_filename,))
        # NOTE(review): label() is defined elsewhere in this module.
        out_file.write('fn=%s\n' % (label(code, True),))
        if isinstance(code, str):
            out_file.write('0 %s\n' % (inlinetime,))
        else:
            out_file.write('%d %d\n' % (code.co_firstlineno, inlinetime))
        # recursive calls are counted in entry.calls
        if entry.calls:
            calls = entry.calls
        else:
            calls = []
        if isinstance(code, str):
            lineno = 0
        else:
            lineno = code.co_firstlineno
        for subentry in calls:
            self._subentry(lineno, subentry)
        out_file.write('\n')

    def _subentry(self, lineno, subentry):
        # Emit one 'cfn' (called-function) record beneath the current entry.
        out_file = self.out_file
        code = subentry.code
        totaltime = int(subentry.totaltime * 1000)
        #out_file.write('cob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            out_file.write('cfi=~\n')
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d 0\n' % (subentry.callcount,))
        else:
            out_file.write('cfi=%s\n' % (code.co_filename,))
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d %d\n' % (
                subentry.callcount, code.co_firstlineno))
        out_file.write('%d %d\n' % (lineno, totaltime))