# I made one modification to profile so that it returns a pair
# instead of just the Stats object

from __future__ import absolute_import

import cPickle
import os
import sys
import thread
import threading

from _lsprof import Profiler, profiler_entry

from bzrlib import errors

__all__ = ['profile', 'Stats']


def profile(f, *args, **kwds):
    """Run a function profile.

    Exceptions are not caught: if you need stats even when exceptions are
    raised, pass in a closure that will catch the exceptions and transform
    them appropriately for your driver function.

    Important caveat: only one profile can execute at a time. See BzrProfiler
    for details.

    :return: The function's return value and a stats object.
    """
    profiler = BzrProfiler()
    profiler.start()
    try:
        ret = f(*args, **kwds)
    finally:
        stats = profiler.stop()
    return ret, stats
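
# Illustrative usage sketch (not part of the original module): profile()
# returns the wrapped callable's result together with a Stats object.
#
#   result, stats = profile(sorted, [3, 1, 2])
#   stats.sort('inlinetime')
#   stats.pprint()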


class BzrProfiler(object):
    """Bzr utility wrapper around Profiler.

    For most uses the module level 'profile()' function will be suitable.
    However profiling when a simple wrapped function isn't available may
    be easier to accomplish using this class.

    To use it, create a BzrProfiler and call start() on it. Some arbitrary
    time later call stop() to stop profiling and retrieve the statistics
    from the code executed in the interim. An illustrative sketch follows
    the class definition below.

    Note that profiling involves a threading.Lock around the actual profiling.
    This is needed because profiling involves global manipulation of the
    Python interpreter state. As such you cannot perform multiple profiles at
    once: a second profiler will block until the first finishes, unless the
    global bzrlib.lsprof.BzrProfiler.profiler_block is set to 0, in which
    case starting a second profile fails rather than blocking.
    """

    profiler_block = 1
    """Serialise rather than failing to profile concurrent profile requests."""

    profiler_lock = threading.Lock()
    """Global lock used to serialise profiles."""

    def start(self):
        """Start profiling.

        This hooks into threading and will record all calls made until
        stop() is called.
        """
        self._g_threadmap = {}
        self.p = Profiler()
        permitted = self.__class__.profiler_lock.acquire(
            self.__class__.profiler_block)
        if not permitted:
            raise errors.InternalBzrError(msg="Already profiling something")
        try:
            self.p.enable(subcalls=True)
            threading.setprofile(self._thread_profile)
        except:
            self.__class__.profiler_lock.release()
            raise

    def stop(self):
        """Stop profiling.

        This unhooks from threading and cleans up the profiler, returning
        the gathered Stats object.

        :return: A bzrlib.lsprof.Stats object.
        """
        try:
            self.p.disable()
            for pp in self._g_threadmap.values():
                pp.disable()
            threading.setprofile(None)
            p = self.p
            self.p = None
            threads = {}
            for tid, pp in self._g_threadmap.items():
                threads[tid] = Stats(pp.getstats(), {})
            self._g_threadmap = None
            return Stats(p.getstats(), threads)
        finally:
            self.__class__.profiler_lock.release()

    def _thread_profile(self, f, *args, **kwds):
        # we lose the first profile point for a new thread in order to
        # trampoline a new Profile object into place
        thr = thread.get_ident()
        self._g_threadmap[thr] = p = Profiler()
        # this overrides our sys.setprofile hook:
        p.enable(subcalls=True, builtins=True)
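
# Illustrative usage sketch (assumed names, not part of the original module):
#
#   profiler = BzrProfiler()
#   profiler.start()
#   try:
#       run_some_operation()   # hypothetical workload
#   finally:
#       stats = profiler.stop()
#   stats.save('callgrind.out.example')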


class Stats(object):
    """Wrapper around the collected data.

    A Stats instance is created when the profiler finishes. Normal
    usage is to use save() to write out the data to a file, or pprint()
    to write human-readable information to the command line.
    """

    def __init__(self, data, threads):
        self.data = data
        self.threads = threads

    def sort(self, crit="inlinetime"):
        """Sort the data by the supplied criteria.

        :param crit: the data attribute used as the sort key."""
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        self.data.sort(lambda b, a: cmp(getattr(a, crit),
                                        getattr(b, crit)))
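
    # Illustrative sort keys (sketch): any profiler_entry attribute works,
    # e.g. stats.sort('totaltime') or stats.sort('callcount').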

    def freeze(self):
        """Replace all references to code objects with string
        descriptions; this makes it possible to pickle the instance."""
        for i in range(len(self.data)):
            e = self.data[i]
            if not isinstance(e.code, str):
                self.data[i] = type(e)((label(e.code),) + e[1:])
            if e.calls:
                for j in range(len(e.calls)):
                    se = e.calls[j]
                    if not isinstance(se.code, str):
                        e.calls[j] = type(se)((label(se.code),) + se[1:])
        for s in self.threads.values():
            s.freeze()

    def calltree(self, file):
        """Output profiling data in calltree format (for KCacheGrind)."""
        _CallTreeFilter(self.data).output(file)

    def save(self, filename, format=None):
        """Save profiling data to a file.

        :param filename: the name of the output file
        :param format: 'txt' for a text representation;
            'callgrind' for calltree format;
            otherwise a pickled Python object. A format of None indicates
            that the format to use is to be found from the filename. If
            the name starts with callgrind.out, callgrind format is used;
            otherwise the format is given by the filename extension.
        """
        if format is None:
            basename = os.path.basename(filename)
            if basename.startswith('callgrind.out'):
                format = "callgrind"
            else:
                ext = os.path.splitext(filename)[1]
                if len(ext) > 1:
                    format = ext[1:]
        outfile = open(filename, 'wb')
        try:
            if format == "callgrind":
                self.calltree(outfile)
            elif format == "txt":
                self.pprint(file=outfile)
            else:
                self.freeze()
                cPickle.dump(self, outfile, 2)
        finally:
            outfile.close()
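
    # Illustrative format selection (sketch of the behaviour above):
    #   stats.save('callgrind.out.mybranch')  # callgrind, from the prefix
    #   stats.save('profile.txt')             # text, from the extension
    #   stats.save('profile.pkl')             # anything else is pickled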


class _CallTreeFilter(object):
    """Converter of a Stats object to input suitable for KCacheGrind.

    This code is taken from http://ddaa.net/blog/python/lsprof-calltree
    with the changes made by J.P. Calderone and Itamar applied. Note that
    isinstance(code, str) needs to be used at times to determine if the code
    object is actually an external code object (with a filename, etc.) or
    a Python built-in.
    """

    def __init__(self, data):
        self.data = data
        self.out_file = None

    def output(self, out_file):
        self.out_file = out_file
        out_file.write('events: Ticks\n')
        self._print_summary()
        for entry in self.data:
            self._entry(entry)

    def _print_summary(self):
        max_cost = 0
        for entry in self.data:
            totaltime = int(entry.totaltime * 1000)
            max_cost = max(max_cost, totaltime)
        self.out_file.write('summary: %d\n' % (max_cost,))

    def _entry(self, entry):
        out_file = self.out_file
        code = entry.code
        inlinetime = int(entry.inlinetime * 1000)
        #out_file.write('ob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            out_file.write('fi=~\n')
        else:
            out_file.write('fi=%s\n' % (code.co_filename,))
        out_file.write('fn=%s\n' % (label(code, True),))
        if isinstance(code, str):
            out_file.write('0 %s\n' % (inlinetime,))
        else:
            out_file.write('%d %d\n' % (code.co_firstlineno, inlinetime))
        # recursive calls are counted in entry.calls
        if entry.calls:
            calls = entry.calls
        else:
            calls = []
        if isinstance(code, str):
            lineno = 0
        else:
            lineno = code.co_firstlineno
        for subentry in calls:
            self._subentry(lineno, subentry)
        out_file.write('\n')

    def _subentry(self, lineno, subentry):
        out_file = self.out_file
        code = subentry.code
        totaltime = int(subentry.totaltime * 1000)
        #out_file.write('cob=%s\n' % (code.co_filename,))
        if isinstance(code, str):
            out_file.write('cfi=~\n')
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d 0\n' % (subentry.callcount,))
        else:
            out_file.write('cfi=%s\n' % (code.co_filename,))
            out_file.write('cfn=%s\n' % (label(code, True),))
            out_file.write('calls=%d %d\n' % (
                subentry.callcount, code.co_firstlineno))
        out_file.write('%d %d\n' % (lineno, totaltime))
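
    # Rough shape of one emitted record (sketch derived from the writes
    # above; the values shown are invented):
    #   fi=/path/to/module.py
    #   fn=my_function module:42
    #   42 1234
    #   cfi=/path/to/other.py
    #   cfn=callee other:7
    #   calls=3 7
    #   42 56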


_fn2mod = {}


def label(code, calltree=False):
    if isinstance(code, str):
        return code
    try:
        mname = _fn2mod[code.co_filename]
    except KeyError:
        for k, v in sys.modules.items():
            if v is None:
                continue
            if getattr(v, '__file__', None) is None:
                continue
            if not isinstance(v.__file__, str):
                continue