perf: allow profiling of more than one run
author Pierre-Yves David <pierre-yves.david@octobus.net>
Sun, 14 Apr 2024 02:38:41 +0200
changeset 51589 90ef3e042e10
parent 51588 1574718fa62f
child 51590 a6bdd2e6f7cb
perf: allow profiling of more than one run

By default, we still profile the first run only. However, profiling more runs helps to understand side effects from one run to the next. So we add an option to select which runs are profiled.
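As a quick illustration (hedged: `perfstatus` is just one example perf command, and `$HGSRC` is a hypothetical path to a Mercurial source checkout), the new knob could be exercised like this; note that, per the code below, `perf.profiled-runs` only takes effect when `perf.profile-benchmark` is enabled:

  $ hg perfstatus \
        --config extensions.perf=$HGSRC/contrib/perf.py \
        --config perf.profile-benchmark=yes \
        --config perf.profiled-runs=0,2

This would profile the first and third benchmark runs instead of only the first one.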
contrib/perf.py
tests/test-contrib-perf.t
--- a/contrib/perf.py	Sun Apr 14 02:36:55 2024 +0200
+++ b/contrib/perf.py	Sun Apr 14 02:38:41 2024 +0200
@@ -20,7 +20,10 @@
 
 ``profile-benchmark``
   Enable profiling for the benchmarked section.
-  (The first iteration is benchmarked)
+  (by default, the first iteration is profiled)
+
+``profiled-runs``
+  list of iterations to profile (starting from 0)
 
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
@@ -318,6 +321,11 @@
     )
     configitem(
         b'perf',
+        b'profiled-runs',
+        default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf',
         b'run-limits',
         default=mercurial.configitems.dynamicdefault,
         experimental=True,
@@ -354,7 +362,12 @@
     )
     configitem(
         b'perf',
         b'profile-benchmark',
         default=mercurial.configitems.dynamicdefault,
     )
+    configitem(
+        b'perf',
+        b'profiled-runs',
+        default=mercurial.configitems.dynamicdefault,
+    )
     configitem(
@@ -491,9 +504,12 @@
         limits = DEFAULTLIMITS
 
     profiler = None
+    profiled_runs = set()
     if profiling is not None:
         if ui.configbool(b"perf", b"profile-benchmark", False):
-            profiler = profiling.profile(ui)
+            profiler = lambda: profiling.profile(ui)
+            for run in ui.configlist(b"perf", b"profiled-runs", [0]):
+                profiled_runs.add(int(run))
 
     prerun = getint(ui, b"perf", b"pre-run", 0)
     t = functools.partial(
@@ -503,6 +519,7 @@
         limits=limits,
         prerun=prerun,
         profiler=profiler,
+        profiled_runs=profiled_runs,
     )
     return t, fm
 
@@ -547,13 +564,14 @@
     limits=DEFAULTLIMITS,
     prerun=0,
     profiler=None,
+    profiled_runs=(0,),
 ):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
     if profiler is None:
-        profiler = NOOPCTX
+        profiler = lambda: NOOPCTX
     for i in range(prerun):
         if setup is not None:
             setup()
@@ -561,13 +579,16 @@
             func()
     keepgoing = True
     while keepgoing:
+        if count in profiled_runs:
+            prof = profiler()
+        else:
+            prof = NOOPCTX
         if setup is not None:
             setup()
         with context():
-            with profiler:
+            with prof:
                 with timeone() as item:
                     r = func()
-        profiler = NOOPCTX
         count += 1
         results.append(item[0])
         cstop = util.timer()
--- a/tests/test-contrib-perf.t	Sun Apr 14 02:36:55 2024 +0200
+++ b/tests/test-contrib-perf.t	Sun Apr 14 02:38:41 2024 +0200
@@ -59,8 +59,11 @@
     number of run to perform before starting measurement.
   
   "profile-benchmark"
-    Enable profiling for the benchmarked section. (The first iteration is
-    benchmarked)
+    Enable profiling for the benchmarked section. (by default, the first
+    iteration is profiled)
+  
+  "profiled-runs"
+    list of iterations to profile (starting from 0)
   
   "run-limits"
     Control the number of runs each benchmark will perform. The option value
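
For readers who want the shape of the change at a glance, here is a minimal, self-contained sketch of the per-run profiler selection that the patched `_timer` loop implements. This is not the real perf.py code: `fake_profiler` stands in for `profiling.profile(ui)`, `timer` stands in for `_timer`, and the benchmarked function is a dummy.

    import contextlib

    @contextlib.contextmanager
    def fake_profiler():
        # Stand-in for profiling.profile(ui); one fresh context per profiled run.
        print("profiling this run")
        yield

    NOOPCTX = contextlib.nullcontext()

    def timer(func, runs=5, profiler=None, profiled_runs=(0,)):
        # As in the patch, `profiler` is a factory rather than a single context
        # manager, so every selected run can get its own profiling context.
        if profiler is None:
            profiler = lambda: NOOPCTX
        for count in range(runs):
            # Runs listed in profiled_runs are profiled; the rest run under NOOPCTX.
            prof = profiler() if count in profiled_runs else NOOPCTX
            with prof:
                func()

    timer(lambda: sum(range(10000)), profiler=fake_profiler, profiled_runs={0, 2})

The key design point carried over from the patch: turning `profiler` into a factory lets the loop open a new profiling context on each selected run, whereas the old code consumed a single context on the first iteration and then swapped in NOOPCTX for the rest.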