Lava: Consider threads when computing time per event
authorFrancis Deslauriers <francis.deslauriers@efficios.com>
Thu, 19 Jan 2017 21:22:14 +0000 (16:22 -0500)
committerFrancis Deslauriers <francis.deslauriers@efficios.com>
Wed, 25 Jan 2017 19:42:41 +0000 (14:42 -0500)
Also, changed the time unit from nsec to usec

Signed-off-by: Francis Deslauriers <francis.deslauriers@efficios.com>
scripts/lttng-baremetal-tests/generate-plots.py
scripts/lttng-baremetal-tests/parse-results.py

index 6df689f82db7672bce28670058416ab5ebe10a1f..1db638a9216fc6d00b88269d3926b535fdf0ea68 100644 (file)
@@ -27,16 +27,16 @@ from matplotlib.ticker import MaxNLocator
 from cycler import cycler
 
 def rename_cols(df):
-    new_cols = {'baseline_1thr_pereventmean': 'basel_1thr',
-            'baseline_2thr_pereventmean': 'basel_2thr',
-            'baseline_4thr_pereventmean': 'basel_4thr',
-            'baseline_8thr_pereventmean': 'basel_8thr',
-            'baseline_16thr_pereventmean': 'basel_16thr',
-            'lttng_1thr_pereventmean': 'lttng_1thr',
-            'lttng_2thr_pereventmean': 'lttng_2thr',
-            'lttng_4thr_pereventmean': 'lttng_4thr',
-            'lttng_8thr_pereventmean': 'lttng_8thr',
-            'lttng_16thr_pereventmean': 'lttng_16thr'
+    new_cols = {'baseline_1thr_peritermean': 'basel_1thr',
+            'baseline_2thr_peritermean': 'basel_2thr',
+            'baseline_4thr_peritermean': 'basel_4thr',
+            'baseline_8thr_peritermean': 'basel_8thr',
+            'baseline_16thr_peritermean': 'basel_16thr',
+            'lttng_1thr_peritermean': 'lttng_1thr',
+            'lttng_2thr_peritermean': 'lttng_2thr',
+            'lttng_4thr_peritermean': 'lttng_4thr',
+            'lttng_8thr_peritermean': 'lttng_8thr',
+            'lttng_16thr_peritermean': 'lttng_16thr'
             }
     df.rename(columns=new_cols, inplace=True)
     return df
@@ -63,7 +63,7 @@ def create_plot(df, graph_type):
         ax.set_ylim(0)
         ax.grid()
         ax.set_xlabel('Jenkins Build ID')
-        ax.set_ylabel('Meantime per syscall [ns]')
+        ax.set_ylabel('Meantime per syscall [us]')
         ax.legend(labels=curr_df.columns.values, bbox_to_anchor=(1.2,1))
         ax.xaxis.set_major_locator(MaxNLocator(integer=True))
 
index 25fcd0d6c8bfeb49f318bc6d2b88e61975def907..544539f07db0408e8e7ff1c092020c25d3873c4b 100755 (executable)
@@ -7,27 +7,64 @@ import pandas as pd
 import sys
 
 def test_case(df):
-    df['nsecperiter']=(df['duration']*1000)/(df['nbiter'])
-    stdev = pd.DataFrame({'perevent_stdev' : 
-                          df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['nsecperiter'].std()}).reset_index()
-    mean = pd.DataFrame({'perevent_mean' :
-                         df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['nsecperiter'].mean()}).reset_index()
-    mem_mean = pd.DataFrame({'mem_mean' :
-                             df.groupby(['nbthreads','tracer','testcase','sleeptime'])['maxmem'].mean()}).reset_index()
-    mem_stdev = pd.DataFrame({'mem_stdev' :
-                              df.groupby(['nbthreads','tracer','testcase','sleeptime'])['maxmem'].std()}).reset_index()
-    tmp = mean.merge(stdev)
-    tmp = tmp.merge(mem_mean)
-    tmp = tmp.merge(mem_stdev)
+    # Duration is in usec
+    # usecPerIter = Duration/(average number of iterations per thread)
+    df['usecperiter'] = (df['nbthreads'] * df['duration']) / df['nbiter']
+
+    periter_mean = pd.DataFrame({'periter_mean' :
+                         df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['usecperiter'].mean()}).reset_index()
+
+    periter_stdev = pd.DataFrame({'periter_stdev' :
+                          df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['usecperiter'].std()}).reset_index()
+
+    nbiter_mean = pd.DataFrame({'nbiter_mean' :
+                          df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['nbiter'].mean()}).reset_index()
+
+    nbiter_stdev = pd.DataFrame({'nbiter_stdev' :
+                          df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['nbiter'].std()}).reset_index()
+
+    duration_mean = pd.DataFrame({'duration_mean' :
+                         df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['duration'].mean()}).reset_index()
+
+    duration_stdev = pd.DataFrame({'duration_stdev' :
+                         df.groupby(['nbthreads', 'tracer', 'testcase','sleeptime'])['duration'].std()}).reset_index()
+
+    tmp = periter_mean.merge(periter_stdev)
+
+    tmp = tmp.merge(nbiter_mean)
+    tmp = tmp.merge(nbiter_stdev)
+
+    tmp = tmp.merge(duration_mean)
+    tmp = tmp.merge(duration_stdev)
+
+    # if there is any NaN or None value in the DF we raise an exception
+    if tmp.isnull().values.any():
+        raise Exception('NaN value found in dataframe')
 
     for i, row in tmp.iterrows():
-        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'pereventmean'])
-        yield( {"name": testcase_name, "result": "pass", "units": "nsec/event",
-            "measurement": str(row['perevent_mean'])})
+        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'peritermean'])
+        yield( {"name": testcase_name, "result": "pass", "units": "usec/iter",
+            "measurement": str(row['periter_mean'])})
+
+        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'periterstdev'])
+        yield( {"name": testcase_name, "result": "pass", "units": "usec/iter",
+            "measurement": str(row['periter_stdev'])})
+
+        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'nbitermean'])
+        yield( {"name": testcase_name, "result": "pass", "units": "iterations",
+            "measurement": str(row['nbiter_mean'])})
+
+        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'nbiterstdev'])
+        yield( {"name": testcase_name, "result": "pass", "units": "iterations",
+            "measurement": str(row['nbiter_stdev'])})
+
+        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'durationmean'])
+        yield( {"name": testcase_name, "result": "pass", "units": "usec",
+            "measurement": str(row['duration_mean'])})
 
-        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'pereventstdev'])
-        yield( {"name": testcase_name, "result": "pass", "units": "nsec/event",
-            "measurement": str(row['perevent_stdev'])})
+        testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'durationstdev'])
+        yield( {"name": testcase_name, "result": "pass", "units": "usec",
+            "measurement": str(row['duration_stdev'])})
 
         testcase_name='_'.join([row['tracer'],str(row['nbthreads'])+'thr', 'memmean'])
         yield( {"name": testcase_name, "result": "pass", "units": "kB",
@@ -52,8 +89,7 @@ def main():
             '--units', res['units']])
 
         # Save the results to write to the CSV file
-        if 'pereventmean' in res['name']:
-            results[res['name']]=res['measurement']
+        results[res['name']]=res['measurement']
 
     # Write the dictionary to a csv file where each key is a column
     with open('processed_results.csv', 'w') as output_csv:
This page took 0.027079 seconds and 4 git commands to generate.