Job Run Metadata 43/13943/2
authorMark Beierl <mark.beierl@emc.com>
Wed, 11 May 2016 16:54:48 +0000 (10:54 -0600)
committerMark Beierl <mark.beierl@emc.com>
Wed, 11 May 2016 17:23:48 +0000 (11:23 -0600)
Add the ability to store arbitrary metadata about the job for
later reporting.

Change-Id: If7f1d0a69111567d69db865baec6c20f9a1f494f
Signed-off-by: Mark Beierl <mark.beierl@emc.com>
rest_server.py
storperf/storperf_master.py
storperf/test_executor.py

index 45d77ff..b628a7e 100644 (file)
@@ -95,9 +95,16 @@ def results_page(job_id):
                      'Write Latency (ms)')
     writechart = chart.to_base64_image()
 
+    metadata = "<table>"
+    for key, value in params.iteritems():
+        metadata += "<TR><TD>" + key + "<TD>" + value + "</TR>"
+    metadata += "</table>"
+
     html = """<html><body>%s <BR>
     Number of VMs: %s <BR>
     Cinder volume size per VM: %s (GB) <BR>
+    Metadata: <BR>
+    %s<BR>
     <center>Read Latency Report <BR>
     <img src="data:image/png;base64,%s"/>
     <center>Write Latency Report <BR>
@@ -105,6 +112,7 @@ def results_page(job_id):
     </body></html>""" % (job_id,
                          params['agent_count'],
                          params['volume_size'],
+                         metadata,
                          readchart,
                          writechart,
                          )
@@ -233,7 +241,7 @@ class Job(Resource):
         self.logger = logging.getLogger(__name__)
 
     @swagger.operation(
-        notes='Fetch the average latency of the specified workload',
+        notes='Fetch the metrics of the specified workload',
         parameters=[
             {
                 "name": "id",
@@ -243,12 +251,21 @@ class Job(Resource):
                 "type": "string",
                 "allowMultiple": False,
                 "paramType": "query"
+            },
+            {
+                "name": "type",
+                "description": "The type of metrics to report.  May be "
+                "metrics (default), or metadata",
+                "required": False,
+                "type": "string",
+                "allowMultiple": False,
+                "paramType": "query"
             }
         ],
         responseMessages=[
             {
                 "code": 200,
-                "message": "Wordload ID found, response in JSON format"
+                "message": "Workload ID found, response in JSON format"
             },
             {
                 "code": 404,
@@ -257,9 +274,18 @@ class Job(Resource):
         ]
     )
     def get(self):
+
+        type = "metrics"
+        if request.args.get('type'):
+            type = request.args.get('type')
+
         workload_id = request.args.get('id')
-        print workload_id
-        return jsonify(storperf.fetch_results(workload_id))
+
+        if type == "metrics":
+            return jsonify(storperf.fetch_results(workload_id))
+
+        if type == "metadata":
+            return jsonify(storperf.fetch_metadata(workload_id))
 
     @swagger.operation(
         parameters=[
@@ -310,8 +336,12 @@ class Job(Resource):
                 storperf.workloads = request.json['workload']
             else:
                 storperf.workloads = None
+            if ('metadata' in request.json):
+                metadata = request.json['metadata']
+            else:
+                metadata = {}
 
-            job_id = storperf.execute_workloads()
+            job_id = storperf.execute_workloads(metadata)
 
             return jsonify({'job_id': job_id})
 
index b678bc8..33f0819 100644 (file)
@@ -283,7 +283,7 @@ class StorPerfMaster(object):
                 self._heat_client.stacks.delete(stack_id=self.stack_id)
             sleep(2)
 
-    def execute_workloads(self):
+    def execute_workloads(self, metadata={}):
         if (self.stack_id is None):
             raise ParameterError("ERROR: Stack does not exist")
 
@@ -305,11 +305,13 @@ class StorPerfMaster(object):
 
         self._test_executor.slaves = slaves
         job_id = self._test_executor.execute()
-        params = {}
+
+        params = metadata
         params['agent_count'] = self.agent_count
         params['public_network'] = self.public_network
         params['volume_size'] = self.volume_size
         self.job_db.record_workload_params(job_id, params)
+
         return job_id
 
     def terminate_workloads(self):
@@ -319,6 +321,9 @@ class StorPerfMaster(object):
         graphite_db = GraphiteDB()
         return graphite_db.fetch_averages(job_id)
 
+    def fetch_metadata(self, job_id):
+        return self.job_db.fetch_workload_params(job_id)
+
     def _setup_slave(self, slave):
         logger = logging.getLogger(__name__ + ":" + slave)
 
@@ -387,7 +392,7 @@ class StorPerfMaster(object):
     def _attach_to_openstack(self):
 
         time_since_last_auth = datetime.now() - self._last_openstack_auth
-        print time_since_last_auth.total_seconds()
+
         if (self._cinder_client is None or
                 time_since_last_auth.total_seconds() > 600):
             self._last_openstack_auth = datetime.now()
index 7fdf741..d1ad3ca 100644 (file)
@@ -207,7 +207,3 @@ class TestExecutor(object):
         self.job_db.end_workload(workload)
 
         self.logger.info("Ended " + workload.fullname)
-
-    def fetch_workloads(self, job, workload_name=""):
-        self.job_db.job_id = job
-        return self.job_db.fetch_workloads(workload_name)