Ability to query job status 93/24693/4
author    Mark Beierl <mark.beierl@dell.com>  Tue, 22 Nov 2016 20:45:17 +0000 (15:45 -0500)
committer Mark Beierl <mark.beierl@dell.com>  Wed, 23 Nov 2016 15:54:26 +0000 (10:54 -0500)
Provide a method to query the status of a job by id

Change-Id: I4d3567995e7c7c3359596346009358276e79d65c
JIRA: STORPERF-89
Signed-off-by: Mark Beierl <mark.beierl@dell.com>
rest_server.py
storperf/storperf_master.py
storperf/test_executor.py

diff --git a/rest_server.py b/rest_server.py
index 2b5bbfb..d852bbb 100644
--- a/rest_server.py
+++ b/rest_server.py
@@ -287,6 +287,9 @@ class Job(Resource):
         if type == "metadata":
             return jsonify(storperf.fetch_metadata(workload_id))
 
+        if type == "status":
+            return jsonify({"Status": storperf.fetch_job_status(workload_id)})
+
     @swagger.operation(
         parameters=[
             {
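
For illustration, a client could poll the new status query until the job finishes. This is a minimal sketch, not part of the change: the endpoint URL, port, and query parameter names ("id", "type") are assumptions based on the handler above; only the "status" type and the {"Status": ...} response shape come from this diff.

import time

import requests

# Assumed endpoint and parameters; adjust to the deployed StorPerf API.
STORPERF_URL = "http://127.0.0.1:5000/api/v1.0/jobs"
job_id = "workload-id-returned-at-submit-time"

while True:
    response = requests.get(STORPERF_URL,
                            params={"id": job_id, "type": "status"})
    response.raise_for_status()
    status = response.json()["Status"]  # "Running" or "Completed"
    print("Job %s is %s" % (job_id, status))
    if status == "Completed":
        break
    time.sleep(10)
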
diff --git a/storperf/storperf_master.py b/storperf/storperf_master.py
index 91b1bd1..99df47f 100644
--- a/storperf/storperf_master.py
+++ b/storperf/storperf_master.py
@@ -331,6 +331,9 @@ class StorPerfMaster(object):
     def fetch_metadata(self, job_id):
         return self.job_db.fetch_workload_params(job_id)
 
+    def fetch_job_status(self, job_id):
+        return self._test_executor.execution_status(job_id)
+
     def _setup_slave(self, slave):
         logger = logging.getLogger(__name__ + ":" + slave)
 
diff --git a/storperf/test_executor.py b/storperf/test_executor.py
index fc51adc..c984175 100644
--- a/storperf/test_executor.py
+++ b/storperf/test_executor.py
@@ -7,8 +7,16 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import copy
+import imp
+import logging
 from os import listdir
+import os
 from os.path import isfile, join
+import sched
+from threading import Thread
+import time
+
 from storperf.carbon.converter import Converter
 from storperf.carbon.emitter import CarbonMetricTransmitter
 from storperf.db import test_results_db
@@ -16,13 +24,6 @@ from storperf.db.graphite_db import GraphiteDB
 from storperf.db.job_db import JobDB
 from storperf.fio.fio_invoker import FIOInvoker
 from storperf.utilities import dictionary
-from threading import Thread
-import copy
-import imp
-import logging
-import os
-import sched
-import time
 
 
 class UnknownWorkload(Exception):
@@ -165,6 +166,15 @@ class TestExecutor(object):
             terminated_hosts.append(workload.remote_host)
         return terminated_hosts
 
+    def execution_status(self, job_id):
+        if self.job_db.job_id != job_id:
+            return "Completed"
+
+        if (self._terminated is False):
+            return "Running"
+
+        return "Completed"
+
     def execute_workloads(self):
         self._terminated = False
         self.logger.info("Starting job %s" % (self.job_db.job_id))
@@ -232,6 +242,8 @@ class TestExecutor(object):
 
             self.logger.info("Completed workload %s" % (workload_name))
         self.logger.info("Completed job %s" % (self.job_db.job_id))
+        self._terminated = True
+
         end_time = time.time()
         pod_name = dictionary.get_key_from_dict(self.metadata,
                                                 'pod_name',
@@ -263,7 +275,7 @@ class TestExecutor(object):
             payload['metrics'] = graphite_db.fetch_averages(self.job_db.job_id)
             criteria = {}
             criteria['block_sizes'] = self.block_sizes
-            criteria['queue_depths'] = self.block_sizes
+            criteria['queue_depths'] = self.queue_depths
 
             try:
                 test_results_db.push_results_to_db(test_db,
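
The execution_status() method added above reports "Completed" for any job id other than the one currently loaded, "Running" while the current job has not yet set _terminated, and "Completed" once execute_workloads() finishes and sets the flag. A unit-test style sketch of those three cases follows; the test class and fake job DB are hypothetical, and it assumes TestExecutor() can be constructed in a unit-test environment.

import unittest

from storperf.test_executor import TestExecutor


class _FakeJobDB(object):
    # Stand-in for JobDB; only the job_id attribute matters here.
    job_id = "current-job"


class ExecutionStatusTest(unittest.TestCase):

    def setUp(self):
        self.executor = TestExecutor()
        self.executor.job_db = _FakeJobDB()

    def test_other_job_id_reports_completed(self):
        # A job id other than the one currently loaded is assumed finished.
        self.assertEqual("Completed",
                         self.executor.execution_status("some-other-job"))

    def test_current_job_reports_running_until_terminated(self):
        self.executor._terminated = False
        self.assertEqual("Running",
                         self.executor.execution_status("current-job"))

    def test_current_job_reports_completed_when_terminated(self):
        self.executor._terminated = True
        self.assertEqual("Completed",
                         self.executor.execution_status("current-job"))


if __name__ == "__main__":
    unittest.main()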