Testresults DB Alignment 85/27685/1
author    mbeierl <mark.beierl@dell.com>
Fri, 27 Jan 2017 21:35:37 +0000 (16:35 -0500)
committer mbeierl <mark.beierl@dell.com>
Fri, 27 Jan 2017 21:35:37 +0000 (16:35 -0500)
Changes the key names sent to the testresults db to align with other
projects.

Defines a clear PASS/FAIL result based on whether steady state was
reached for all runs.  If even one run fails to achieve steady state,
the whole job is labelled as FAIL.
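
In effect the data handler now ANDs together every per-run steady
state flag.  A minimal sketch of that criteria logic (flag names taken
from the unit test fixture below, not exhaustive):

    # Each key is one run; the job only passes if every run
    # reached steady state.
    steady_state = {
        "rr.queue-depth.8.block-size.16384": True,
        "rr.queue-depth.8.block-size.2048": False,
    }
    criteria = 'PASS' if all(steady_state.values()) else 'FAIL'
    # -> 'FAIL': one run never reached steady state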

Change-Id: I73143779453689eea93829079388ed8035134aa3
JIRA: STORPERF-100
Signed-off-by: mbeierl <mark.beierl@dell.com>
ci/daily.sh
ci/start_job.sh
storperf/db/test_results_db.py
storperf/utilities/data_handler.py
tests/utilities_tests/data_handler_test.py

ci/daily.sh
index 80263ae..11af7f4 100755 (executable)
@@ -14,11 +14,6 @@ then
     WORKSPACE=`pwd`
 fi
 
-if [ -d $WORKSPACE/ci/job ]
-then
-    sudo rm -rf $WORKSPACE/ci/job
-fi
-
 git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/ci/job/releng
 
 virtualenv $WORKSPACE/ci/job/storperf_daily_venv
@@ -39,9 +34,21 @@ then
 fi
 export POD_NAME=$NODE_NAME
 
+# Unless we get a job that automatically deploys Apex or other installers,
+# we have to rely on there being a value written into a file to tell us
+# what scenario was deployed.  This file needs to tell us:
+# DEPLOYED_SCENARIO
+# DISK_TYPE
+if [ -f ~/jenkins-env.rc ]
+then
+    . ~/jenkins-env.rc
+fi
+export SCENARIO_NAME=$DEPLOYED_SCENARIO
+
 sudo find $WORKSPACE/ -name '*.db' -exec rm -fv {} \;
 
 $WORKSPACE/ci/generate-admin-rc.sh
+echo "TEST_DB_URL=http://testresults.opnfv.org/test/api/v1" >> $WORKSPACE/ci/job/admin.rc
 $WORKSPACE/ci/generate-environment.sh
 
 . $WORKSPACE/ci/job/environment.rc
@@ -51,7 +58,7 @@ do
     export "$env"
 done < $WORKSPACE/ci/job/admin.rc
 
-echo "TEST_DB_URL=http://testresults.opnfv.org/test/api/v1" >> $WORKSPACE/ci/job/admin.rc
+export VERSION=`echo ${BUILD_TAG#*daily-} | cut -d- -f1`
 
 echo ==========================================================================
 echo Environment
@@ -71,7 +78,6 @@ echo ==========================================================================
 export QUEUE_DEPTH=8
 export BLOCK_SIZE=16384
 export WORKLOAD=_warm_up
-export SCENARIO_NAME="${CINDER_BACKEND}_${WORKLOAD}"
 WARM_UP=`$WORKSPACE/ci/start_job.sh | awk '/job_id/ {print $2}' | sed 's/"//g'`
 
 WARM_UP_STATUS=`curl -s -X GET "http://127.0.0.1:5000/api/v1.0/jobs?id=$WARM_UP&type=status" \
@@ -91,9 +97,7 @@ echo ==========================================================================
 export WORKLOAD=ws,wr,rs,rr,rw
 export BLOCK_SIZE=2048,8192,16384
 export QUEUE_DEPTH=1,2,8
-export SCENARIO_NAME="${CINDER_BACKEND}_${WORKLOAD}"
-export VERSION
-export BUILD_TAG
+export TEST_CASE=snia_steady_state
 
 JOB=`$WORKSPACE/ci/start_job.sh \
     | awk '/job_id/ {print $2}' | sed 's/"//g'`
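
As the comment in the hunk above notes, ~/jenkins-env.rc is expected
to be a plain shell fragment written by the deployment job.  A
hypothetical example of its contents would be
export DEPLOYED_SCENARIO=os-nosdn-nofeature-ha and
export DISK_TYPE=SSD (both values illustrative only).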
ci/start_job.sh
index 51f35cb..86b8fc8 100755 (executable)
@@ -13,17 +13,23 @@ cat << EOF > body.json
    "block_sizes": "${BLOCK_SIZE}",
    "nowarm": "string",
    "nossd": "string",
-   "deadline": 600,
+   "deadline": 1200,
    "queue_depths": "${QUEUE_DEPTH}",
    "workload": "${WORKLOAD}",
     "metadata": {
-       "disk_type": "SSD",
-      "pod_name": "${POD_NAME}",
-      "scenario_name": "${SCENARIO_NAME}",
-      "storage_node_count": ${CINDER_NODES}
+        "disk_type": "${DISK_TYPE}",
+        "pod_name": "${POD_NAME}",
+        "scenario_name": "${SCENARIO_NAME}",
+        "storage_node_count": ${CINDER_NODES},
+        "version": "${VERSION}",
+        "build_tag": "${BUILD_TAG}",
+        "test_case": "${TEST_CASE}"
    }
 }
 EOF
 
-curl -s -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' \
-    -d @body.json http://127.0.0.1:5000/api/v1.0/jobs
\ No newline at end of file
+cat body.json
+
+curl -s -X POST --header 'Content-Type: application/json' \
+    --header 'Accept: application/json' \
+    -d @body.json http://127.0.0.1:5000/api/v1.0/jobs
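
For illustration, a rough Python equivalent of the POST this script
performs, abridged (nowarm/nossd omitted) and with hypothetical values
substituted for the environment variables; deadline, workloads, and
test_case match the values exported in ci/daily.sh above:

    import json
    import requests

    body = {
        "block_sizes": "2048,8192,16384",               # BLOCK_SIZE
        "deadline": 1200,
        "queue_depths": "1,2,8",                        # QUEUE_DEPTH
        "workload": "ws,wr,rs,rr,rw",                   # WORKLOAD
        "metadata": {
            "disk_type": "SSD",                         # DISK_TYPE
            "pod_name": "intel-pod9",                   # hypothetical
            "scenario_name": "os-nosdn-nofeature-ha",   # hypothetical
            "storage_node_count": 5,
            "version": "danube",                        # hypothetical
            "build_tag": "jenkins-storperf-daily-10",   # hypothetical
            "test_case": "snia_steady_state",
        },
    }
    response = requests.post(
        "http://127.0.0.1:5000/api/v1.0/jobs",
        headers={"Content-Type": "application/json",
                 "Accept": "application/json"},
        data=json.dumps(body))
    print(response.json())  # contains the job_id the CI scripts poll on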
storperf/db/test_results_db.py
index 75cb05d..bb328db 100644 (file)
@@ -9,7 +9,6 @@
 
 import json
 import os
-
 import requests
 
 
@@ -29,17 +28,18 @@ def get_installer_type(logger=None):
 
 def push_results_to_db(db_url, project, case_name,
                        test_start, test_stop, logger, pod_name,
-                       version, scenario, criteria, build_tag, payload):
+                       version, scenario, criteria, build_tag, details):
     """
     POST results to the Result target DB
     """
     url = db_url + "/results"
     installer = get_installer_type(logger)
+
     params = {"project_name": project, "case_name": case_name,
-              "start_date": test_start, "stop_date": test_stop,
               "pod_name": pod_name, "installer": installer,
               "version": version, "scenario": scenario, "criteria": criteria,
-              "build_tag": build_tag, "details": payload}
+              "build_tag": build_tag, "start_date": test_start,
+              "stop_date": test_stop, "details": details}
 
     headers = {'Content-Type': 'application/json'}
     try:
@@ -56,5 +56,5 @@ def push_results_to_db(db_url, project, case_name,
         logger.error("Error [push_results_to_db('%s', '%s', '%s', " +
                      "'%s', '%s', '%s', '%s', '%s', '%s')]:" %
                      (db_url, project, case_name, pod_name, version,
-                      scenario, criteria, build_tag, payload[:512]), e)
+                      scenario, criteria, build_tag, details[:512]), e)
         return False
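
A minimal usage sketch for the renamed parameter, with illustrative
argument values (pod_name, version, and build_tag are hypothetical):

    import logging
    from storperf.db import test_results_db

    logger = logging.getLogger(__name__)
    test_results_db.push_results_to_db(
        "http://testresults.opnfv.org/test/api/v1",  # db_url
        "storperf",                                  # project
        "snia_steady_state",                         # case_name
        "2017-09-04 21:05:00",                       # test_start
        "2017-09-04 21:20:00",                       # test_stop
        logger,
        "intel-pod9",                                # pod_name
        "danube",                                    # version
        "ceph_ws,wr,rs,rr,rw",                       # scenario
        "PASS",                                      # criteria
        "jenkins-storperf-daily-10",                 # build_tag
        {"metrics": {}})                             # details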
storperf/utilities/data_handler.py
index 2d4194a..2f79054 100644 (file)
@@ -124,42 +124,55 @@ class DataHandler(object):
         return SteadyState.steady_state(data_series)
 
     def _push_to_db(self, executor):
-        test_db = os.environ.get('TEST_DB_URL')
 
-        if test_db is not None:
-            pod_name = dictionary.get_key_from_dict(executor.metadata,
-                                                    'pod_name',
-                                                    'Unknown')
-            version = dictionary.get_key_from_dict(executor.metadata,
-                                                   'version',
-                                                   'Unknown')
-            scenario = dictionary.get_key_from_dict(executor.metadata,
-                                                    'scenario_name',
-                                                    'Unknown')
-            build_tag = dictionary.get_key_from_dict(executor.metadata,
-                                                     'build_tag',
-                                                     'Unknown')
-            duration = executor.end_time - executor.start_time
+        pod_name = dictionary.get_key_from_dict(executor.metadata,
+                                                'pod_name',
+                                                'Unknown')
+        version = dictionary.get_key_from_dict(executor.metadata,
+                                               'version',
+                                               'Unknown')
+        scenario = dictionary.get_key_from_dict(executor.metadata,
+                                                'scenario_name',
+                                                'Unknown')
+        build_tag = dictionary.get_key_from_dict(executor.metadata,
+                                                 'build_tag',
+                                                 'Unknown')
+        test_case = dictionary.get_key_from_dict(executor.metadata,
+                                                 'test_case',
+                                                 'Unknown')
+        duration = executor.end_time - executor.start_time
+
+        payload = executor.metadata
+
+        steady_state = True
+        for _, value in executor.metadata['steady_state'].items():
+            steady_state = steady_state and value
+
+        payload['timestart'] = executor.start_time
+        payload['duration'] = duration
+        graphite_db = GraphiteDB()
+        payload['metrics'] = graphite_db.fetch_averages(
+            executor.job_db.job_id)
+        if steady_state:
+            criteria = 'PASS'
+        else:
+            criteria = 'FAIL'
 
-            self.logger.info("Pushing results to %s" % (test_db))
+        start_time = time.strftime('%Y-%m-%d %H:%M:%S',
+                                   time.gmtime(executor.start_time))
 
-            payload = executor.metadata
-            payload['timestart'] = executor.start_time
-            payload['duration'] = duration
-            payload['status'] = 'OK'
-            graphite_db = GraphiteDB()
-            payload['metrics'] = graphite_db.fetch_averages(
-                executor.job_db.job_id)
-            criteria = {}
-            criteria['block_sizes'] = executor.block_sizes
-            criteria['queue_depths'] = executor.queue_depths
+        end_time = time.strftime('%Y-%m-%d %H:%M:%S',
+                                 time.gmtime(executor.end_time))
 
+        test_db = os.environ.get('TEST_DB_URL')
+        if test_db is not None:
+            self.logger.info("Pushing results to %s" % (test_db))
             try:
                 test_results_db.push_results_to_db(test_db,
                                                    "storperf",
-                                                   "Latency Test",
-                                                   executor.start_time,
-                                                   executor.end_time,
+                                                   test_case,
+                                                   start_time,
+                                                   end_time,
                                                    self.logger,
                                                    pod_name,
                                                    version,
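
The start and stop times are now rendered as UTC strings before being
pushed; a quick check of the conversion used above:

    import time
    # 1504559100 is the epoch used in the new unit test below;
    # it formats to '2017-09-04 21:05:00' UTC.
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(1504559100)))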
tests/utilities_tests/data_handler_test.py
index 3813957..7963c9f 100644 (file)
@@ -44,6 +44,7 @@ class DataHandlerTest(unittest.TestCase):
         self.job_db = mock
         self.pushed = False
         self.current_workload = None
+        self.db_results = None
         pass
 
     @property
@@ -52,6 +53,7 @@ class DataHandlerTest(unittest.TestCase):
 
     def push_results_to_db(self, *args):
         self.pushed = True
+        self.db_results = args
         pass
 
     def terminate(self):
@@ -131,6 +133,13 @@ class DataHandlerTest(unittest.TestCase):
         self._terminated = True
         mock_results_db.side_effect = self.push_results_to_db
         mock_graphite_db.side_effect = MockGraphiteDB
+        self.metadata = {
+            "steady_state": {
+                "rr.queue-depth.8.block-size.16384": True,
+                "rr.queue-depth.8.block-size.2048": False,
+                "rr.queue-depth.8.block-size.8192": True,
+            },
+        }
 
         self.data_handler.data_event(self)
         self.assertEqual(True, self.pushed)
@@ -248,3 +257,30 @@ class DataHandlerTest(unittest.TestCase):
         self.assertEqual(True, self._terminated)
 
         self.assertEqual(False, self.pushed)
+
+    @mock.patch.dict(os.environ, {'TEST_DB_URL': 'mock'})
+    @mock.patch("storperf.db.test_results_db.push_results_to_db")
+    def test_payload_report(self,
+                             mock_results_db):
+        mock_results_db.side_effect = self.push_results_to_db
+        self.start_time = 1504559100
+        self.end_time = 1504560000
+        self.metadata = {
+            "scenario_name": "ceph_ws,wr,rs,rr,rw",
+            "status": "OK",
+            "steady_state": {
+                "rr.queue-depth.8.block-size.16384": True,
+                "rr.queue-depth.8.block-size.2048": False,
+                "rr.queue-depth.8.block-size.8192": True,
+            },
+            "storage_node_count": 5,
+            "volume_size": 10
+        }
+        self.data_handler._push_to_db(self)
+        self.assertEqual('FAIL', self.db_results[9],
+                         'Expected FAIL in criteria')
+        self.assertEqual('2017-09-04 21:05:00', self.db_results[3],
+                         'Start time')
+        self.assertEqual('2017-09-04 21:20:00', self.db_results[4],
+                         'End time')
+