Run benchmark test in restful server post api 87/20787/5
author    zhifeng.jiang <jiang.zhifeng@zte.com.cn>
Fri, 9 Sep 2016 15:46:07 +0000 (23:46 +0800)
committer zhifeng.jiang <jiang.zhifeng@zte.com.cn>
Tue, 20 Sep 2016 12:16:00 +0000 (20:16 +0800)
Modifications:
    Call Ansible asynchronously in the RESTful server POST API (see the client sketch below)
    Set the job state to 'finished' when the benchmark run completes
    Terminate the job before the next benchmark starts in the RESTful server DELETE API
    Job results will be added in a follow-up commit
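
For reference, a minimal client-side sketch of the intended POST/GET/DELETE
flow; the host/port is an assumption, the routes and fields come from
restful_server/qtip_server.py in this change:

import time
import requests

BASE = 'http://127.0.0.1:5000/api/v1.0/jobs'  # assumed Flask dev-server address

# POST starts the benchmark run in a background thread and returns immediately.
job_id = requests.post(BASE, data={'installer_type': 'fuel',
                                   'installer_ip': '10.20.0.2',
                                   'suite_name': 'compute',
                                   'type': 'BM'}).json()['job_id']

# Poll until the worker thread marks the job 'finished'.
while True:
    job = requests.get('{0}/{1}'.format(BASE, job_id)).json()
    if job['state'] == 'finished':
        break
    time.sleep(5)

# DELETE would stop the worker thread before the next benchmark starts:
# requests.delete('{0}/{1}'.format(BASE, job_id))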

JIRA:QTIP-97

Change-Id: I252482dddd9b35ba33f992e8ea19037d8919fad6
Signed-off-by: zhifeng.jiang <jiang.zhifeng@zte.com.cn>
func/args_handler.py
restful_server/db.py
restful_server/qtip_server.py
test_list/compute
test_list/network
test_list/storage
tests/qtip_server_test.py

index 57ecfcb..90d902b 100644 (file)
@@ -7,19 +7,23 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import os
+from operator import add
+import simplejson as json
 from func.env_setup import Env_setup
 from func.spawn_vm import SpawnVM
 from func.driver import Driver
 
 
-def get_files_in_test_list(suit_name):
-    with open('test_list/' + suit_name, 'r') as fin_put:
-        benchmark_list = fin_put.readlines()
-        return map(lambda x: x.rstrip(), benchmark_list)
+def get_files_in_test_list(suit_name, case_type='all'):
+    benchmark_list = json.load(file('test_list/{0}'.format(suit_name)))
+    return reduce(add, benchmark_list.values()) \
+        if case_type == 'all' else benchmark_list[case_type]
 
 
-def get_files_in_test_case(lab, suit_name):
-    return os.listdir('./test_cases/{0}/{1}'.format(lab, suit_name))
+def get_files_in_test_case(lab, suit_name, case_type='all'):
+    test_case_all = os.listdir('./test_cases/{0}/{1}'.format(lab, suit_name))
+    return test_case_all if case_type == 'all' else \
+        filter(lambda x: case_type in x, test_case_all)
 
 
 def get_benchmark_path(lab, suit, benchmark):
index 42808b8..916fc03 100644 (file)
@@ -10,6 +10,7 @@ from datetime import datetime
 import uuid
 
 jobs = {}
+threads = {}
 
 
 def create_job(args):
@@ -23,8 +24,8 @@ def create_job(args):
                'suite_name': args["suite_name"],
                'max-minutes': args["max-minutes"],
                'type': args["type"],
-               'start-time': str(datetime.now()),
-               'end-time': None,
+               'start_time': str(datetime.now()),
+               'end_time': None,
                'state': 'processing',
                'state_detail': [],
                'result': []}
@@ -33,7 +34,9 @@ def create_job(args):
 
 
 def delete_job(job_id):
-    if job_id in jobs.keys():
+    if job_id in threads:
+        stop_thread(job_id)
+    if job_id in jobs:
         jobs[job_id]['end_time'] = str(datetime.now())
         jobs[job_id]['state'] = 'terminated'
         return True
@@ -42,23 +45,24 @@ def delete_job(job_id):
 
 
 def get_job_info(job_id):
-    if job_id in jobs.keys():
+    if job_id in jobs:
         return jobs[job_id]
     else:
         return None
 
 
-def finish_job(job_id, state):
-    jobs[job_id]['end-time'] = str(datetime.now())
-    jobs[job_id]['state'] = state
+def finish_job(job_id):
+    jobs[job_id]['end_time'] = str(datetime.now())
+    jobs[job_id]['state'] = 'finished'
+    del threads[job_id]
 
 
 def update_job_state_detail(job_id, state_detail):
-    jobs[job_id][state_detail] = state_detail
+    jobs[job_id]['state_detail'] = state_detail
 
 
 def update_job_result(job_id, result):
-    jobs[job_id][result] = result
+    jobs[job_id]['result'] = result
 
 
 def is_job_timeout(job_id):
@@ -66,3 +70,22 @@ def is_job_timeout(job_id):
                                                 "%Y-%m-%d %H:%M:%S.%f")
     return True if jobs[job_id]['max-minutes'] * 60 < period.total_seconds()\
         else False
+
+
+def start_thread(job_id, thread, thread_stop):
+    threads[job_id] = {'thread': thread,
+                       'thread_stop': thread_stop}
+    thread.start()
+
+
+def stop_thread(job_id):
+    if threads[job_id]['thread'].isAlive():
+        threads[job_id]['thread_stop'].set()
+        threads[job_id]['thread'].join()
+    if job_id in threads:
+        del threads[job_id]
+
+
+def update_benmark_state_in_state_detail(job_id, benchmark, benchmark_state):
+    filter(lambda x: x["benchmark"] == benchmark,
+           get_job_info(job_id)["state_detail"])[0]['state'] = benchmark_state
index 00d598a..ccd8978 100644 (file)
@@ -9,7 +9,10 @@
 from flask import Flask, abort
 from flask_restful import Api, Resource, fields, reqparse
 from flask_restful_swagger import swagger
+import threading
+from copy import copy
 import db
+import func.args_handler as args_handler
 
 
 app = Flask(__name__)
@@ -89,12 +92,12 @@ class JobList(Resource):
 "installer_ip": The installer ip of the pod,
 
 "max-minutes": If specified, the maximum duration in minutes
-for any single test iteration, default is '10',
+for any single test iteration, default is '60',
 
 "pod_name": If specified, the Pod name, default is 'default',
 
-"suite_name": If specified, Test suite name, for example 'compute', 'network', 'storage', 'all',
-default is 'all'
+"suite_name": If specified, Test suite name, for example 'compute', 'network', 'storage',
+default is 'compute'
 "type": BM or VM,default is 'BM'
                 """,
                 "required": True,
@@ -122,13 +125,48 @@ default is 'all'
         parser = reqparse.RequestParser()
         parser.add_argument('installer_type', type=str, required=True, help='Installer_type is required')
         parser.add_argument('installer_ip', type=str, required=True, help='Installer_ip is required')
-        parser.add_argument('max-minutes', type=int, required=False, default=10, help='max-minutes should be integer')
+        parser.add_argument('max-minutes', type=int, required=False, default=60, help='max-minutes should be integer')
         parser.add_argument('pod_name', type=str, required=False, default='default', help='pod_name should be string')
-        parser.add_argument('suite_name', type=str, required=False, default='all', help='suite_name should be string')
+        parser.add_argument('suite_name', type=str, required=False, default='compute', help='suite_name should be string')
         parser.add_argument('type', type=str, required=False, default='BM', help='type should be BM, VM and ALL')
         args = parser.parse_args()
-        ret = db.create_job(args)
-        return {'job_id': str(ret)} if ret else abort(409, 'message:It already has one job running now!')
+        if not args_handler.check_suit_in_test_list(args["suite_name"]):
+            return abort(404, 'message:Test Suit {0} does not exist in test_list'.format(args["suite_name"]))
+        if not args_handler.check_lab_name(args["pod_name"]):
+            return abort(404, 'message: You have specified a lab {0}\
+                               that is not present in test_cases'.format(args['pod_name']))
+
+        job_id = db.create_job(args)
+        if not job_id:
+            return abort(409, 'message:It already has one job running now!')
+
+        benchmarks = args_handler.get_files_in_test_list(args["suite_name"],
+                                                         args["type"].lower())
+        test_cases = args_handler.get_files_in_test_case(args["pod_name"],
+                                                         args["suite_name"],
+                                                         args["type"].lower())
+        benchmarks_list = filter(lambda x: x in test_cases, benchmarks)
+        state_detail = map(lambda x: {'benchmark': x, 'state': 'idle'}, benchmarks_list)
+        db.update_job_state_detail(job_id, copy(state_detail))
+        thread_stop = threading.Event()
+        post_thread = threading.Thread(target=self.thread_post, args=(args["installer_type"],
+                                                                      benchmarks_list,
+                                                                      args["pod_name"],
+                                                                      args["suite_name"],
+                                                                      job_id,
+                                                                      thread_stop))
+        db.start_thread(job_id, post_thread, thread_stop)
+        return {'job_id': str(job_id)}
+
+    def thread_post(self, installer_type, benchmarks_list, pod_name, suite_name, job_id, stop_event):
+        for benchmark in benchmarks_list:
+            if db.is_job_timeout(job_id) or stop_event.is_set():
+                break
+            db.update_benmark_state_in_state_detail(job_id, benchmark, 'processing')
+            args_handler.prepare_and_run_benchmark(installer_type, '/home',
+                                                   args_handler.get_benchmark_path(pod_name, suite_name, benchmark))
+            db.update_benmark_state_in_state_detail(job_id, benchmark, 'finished')
+        db.finish_job(job_id)
 
 
 api.add_resource(JobList, '/api/v1.0/jobs')
index 7fc7614..3bf1b18 100644 (file)
@@ -1,10 +1,16 @@
-dhrystone_bm.yaml\r
-dhrystone_vm.yaml\r
-whetstone_bm.yaml\r
-whetstone_vm.yaml\r
-ramspeed_bm.yaml\r
-ramspeed_vm.yaml\r
-dpi_bm.yaml\r
-dpi_vm.yaml\r
-ssl_bm.yaml\r
-ssl_vm.yaml
\ No newline at end of file
+{
+    "bm": [
+        "dhrystone_bm.yaml",
+        "whetstone_bm.yaml",
+        "ramspeed_bm.yaml",
+        "dpi_bm.yaml",
+        "ssl_bm.yaml"
+    ],
+    "vm": [
+        "dhrystone_vm.yaml",
+        "whetstone_vm.yaml",
+        "ramspeed_vm.yaml",
+        "dpi_vm.yaml",
+        "ssl_vm.yaml"
+    ]
+}
index 677f2ba..58ce5cb 100644 (file)
@@ -1,3 +1,9 @@
-iperf_bm.yaml
-iperf_vm.yaml
-iperf_vm_2.yaml
\ No newline at end of file
+{
+    "bm": [
+        "iperf_bm.yaml"
+    ],
+    "vm": [
+        "iperf_vm.yaml",
+        "iperf_vm_2.yaml"
+    ]
+}
index c7f5340..f3068dd 100644 (file)
@@ -1,2 +1,8 @@
-fio_bm.yaml
-fio_vm.yaml
\ No newline at end of file
+{
+    "bm": [
+        "fio_bm.yaml"
+    ],
+    "vm": [
+        "fio_vm.yaml"
+    ]
+}
index c2b1297..3f70a1f 100644 (file)
@@ -1,6 +1,8 @@
 import restful_server.qtip_server as server
 import pytest
 import json
+import mock
+import time
 
 
 @pytest.fixture
@@ -14,6 +16,14 @@ def app_client(app):
     return client
 
 
+def side_effect_sleep(sleep_time):
+    time.sleep(sleep_time)
+
+
+def side_effect_pass():
+    pass
+
+
 class TestClass:
     @pytest.mark.parametrize("body, expected", [
         ({'installer_type': 'fuel',
@@ -22,11 +32,15 @@ class TestClass:
           'installer_type': 'fuel',
           'installer_ip': '10.20.0.2',
           'pod_name': 'default',
-          'suite_name': 'all',
-          'max-minutes': 10,
+          'suite_name': 'compute',
+          'max-minutes': 60,
           'type': 'BM',
-          'state': 'processing',
-          'state_detail': [],
+          'state': 'finished',
+          'state_detail': [{'state': 'finished', 'benchmark': 'dhrystone_bm.yaml'},
+                           {'state': 'finished', 'benchmark': 'whetstone_bm.yaml'},
+                           {'state': 'finished', 'benchmark': 'ramspeed_bm.yaml'},
+                           {'state': 'finished', 'benchmark': 'dpi_bm.yaml'},
+                           {'state': 'finished', 'benchmark': 'ssl_bm.yaml'}],
           'result': []}),
         ({'installer_type': 'fuel',
           'installer_ip': '10.20.0.2',
@@ -41,17 +55,26 @@ class TestClass:
           'suite_name': 'compute',
           'max-minutes': 20,
           'type': 'VM',
-          'state': 'processing',
-          'state_detail': [],
+          'state': 'finished',
+          'state_detail': [{u'state': u'finished', u'benchmark': u'dhrystone_vm.yaml'},
+                           {u'state': u'finished', u'benchmark': u'whetstone_vm.yaml'},
+                           {u'state': u'finished', u'benchmark': u'ramspeed_vm.yaml'},
+                           {u'state': u'finished', u'benchmark': u'dpi_vm.yaml'},
+                           {u'state': u'finished', u'benchmark': u'ssl_vm.yaml'}],
           'result': []})
     ])
-    def test_post_get_delete_job_successful(self, app_client, body, expected):
+    @mock.patch('restful_server.qtip_server.args_handler.prepare_and_run_benchmark')
+    def test_post_get_delete_job_successful(self, mock_args_handler, app_client, body, expected):
         reply = app_client.post("/api/v1.0/jobs", data=body)
-        print reply.data
+        print(reply.data)
         id = json.loads(reply.data)['job_id']
         expected['job_id'] = id
-        get_reply = app_client.get("/api/v1.0/jobs/%s" % id)
-        reply_data = json.loads(get_reply.data)
+        post_process = ''
+        while post_process != 'finished':
+            get_reply = app_client.get("/api/v1.0/jobs/%s" % id)
+            reply_data = json.loads(get_reply.data)
+            post_process = reply_data['state']
+            print(reply_data)
         assert len(filter(lambda x: reply_data[x] == expected[x], expected.keys())) == len(expected)
         delete_reply = app_client.delete("/api/v1.0/jobs/%s" % id)
         assert "successful" in delete_reply.data
@@ -62,15 +85,11 @@ class TestClass:
           {'installer_type': 'compass',
            'installer_ip': '192.168.20.50'}],
          ['job_id',
-          'It already has one job running now!']),
-        ([{'installer_type': 'fuel',
-           'installer_ip': '10.20.0.2'},
-          {'installer_type': 'compass',
-           'insta_ip': '192.168.20.50'}],
-         ['job_id',
-          'Installer_ip is required'])
+          'It already has one job running now!'])
     ])
-    def test_post_two_jobs_unsuccessful(self, app_client, body, expected):
+    @mock.patch('restful_server.qtip_server.args_handler.prepare_and_run_benchmark',
+                side_effect=[side_effect_sleep(0.5), side_effect_pass])
+    def test_post_two_jobs_unsuccessful(self, mock_args_hanler, app_client, body, expected):
         reply_1 = app_client.post("/api/v1.0/jobs", data=body[0])
         reply_2 = app_client.post("/api/v1.0/jobs", data=body[1])
         assert expected[0] in json.loads(reply_1.data).keys()
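
For reference, a short sketch of how the reworked test_list JSON can be
consumed (mirroring get_files_in_test_list in func/args_handler.py; the path
is assumed to be relative to the repo root):

import json
from operator import add
from functools import reduce  # builtin in Python 2; the import is needed on Python 3

with open('test_list/compute') as f:
    benchmark_list = json.load(f)

print(benchmark_list['bm'])                  # case_type 'bm': bare-metal cases only
print(reduce(add, benchmark_list.values()))  # case_type 'all': bm + vm concatenated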