metrics baseline in compute-baseline.json (61/35561/4)
author    zhihui wu <wu.zhihui1@zte.com.cn>
Wed, 31 May 2017 02:42:23 +0000 (10:42 +0800)
committer zhihui wu <wu.zhihui1@zte.com.cn>
Thu, 1 Jun 2017 01:44:57 +0000 (09:44 +0800)
- Previously, the metrics baseline was defined in the spec file.
  This patch moves the metrics baseline to compute-baseline.json.
- You can get a QPI score based on the baseline file given in
  calculate.yml (a sketch of the expected layout follows below).
- You can generate a customized baseline file when no baseline file
  is given in calculate.yml.

The baseline will then be removed from the spec file.

Change-Id: I123d28e28543cb153b60120b5076306fa2fa3873
Signed-off-by: zhihui wu <wu.zhihui1@zte.com.cn>
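
For reference, the baseline that calc_qpi() now consumes follows the shape of
the qpi_baseline fixture in calculate_test.py further down; the
compute-baseline.json resource itself is not among the hunks shown here, so
the values in this sketch are illustrative only:

    # Illustrative baseline layout, mirroring the qpi_baseline test fixture.
    # The real resources/QPI/compute-baseline.json is not part of this diff.
    qpi_baseline = {
        "name": "compute-baseline",
        "description": "The baseline for compute QPI",
        "score": 2048,  # score assigned to the baseline system
        "sections": [{
            "name": "ssl",
            "metrics": [{
                "name": "ssl_rsa",
                "workloads": [
                    {"name": "rsa_sign", "baseline": "500"},  # string parsed via humanfriendly
                    {"name": "rsa_verify", "baseline": 600},  # or a plain number
                ],
            }],
        }],
    }
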
qtip/ansible_library/plugins/action/calculate.py
requirements.txt
resources/ansible_roles/qtip/tasks/calculate.yml
tests/unit/ansible_library/plugins/action/calculate_test.py

diff --git a/qtip/ansible_library/plugins/action/calculate.py b/qtip/ansible_library/plugins/action/calculate.py
index 077d863..383be58 100644
@@ -9,18 +9,18 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import humanfriendly
 import json
 import numbers
-from numpy import mean
-import yaml
 
 from ansible.plugins.action import ActionBase
 from ansible.utils.display import Display
+from asq.initiators import query
+import humanfriendly
+from numpy import mean
+import yaml
 
 from qtip.util.export_to import export_to_file
 
-
 display = Display()
 
 
@@ -45,22 +45,51 @@ class ActionModule(ActionBase):
                 metrics[metric] = json.load(f)
         dest = self._task.args.get('dest')
 
-        return calc_qpi(spec, metrics, dest=dest)
+        baseline_file = self._task.args.get('baseline')
+        if baseline_file is not None:
+            with open(baseline_file) as f:
+                baseline = json.load(f)
+                return calc_qpi(spec, metrics, baseline, dest=dest)
+        else:
+            return save_as_baseline(spec, metrics, dest=dest)
 
 
+# TODO(wuzhihui): It is more reasonable to put this function into collect.py.
+# For now metrics data is not easy to be collected from collect.py.
 @export_to_file
-def calc_qpi(qpi_spec, metrics):
+def save_as_baseline(qpi_spec, metrics):
+    display.vv("save {} metrics as baseline".format(qpi_spec['name']))
+    display.vvv("spec: {}".format(qpi_spec))
+    display.vvv("metrics: {}".format(metrics))
 
+    return {
+        'name': qpi_spec['name'],
+        'score': 2048,
+        'description': qpi_spec['description'],
+        'details': {
+            'metrics': metrics,
+            'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+            'baseline': ""
+        }
+    }
+
+
+@export_to_file
+def calc_qpi(qpi_spec, metrics, qpi_baseline):
     display.vv("calculate QPI {}".format(qpi_spec['name']))
     display.vvv("spec: {}".format(qpi_spec))
     display.vvv("metrics: {}".format(metrics))
+    display.vvv("baseline: {}".format(qpi_baseline))
 
-    section_results = [calc_section(s, metrics)
-                       for s in qpi_spec['sections']]
+    section_results = []
+    for s in qpi_spec['sections']:
+        s_baseline = query(qpi_baseline['sections']).first(
+            lambda section: section['name'] == s['name'])
+        section_results.append(calc_section(s, metrics, s_baseline))
 
     # TODO(yujunz): use formula in spec
-    standard_score = 2048
-    qpi_score = int(mean([r['score'] for r in section_results]) * standard_score)
+    qpi_score = int(
+        mean([r['score'] for r in section_results]) * qpi_baseline['score'])
 
     results = {
         'score': qpi_score,
@@ -69,21 +98,26 @@ def calc_qpi(qpi_spec, metrics):
         'children': section_results,
         'details': {
             'metrics': metrics,
-            'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml"
+            'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+            'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json"
         }
     }
 
     return results
 
 
-def calc_section(section_spec, metrics):
-
+def calc_section(section_spec, metrics, section_baseline):
     display.vv("calculate section {}".format(section_spec['name']))
     display.vvv("spec: {}".format(section_spec))
     display.vvv("metrics: {}".format(metrics))
+    display.vvv("baseline: {}".format(section_baseline))
+
+    metric_results = []
+    for m in section_spec['metrics']:
+        m_baseline = query(section_baseline['metrics']).first(
+            lambda metric: metric['name'] == m['name'])
+        metric_results.append(calc_metric(m, metrics[m['name']], m_baseline))
 
-    metric_results = [calc_metric(m, metrics[m['name']])
-                      for m in section_spec['metrics']]
     # TODO(yujunz): use formula in spec
     section_score = mean([r['score'] for r in metric_results])
     return {
@@ -94,17 +128,23 @@ def calc_section(section_spec, metrics):
     }
 
 
-def calc_metric(metric_spec, metrics):
-
+def calc_metric(metric_spec, metrics, metric_basline):
     display.vv("calculate metric {}".format(metric_spec['name']))
     display.vvv("spec: {}".format(metric_spec))
     display.vvv("metrics: {}".format(metrics))
+    display.vvv("baseline: {}".format(metric_basline))
 
     # TODO(yujunz): use formula in spec
-    workload_results = [{'name': w['name'],
-                         'description': 'workload',
-                         'score': calc_score(metrics[w['name']], w['baseline'])}
-                        for w in metric_spec['workloads']]
+    workload_results = []
+    for w in metric_spec['workloads']:
+        w_baseline = query(metric_basline['workloads']).first(
+            lambda workload: workload['name'] == w['name'])
+        workload_results.append({
+            'name': w['name'],
+            'description': 'workload',
+            'score': calc_score(metrics[w['name']], w_baseline['baseline'])
+        })
+
     metric_score = mean([r['score'] for r in workload_results])
     return {
         'score': metric_score,
@@ -118,5 +158,6 @@ def calc_score(metrics, baseline):
     if not isinstance(baseline, numbers.Number):
         baseline = humanfriendly.parse_size(baseline)
 
-    return mean([m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
-                 for m in metrics]) / baseline
+    return mean(
+        [m if isinstance(m, numbers.Number) else humanfriendly.parse_size(m)
+         for m in metrics]) / baseline
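
The per-section, per-metric and per-workload lookups above all use the same
asq pattern to pair a spec entry with its baseline entry by name. A minimal
stand-alone sketch of that pattern, with made-up data:

    # Not part of the patch: illustrates query(...).first(predicate) from asq.
    from asq.initiators import query

    baseline_sections = [
        {'name': 'ssl', 'metrics': []},      # illustrative entries only
        {'name': 'memory', 'metrics': []},
    ]

    # first() returns the first element satisfying the predicate and raises
    # an error when nothing matches, so the baseline file must cover every
    # section, metric and workload named in the spec.
    ssl_baseline = query(baseline_sections).first(lambda s: s['name'] == 'ssl')
    print(ssl_baseline)   # {'name': 'ssl', 'metrics': []}
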
diff --git a/requirements.txt b/requirements.txt
index e601d10..b9d0e88 100644
@@ -9,4 +9,5 @@ pbr
 prettytable
 six
 PyYAML
-humanfriendly
\ No newline at end of file
+humanfriendly
+asq
\ No newline at end of file
diff --git a/resources/ansible_roles/qtip/tasks/calculate.yml b/resources/ansible_roles/qtip/tasks/calculate.yml
index 63fec7f..61e96fa 100644
@@ -19,5 +19,6 @@
       floatmem: "{{ qtip_results }}/memory/float-metrics.json"
       arithmetic: "{{ qtip_results }}/arithmetic/metrics.json"
     spec:   "{{ qtip_resources }}/QPI/compute.yaml"
+    baseline: "{{ qtip_resources }}/QPI/compute-baseline.json"
     dest: "{{ qtip_results }}/compute.json"
   delegate_to: localhost
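
With this task change the role always scores against compute-baseline.json;
per the action plugin above, a run whose task omits baseline: writes a
baseline scaffold to dest instead of a QPI score. A sketch of that dispatch
outside Ansible (the import path is an assumption based on the repository
layout, not taken from the patch):

    # Sketch of the dispatch implemented in ActionModule.run() above.
    # The import path below is assumed from the file layout.
    import json

    from qtip.ansible_library.plugins.action.calculate import (
        calc_qpi, save_as_baseline)

    def score_or_bootstrap(spec, metrics, dest, baseline_file=None):
        if baseline_file is not None:
            # Scoring run: load the baseline and compute the QPI against it.
            with open(baseline_file) as f:
                baseline = json.load(f)
            return calc_qpi(spec, metrics, baseline, dest=dest)
        # Bootstrap run: record the current metrics as a customized baseline.
        return save_as_baseline(spec, metrics, dest=dest)
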
diff --git a/tests/unit/ansible_library/plugins/action/calculate_test.py b/tests/unit/ansible_library/plugins/action/calculate_test.py
index fae5982..80a0720 100644
@@ -42,7 +42,7 @@ def section_spec(metric_spec):
     }
 
 
-@pytest.fixture
+@pytest.fixture()
 def qpi_spec(section_spec):
     return {
         "name": "compute",
@@ -51,6 +51,42 @@ def qpi_spec(section_spec):
     }
 
 
+@pytest.fixture()
+def rsa_sign_baseline():
+    return {'name': 'rsa_sign', 'baseline': '500'}
+
+
+@pytest.fixture()
+def rsa_verify_baseline():
+    return {"name": "rsa_verify", "baseline": 600}
+
+
+@pytest.fixture()
+def metric_baseline(rsa_sign_baseline, rsa_verify_baseline):
+    return {
+        "name": "ssl_rsa",
+        "workloads": [rsa_sign_baseline, rsa_verify_baseline]
+    }
+
+
+@pytest.fixture()
+def section_baseline(metric_baseline):
+    return {
+        "name": "ssl",
+        "metrics": [metric_baseline]
+    }
+
+
+@pytest.fixture()
+def qpi_baseline(section_baseline):
+    return {
+        "name": "compute-baseline",
+        "description": "The baseline for compute QPI",
+        "score": 2048,
+        "sections": [section_baseline]
+    }
+
+
 @pytest.fixture()
 def metric_result():
     return {'score': 1.0,
@@ -76,19 +112,26 @@ def qpi_result(section_result, metrics):
             'children': [section_result],
             'details': {
                 'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
+                'baseline': "https://git.opnfv.org/qtip/tree/resources/QPI/compute-baseline.json",
                 'metrics': metrics}}
 
 
-def test_calc_metric(metric_spec, metrics, metric_result):
-    assert calculate.calc_metric(metric_spec, metrics['ssl_rsa']) == metric_result
+def test_calc_metric(metric_spec, metrics, metric_baseline, metric_result):
+    assert calculate.calc_metric(metric_spec,
+                                 metrics['ssl_rsa'],
+                                 metric_baseline) == metric_result
 
 
-def test_calc_section(section_spec, metrics, section_result):
-    assert calculate.calc_section(section_spec, metrics) == section_result
+def test_calc_section(section_spec, metrics, section_baseline, section_result):
+    assert calculate.calc_section(section_spec,
+                                  metrics,
+                                  section_baseline) == section_result
 
 
-def test_calc_qpi(qpi_spec, metrics, qpi_result):
-    assert calculate.calc_qpi(qpi_spec, metrics) == qpi_result
+def test_calc_qpi(qpi_spec, metrics, qpi_baseline, qpi_result):
+    assert calculate.calc_qpi(qpi_spec,
+                              metrics,
+                              qpi_baseline) == qpi_result
 
 
 @pytest.mark.parametrize('metrics, baseline, expected', [