for metric, filename in metrics_files.items():
with open(filename) as f:
metrics[metric] = json.load(f)
+
+ with open(self._task.args.get('sysinfo')) as f:
+ data = json.load(f)
+ sysinfo = dict([(k['name'], data[k['name']][0]) for k in spec['system_info']])
+
dest = self._task.args.get('dest')
baseline_file = self._task.args.get('baseline')
if baseline_file is not None:
with open(baseline_file) as f:
baseline = json.load(f)
- return calc_qpi(spec, metrics, baseline, dest=dest)
+ return calc_qpi(spec, metrics, baseline, sysinfo, dest=dest)
else:
- return save_as_baseline(spec, metrics, dest=dest)
+ return save_as_baseline(spec, metrics, sysinfo, dest=dest)
# TODO(wuzhihui): It is more reasonable to put this function into collect.py.
# For now, metrics data is not easy to collect from collect.py.
@export_to_file
-def save_as_baseline(qpi_spec, metrics):
+def save_as_baseline(qpi_spec, metrics, sysinfo):
display.vv("save {} metrics as baseline".format(qpi_spec['name']))
display.vvv("spec: {}".format(qpi_spec))
display.vvv("metrics: {}".format(metrics))
'name': qpi_spec['name'],
'score': 2048,
'description': qpi_spec['description'],
+ 'system_info': sysinfo,
'details': {
'metrics': metrics,
'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
@export_to_file
-def calc_qpi(qpi_spec, metrics, qpi_baseline):
+def calc_qpi(qpi_spec, metrics, qpi_baseline, sysinfo):
display.vv("calculate QPI {}".format(qpi_spec['name']))
display.vvv("spec: {}".format(qpi_spec))
display.vvv("metrics: {}".format(metrics))
'score': qpi_score,
'name': qpi_spec['name'],
'description': qpi_spec['description'],
+ 'system_info': sysinfo,
'children': section_results,
'details': {
'metrics': metrics,
})
metric_score = mean([r['score'] for r in workload_results])
+
return {
'score': metric_score,
'name': metric_spec['name'],
"product": "EC600G3",
"cpu": "2 Deca core Intel Xeon E5-2650 v3s (-HT-MCP-SMP-)",
"os": "Ubuntu 16.04 xenial",
- "kernel": "4.4.0-72-generic x86_64 (64 bit)"
- },
- "condition": {
+ "kernel": "4.4.0-72-generic x86_64 (64 bit)",
"cpu_speed": "1200/3000 MHz",
"memory": "4062.3/128524.1MB",
- "disk": "1200.3GB (0.9% used)",
+ "disk": "1200.3GB (0.9% used)"
+ },
+ "condition": {
"installer": "Fuel",
"scenario": "os-nosdn-nofeature-ha"
},
##############################################################################
name: compute
description: QTIP Performance Index of compute
+system_info:
+ - name: product
+ description: product commercial name
+ - name: cpu
+ description: cpu brand
+ - name: os
+ description: operating system version
+ - name: kernel
+ description: operating system kernel version
+ - name: memory
+ description: memory usage
+ - name: disk
+ description: disk usage
formula: weighted arithmetic mean
sections: # split based on different application
- name: SSL
intmem: "{{ qtip_results }}/memory/integer-metrics.json"
floatmem: "{{ qtip_results }}/memory/float-metrics.json"
arithmetic: "{{ qtip_results }}/arithmetic/metrics.json"
+ sysinfo: "{{ qtip_results }}/sysinfo/condition.json"
spec: "{{ qtip_resources }}/QPI/compute.yaml"
baseline: "{{ qtip_resources }}/QPI/compute-baseline.json"
dest: "{{ qtip_results }}/compute.json"
@pytest.fixture()
-def qpi_result(section_result, metrics):
+def info():
+ return {
+ "system_info": {
+ "kernel": "4.4.0-72-generic x86_64 (64 bit)",
+ "product": "EC600G3",
+ "os": "Ubuntu 16.04 xenial",
+ "cpu": "2 Deca core Intel Xeon E5-2650 v3s (-HT-MCP-SMP-)",
+ "disk": "1200.3GB (25.1% used)",
+ "memory": "30769.7/128524.1MB"
+ }
+ }
+
+
+@pytest.fixture()
+def qpi_result(section_result, metrics, info):
return {'score': 2048,
'name': 'compute',
'description': 'QTIP Performance Index of compute',
+ 'system_info': info,
'children': [section_result],
'details': {
'spec': "https://git.opnfv.org/qtip/tree/resources/QPI/compute.yaml",
section_baseline) == section_result
-def test_calc_qpi(qpi_spec, metrics, qpi_baseline, qpi_result):
+def test_calc_qpi(qpi_spec, metrics, qpi_baseline, info, qpi_result):
assert calculate.calc_qpi(qpi_spec,
metrics,
- qpi_baseline) == qpi_result
+ qpi_baseline, info) == qpi_result
@pytest.mark.parametrize('metrics, baseline, expected', [