Merge "bugfix: kill process do not accurately kill "nova-api""
[yardstick.git] / yardstick / benchmark / scenarios / compute / spec_cpu.py
1 ##############################################################################
2 # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9 from __future__ import absolute_import
10
11 import logging
12 import pkg_resources
13
14 import yardstick.ssh as ssh
15 from yardstick.benchmark.scenarios import base
16
17 LOG = logging.getLogger(__name__)
18
19
class SpecCPU(base.Scenario):
    """Spec CPU2006 benchmark

    Runs the SPEC CPU2006 suite (installed at /usr/cpu2006 on the target
    host) over SSH and leaves the reports under /tmp/result on the host.

    Parameters
        benchmark_subset - Specifies a subset of SPEC CPU2006 benchmarks to run
            type:       string
            unit:       na
            default:    na

        SPECint_benchmark - A SPECint benchmark to run
            type:       string
            unit:       na
            default:    na

        SPECfp_benchmark - A SPECfp benchmark to run
            type:       string
            unit:       na
            default:    na

        output_format - Desired report format
            type:       string
            unit:       na
            default:    na

        runspec_config - SPEC CPU2006 config file provided to the runspec binary
            type:       string
            unit:       na
            default:    "Example-linux64-amd64-gcc43+.cfg"

        runspec_rate - Number of simultaneous copies of each benchmark to run
            type:       int
            unit:       na
            default:    na

        runspec_iterations - The number of benchmark iterations to execute.
                             For a reportable run, must be 3.
            type:       int
            unit:       na
            default:    na

        runspec_tune - Tuning to use (base, peak, or all). For a reportable run, must be either
                       base or all. Reportable runs do base first, then (optionally) peak.
            type:       string
            unit:       na
            default:    na

        runspec_size - Size of input data to run (test, train, or ref). Reportable runs ensure
                       that your binaries can produce correct results with the test and train
                       workloads.
            type:       string
            unit:       na
            default:    na
    """
    __scenario_type__ = "SpecCPU2006"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.setup_done = False
        # scenario options drive both setup() (config file) and run() (flags)
        self.options = self.scenario_cfg['options']

    def setup(self):
        """Scenario setup: open SSH to the host and stage the runspec config.

        If 'runspec_config' is given, the named file is copied from the
        yardstick resources into /usr/cpu2006/config on the target host;
        otherwise the stock example GCC config is used.
        """
        host = self.context_cfg['host']
        LOG.info("user:%s, host:%s", host['user'], host['ip'])
        self.client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
        self.client.wait(timeout=600)

        if "runspec_config" in self.options:
            self.runspec_config = self.options["runspec_config"]

            self.runspec_config_file = pkg_resources.resource_filename(
                "yardstick.resources", 'files/' + self.runspec_config)

            # copy SPEC CPU2006 config file to host if given
            # NOTE(review): the file is staged under the fixed name
            # yardstick_spec_cpu2006.cfg, while run() passes the original
            # file name to --config — confirm runspec resolves it, or that
            # the staged name is intentional.
            self.client._put_file_shell(self.runspec_config_file,
                                        '/usr/cpu2006/config/yardstick_spec_cpu2006.cfg')
        else:
            self.runspec_config = "Example-linux64-amd64-gcc43+.cfg"

        self.setup_done = True

    def run(self, result):
        """Execute the benchmark.

        Builds a runspec command line from the scenario options and runs it
        on the host. Raises RuntimeError with the remote stderr on failure.
        """

        if not self.setup_done:
            self.setup()

        cmd = "cd /usr/cpu2006/ && . ./shrc && runspec --config %s" % self.runspec_config
        cmd_args = ""

        # bugfix: the option key is "runspec_rate" (as read below), not
        # "rate" — the old check raised KeyError when "rate" was set and
        # silently ignored a supplied "runspec_rate".
        if "runspec_rate" in self.options:
            cmd_args += " --rate %s" % self.options["runspec_rate"]

        if "output_format" in self.options:
            cmd_args += " --output_format %s" % self.options["output_format"]

        if "runspec_tune" in self.options:
            cmd_args += " --tune %s" % self.options["runspec_tune"]

        benchmark_subset = self.options.get('benchmark_subset', None)
        specint_benchmark = self.options.get('SPECint_benchmark', None)
        specfp_benchmark = self.options.get('SPECfp_benchmark', None)

        if benchmark_subset:
            cmd_args += " %s" % benchmark_subset
        else:
            # individual-benchmark runs are never reportable, so iteration
            # count and workload size may be freely tuned
            cmd_args += " --noreportable"

            if "runspec_iterations" in self.options:
                cmd_args += " --iterations %s" % self.options["runspec_iterations"]

            if "runspec_size" in self.options:
                cmd_args += " --size %s" % self.options["runspec_size"]

            if specint_benchmark:
                cmd_args += " %s" % specint_benchmark

            if specfp_benchmark:
                cmd_args += " %s" % specfp_benchmark

        cmd += "%s" % cmd_args

        LOG.debug("Executing command: %s", cmd)
        # a full SPEC CPU2006 run can take many hours; allow up to 24h
        status, stdout, stderr = self.client.execute(cmd, timeout=86400)
        if status:
            raise RuntimeError(stderr)

        # bugfix: the previous backslash line-continuation embedded a long
        # run of indentation spaces into the log message
        LOG.info('SPEC CPU2006 benchmark completed, please find benchmark '
                 'reports at /tmp/result directory')