bugfix: tc078 fails in some situations
yardstick/benchmark/scenarios/compute/spec_cpu.py
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import

import logging
import os

import pkg_resources

import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)


class SpecCPU(base.Scenario):
    """Spec CPU2006 benchmark

    Parameters
        benchmark_subset - Specifies a subset of SPEC CPU2006 benchmarks to run
            type:       string
            unit:       na
            default:    na

        SPECint_benchmark - A SPECint benchmark to run
            type:       string
            unit:       na
            default:    na

        SPECfp_benchmark - A SPECfp benchmark to run
            type:       string
            unit:       na
            default:    na

        output_format - Desired report format
            type:       string
            unit:       na
            default:    na

        runspec_config - SPEC CPU2006 config file provided to the runspec binary
            type:       string
            unit:       na
            default:    "Example-linux64-amd64-gcc43+.cfg"

        runspec_rate - Number of benchmark copies to run concurrently
                       (selects a SPECrate / throughput run via runspec --rate)
            type:       int
            unit:       na
            default:    na

        runspec_iterations - The number of benchmark iterations to execute.
                             For a reportable run, must be 3.
            type:       int
            unit:       na
            default:    na

        runspec_tune - Tuning to use (base, peak, or all). For a reportable run, must be
                       either base or all. Reportable runs do base first, then
                       (optionally) peak.
            type:       string
            unit:       na
            default:    na

        runspec_size - Size of input data to run (test, train, or ref). Reportable runs
                       ensure that your binaries can produce correct results with the
                       test and train workloads.
            type:       string
            unit:       na
            default:    na
    """
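    # Illustrative sketch of how the options map onto the runspec command line
    # (the values here are example choices, not defaults): a task file passing
    #     {'SPECint_benchmark': 'perlbench',
    #      'output_format': 'all',
    #      'runspec_tune': 'base',
    #      'runspec_iterations': 1,
    #      'runspec_size': 'test'}
    # makes run() build roughly this command on the target host:
    #     cd ~/cpu2006 && . ./shrc && runspec --config <cfg> --output_format all \
    #         --tune base --noreportable --iterations 1 --size test perlbench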
    __scenario_type__ = "SpecCPU2006"
    CPU2006_DIR = "~/cpu2006"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.setup_done = False
        self.options = self.scenario_cfg['options']

    def setup(self):
        """scenario setup"""
        host = self.context_cfg['host']
        LOG.info("user:%s, host:%s", host['user'], host['ip'])
        self.client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
        self.client.wait(timeout=600)

        if "runspec_config" in self.options:
            self.runspec_config = self.options["runspec_config"]

            self.runspec_config_file = pkg_resources.resource_filename(
                "yardstick.resources", 'files/' + self.runspec_config)

            # copy the SPEC CPU2006 config file to the host and point runspec
            # at the name it was copied to, not the original local file name
            cfg_path = os.path.join(self.CPU2006_DIR,
                                    'config/yardstick_spec_cpu2006.cfg')
            self.client._put_file_shell(self.runspec_config_file, cfg_path)
            self.runspec_config = 'yardstick_spec_cpu2006.cfg'
        else:
            self.runspec_config = "Example-linux64-amd64-gcc43+.cfg"

        self.setup_done = True

    def run(self, result):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        cmd = "cd %s && . ./shrc && runspec --config %s" % (
            self.CPU2006_DIR, self.runspec_config)
        cmd_args = ""

        # key name matches the other runspec_* options; the value feeds --rate
        if "runspec_rate" in self.options:
            cmd_args += " --rate %s" % self.options["runspec_rate"]

        if "output_format" in self.options:
            cmd_args += " --output_format %s" % self.options["output_format"]

        if "runspec_tune" in self.options:
            cmd_args += " --tune %s" % self.options["runspec_tune"]

        benchmark_subset = self.options.get('benchmark_subset', None)
        specint_benchmark = self.options.get('SPECint_benchmark', None)
        specfp_benchmark = self.options.get('SPECfp_benchmark', None)

        if benchmark_subset:
            cmd_args += " %s" % benchmark_subset
        else:
            cmd_args += " --noreportable"

            if "runspec_iterations" in self.options:
                cmd_args += " --iterations %s" % self.options["runspec_iterations"]

            if "runspec_size" in self.options:
                cmd_args += " --size %s" % self.options["runspec_size"]

            if specint_benchmark:
                cmd_args += " %s" % specint_benchmark

            if specfp_benchmark:
                cmd_args += " %s" % specfp_benchmark

        cmd += "%s" % cmd_args

        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd, timeout=86400)
        if status:
            raise RuntimeError(stderr)

        LOG.info('SPEC CPU2006 benchmark completed, please find benchmark '
                 'reports at /tmp/result directory')
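
# Minimal usage sketch (illustrative only; in a real deployment the scenario is
# instantiated and driven by the yardstick task runner, and the host entry comes
# from the task's context section; the IP and key path below are placeholders):
#
#     scenario_cfg = {'options': {'benchmark_subset': 'int',
#                                 'output_format': 'all'}}
#     context_cfg = {'host': {'ip': '192.0.2.10', 'user': 'ubuntu',
#                             'key_filename': '/root/.ssh/id_rsa'}}
#     SpecCPU(scenario_cfg, context_cfg).run({})  # blocks until runspec finishes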