Merge "collectd: write config file from Jinja2 template"
[yardstick.git] / yardstick / network_services / nfvi / resource.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ Resource collection definitions """
15
16 from __future__ import absolute_import
17 from __future__ import print_function
18
19 import logging
20 from itertools import chain
21
22 import jinja2
23 import os
24 import os.path
25 import re
26 import multiprocessing
27 import pkg_resources
28
29 from oslo_config import cfg
30 from oslo_utils.encodeutils import safe_decode
31
32 from yardstick import ssh
33 from yardstick.common.task_template import finalize_for_yaml
34 from yardstick.common.utils import validate_non_string_sequence
35 from yardstick.network_services.nfvi.collectd import AmqpConsumer
36 from yardstick.network_services.utils import get_nsb_option
37
LOG = logging.getLogger(__name__)

# Global oslo.config handle (registered elsewhere in yardstick).
CONF = cfg.CONF
# ZeroMQ endpoint used by the OVS events/stats collectd plugins.
ZMQ_OVS_PORT = 5567
# ZeroMQ polling interval for those plugins, in milliseconds.
ZMQ_POLLING_TIME = 12000
# collectd plugins that are always loaded, in addition to any
# user-supplied plugins passed to ResourceProfile.
LIST_PLUGINS_ENABLED = ["amqp", "cpu", "cpufreq", "memory",
                        "hugepages"]
45
46
class ResourceProfile(object):
    """
    This profile adds a resource at the beginning of the test session.

    It renders a collectd configuration onto the managed node, restarts
    collectd and RabbitMQ there, then consumes the NFVi metrics that
    collectd publishes on the AMQP bus and parses them into a flat
    result dict keyed by resource type.
    """
    # name of the collectd config template/file shipped with this package
    COLLECTD_CONF = "collectd.conf"
    # RabbitMQ broker port on the managed node (name keeps the historic typo)
    AMPQ_PORT = 5672
    # default collectd sampling interval, in seconds
    DEFAULT_INTERVAL = 25

    def __init__(self, mgmt, port_names=None, cores=None, plugins=None, interval=None):
        """
        :param mgmt: management node dict; must contain 'ip' and be usable
            by ssh.AutoConnectSSH.from_node()
        :param port_names: optional sequence of port names for collectd
        :param cores: optional sequence of CPU core ids to report on
        :param plugins: optional mapping of extra collectd plugin -> config
        :param interval: collectd sampling interval; DEFAULT_INTERVAL if None
        """
        self.plugins = {} if plugins is None else plugins
        self.interval = self.DEFAULT_INTERVAL if interval is None else interval
        self.enable = True
        self.cores = validate_non_string_sequence(cores, default=[])
        self._queue = multiprocessing.Queue()
        self.amqp_client = None
        self.port_names = validate_non_string_sequence(port_names, default=[])

        # we need to save mgmt so we can connect to port 5672
        self.mgmt = mgmt
        self.connection = ssh.AutoConnectSSH.from_node(mgmt)

    def check_if_sa_running(self, process):
        """ verify if system agent is running

        :returns: [running (pgrep exit status == 0), pid output string]
        """
        err, pid, _ = self.connection.execute("pgrep -f %s" % process)
        return [err == 0, pid]

    def run_collectd_amqp(self):
        """ run amqp consumer to collect the NFVi data """
        amqp_url = 'amqp://admin:admin@{}:{}/%2F'.format(self.mgmt['ip'], self.AMPQ_PORT)
        amqp = AmqpConsumer(amqp_url, self._queue)
        try:
            amqp.run()
        except (AttributeError, RuntimeError, KeyboardInterrupt):
            amqp.stop()

    @classmethod
    def parse_simple_resource(cls, key, value):
        """Map a '/'-joined key path (minus 'nsb_stats' parts) to its value.

        :param key: iterable of key path components
        :param value: collectd sample encoded as 'timestamp:value'
        :returns: single-entry dict {joined-key: value-part}
        """
        reskey = "/".join(rkey for rkey in key if "nsb_stats" not in rkey)
        return {reskey: value.split(":")[1]}

    @classmethod
    def get_cpu_data(cls, res_key0, res_key1, value):
        """ Get cpu topology of the host

        :returns: (core id, metric name, value, timestamp); on an
            unparsable core id returns ("error", "Invalid", "", "")
        """
        pattern = r"-(\d+)"

        # one of the two key parts names the metric (contains 'cpufreq'),
        # the other encodes the core id, e.g. 'cpu-3'
        if 'cpufreq' in res_key0:
            metric, source = res_key0, res_key1
        else:
            metric, source = res_key1, res_key0

        match = re.search(pattern, source, re.MULTILINE)
        if not match:
            return "error", "Invalid", "", ""

        # collectd encodes samples as 'timestamp:value'
        time, value = value.split(":")
        return str(match.group(1)), metric, value, time

    @classmethod
    def parse_hugepages(cls, key, value):
        """Parse a hugepages metric; plain key/value mapping."""
        return cls.parse_simple_resource(key, value)

    @classmethod
    def parse_dpdkstat(cls, key, value):
        """Parse a dpdkstat metric; plain key/value mapping."""
        return cls.parse_simple_resource(key, value)

    @classmethod
    def parse_virt(cls, key, value):
        """Parse a virt metric; plain key/value mapping."""
        return cls.parse_simple_resource(key, value)

    @classmethod
    def parse_ovs_stats(cls, key, value):
        """Parse an OVS stats metric; plain key/value mapping."""
        return cls.parse_simple_resource(key, value)

    @classmethod
    def parse_intel_pmu_stats(cls, key, value):
        """Parse an intel_pmu metric; key parts are concatenated, not joined."""
        return {''.join(str(v) for v in key): value.split(":")[1]}

    def parse_collectd_result(self, metrics, core_list):
        """ convert collectd data into json

        :param metrics: mapping of collectd key path -> 'timestamp:value'
        :param core_list: CPU core ids to keep in the "cpu" section
        :returns: dict grouped by resource type, plus a "timestamp" entry
            carrying the last cpu sample's timestamp
        """
        result = {
            "cpu": {},
            "memory": {},
            "hugepages": {},
            "dpdkstat": {},
            "virt": {},
            "ovs_stats": {},
            "intel_pmu": {},
        }
        testcase = ""

        # unicode decode
        decoded = ((safe_decode(k, 'utf-8'), safe_decode(v, 'utf-8')) for k, v in metrics.items())
        for key, value in decoded:
            key_split = key.split("/")
            res_key_iter = (rkey for rkey in key_split if "nsb_stats" not in rkey)
            # use next() with a default so a malformed key with fewer than
            # two usable components is skipped instead of raising
            # StopIteration and aborting the whole parse
            res_key0 = next(res_key_iter, None)
            res_key1 = next(res_key_iter, None)
            if res_key0 is None or res_key1 is None:
                continue

            if "cpu" in res_key0 or "intel_rdt" in res_key0:
                cpu_key, name, metric, testcase = \
                    self.get_cpu_data(res_key0, res_key1, value)
                if cpu_key in core_list:
                    result["cpu"].setdefault(cpu_key, {}).update({name: metric})

            elif "memory" in res_key0:
                result["memory"].update({res_key1: value.split(":")[0]})

            elif "hugepages" in res_key0:
                result["hugepages"].update(self.parse_hugepages(key_split, value))

            elif "dpdkstat" in res_key0:
                result["dpdkstat"].update(self.parse_dpdkstat(key_split, value))

            elif "virt" in res_key1:
                result["virt"].update(self.parse_virt(key_split, value))

            elif "ovs_stats" in res_key0:
                result["ovs_stats"].update(self.parse_ovs_stats(key_split, value))

            elif "intel_pmu-all" in res_key0:
                result["intel_pmu"].update(self.parse_intel_pmu_stats(res_key1, value))

        result["timestamp"] = testcase

        return result

    def amqp_process_for_nfvi_kpi(self):
        """ spawn the amqp consumer process (once) to collect nfvi kpis """
        if self.amqp_client is None and self.enable:
            self.amqp_client = \
                multiprocessing.Process(target=self.run_collectd_amqp)
            self.amqp_client.start()

    def amqp_collect_nfvi_kpi(self):
        """ drain the amqp queue and return parsed nfvi kpis """
        if not self.enable:
            return {}

        metric = {}
        # best-effort drain; the consumer process may still be adding items
        while not self._queue.empty():
            metric.update(self._queue.get())
        msg = self.parse_collectd_result(metric, self.cores)
        return msg

    def _provide_config_file(self, config_file_path, nfvi_cfg, template_kwargs):
        """Render the Jinja2 config template and write it on the remote node.

        :param config_file_path: remote directory for the rendered file
        :param nfvi_cfg: template/file name (resource within this package)
        :param template_kwargs: values passed to the template
        """
        template = pkg_resources.resource_string("yardstick.network_services.nfvi",
                                                 nfvi_cfg).decode('utf-8')
        cfg_content = jinja2.Template(template, trim_blocks=True, lstrip_blocks=True,
                                      finalize=finalize_for_yaml).render(
            **template_kwargs)
        cfg_file = os.path.join(config_file_path, nfvi_cfg)
        # must write as root, so use sudo
        self.connection.execute("cat | sudo tee {}".format(cfg_file), stdin=cfg_content)

    def _prepare_collectd_conf(self, config_file_path):
        """ Prepare collectd conf """

        kwargs = {
            "interval": self.interval,
            # always-on plugins plus any user-requested ones, de-duplicated
            "loadplugins": set(chain(LIST_PLUGINS_ENABLED, self.plugins.keys())),
            # Optional fields PortName is descriptive only, use whatever is present
            "port_names": self.port_names,
            "plugins": self.plugins,
        }
        self._provide_config_file(config_file_path, self.COLLECTD_CONF, kwargs)

    def _start_collectd(self, connection, bin_path):
        """Provision collectd config and (re)start collectd + RabbitMQ.

        :param bin_path: NSB installation prefix on the node; falls back to
            the configured nsb "bin_path" option when not supplied
        """
        LOG.debug("Starting collectd to collect NFVi stats")
        connection.execute('sudo pkill -x -9 collectd')
        # honor the caller-supplied path; the previous code unconditionally
        # overwrote the parameter with the configured option
        if not bin_path:
            bin_path = get_nsb_option("bin_path")
        collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
        config_file_path = os.path.join(bin_path, "collectd", "etc")
        exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
        if exit_status != 0:
            # auto-provisioning is disabled because it requires Internet
            # access; just warn and bail out when collectd is absent
            LOG.warning("%s is not present disabling", collectd_path)
            return
        # ensure collectd.conf.d exists to avoid error/warning
        connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
        self._prepare_collectd_conf(config_file_path)

        # Reset amqp queue
        LOG.debug("reset and setup amqp to collect data from collectd")
        connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
        connection.execute("sudo service rabbitmq-server start")
        connection.execute("sudo rabbitmqctl stop_app")
        connection.execute("sudo rabbitmqctl reset")
        connection.execute("sudo rabbitmqctl start_app")
        connection.execute("sudo service rabbitmq-server restart")

        LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd")
        connection.execute("sudo rabbitmqctl delete_user guest")
        connection.execute("sudo rabbitmqctl add_user admin admin")
        connection.execute("sudo rabbitmqctl authenticate_user admin admin")
        connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")

        LOG.debug("Start collectd service.....")
        connection.execute("sudo %s" % collectd_path)
        LOG.debug("Done")

    def initiate_systemagent(self, bin_path):
        """ Start system agent for NFVi collection on host """
        if self.enable:
            try:
                self._start_collectd(self.connection, bin_path)
            except Exception:
                LOG.exception("Exception during collectd start")
                raise

    def start(self):
        """ start nfvi collection """
        if self.enable:
            LOG.debug("Start NVFi metric collection...")

    def stop(self):
        """ stop nfvi collection: kill the consumer, collectd and rabbitmq """
        if not self.enable:
            return

        agent = "collectd"
        LOG.debug("Stop resource monitor...")

        if self.amqp_client is not None:
            self.amqp_client.terminate()

        status, pid = self.check_if_sa_running(agent)
        # nothing to kill when the agent is not running
        if status == 0:
            return

        self.connection.execute('sudo kill -9 %s' % pid)
        self.connection.execute('sudo pkill -9 %s' % agent)
        self.connection.execute('sudo service rabbitmq-server stop')
        self.connection.execute("sudo rabbitmqctl stop_app")