Merge "Collectd Fixes"
yardstick/network_services/nfvi/resource.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Resource collection definitions """

from __future__ import absolute_import
from __future__ import print_function
import tempfile
import logging
import os
import os.path
import re
import multiprocessing
from collections import Sequence

from oslo_config import cfg

from yardstick import ssh
from yardstick.network_services.nfvi.collectd import AmqpConsumer
from yardstick.network_services.utils import get_nsb_option

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
ZMQ_OVS_PORT = 5567
ZMQ_POLLING_TIME = 12000
LIST_PLUGINS_ENABLED = ["amqp", "cpu", "cpufreq", "intel_rdt", "memory",
                        "hugepages", "dpdkstat", "virt", "ovs_stats"]


class ResourceProfile(object):
    """
    Resource profile: starts collectd on the target node and gathers
    NFVi statistics over AMQP for the duration of the test session
    """
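    # Rough usage sketch (illustrative values only; the mgmt dict normally
    # comes from the test context and must carry working SSH credentials,
    # and the interface/core lists depend on the SUT):
    #
    #   profile = ResourceProfile(mgmt={"ip": "10.0.0.5", "user": "root",
    #                                   "password": "root"},
    #                             interfaces=[{"name": "xe0"}],
    #                             cores=["0", "1"])
    #   profile.initiate_systemagent("/opt/nsb_bin")
    #   profile.amqp_process_for_nfvi_kpi()
    #   kpis = profile.amqp_collect_nfvi_kpi()
    #   profile.stop()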

    def __init__(self, mgmt, interfaces=None, cores=None):
        self.enable = True
        self.connection = None
        self.cores = cores if isinstance(cores, Sequence) else []
        self._queue = multiprocessing.Queue()
        self.amqp_client = None
        self.interfaces = interfaces if isinstance(interfaces, Sequence) else []

        # prefer the explicit "host" address when present, otherwise fall
        # back to the management "ip"
        self.vnfip = mgmt.get("host", mgmt["ip"])
        self.connection = ssh.SSH.from_node(mgmt, overrides={"ip": self.vnfip})

        self.connection.wait()

    def check_if_sa_running(self, process):
        """ verify if system agent is running """
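        # illustrative result only: the pid string is whatever pgrep prints
        # on the remote host, e.g. check_if_sa_running("collectd") might
        # return [True, "1234"]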
        err, pid, _ = self.connection.execute("pgrep -f %s" % process)
        return [err == 0, pid]

    def run_collectd_amqp(self):
        """ run amqp consumer to collect the NFVi data """
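        # the broker URL below assumes RabbitMQ on the collectd host accepts
        # the admin/admin account on the default vhost ("%2F" is the
        # URL-encoded "/")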
        amqp_url = 'amqp://admin:admin@{}:5672/%2F'.format(self.vnfip)
        amqp = AmqpConsumer(amqp_url, self._queue)
        try:
            amqp.run()
        except (AttributeError, RuntimeError, KeyboardInterrupt):
            amqp.stop()

    @classmethod
    def parse_simple_resource(cls, key, value):
        reskey = "/".join(rkey for rkey in key if "nsb_stats" not in rkey)
        return {reskey: value.split(":")[1]}

    @classmethod
    def get_cpu_data(cls, res_key0, res_key1, value):
        """ extract (core, metric, value, timestamp) from a cpu/cpufreq sample """
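        # illustrative sample (shapes inferred from the callers in this
        # module): get_cpu_data("cpu-0", "cpu-idle", "1452077372.2:95.0")
        # returns ("0", "cpu-idle", "95.0", "1452077372.2")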
        pattern = r"-(\d+)"

        if 'cpufreq' in res_key0:
            metric, source = res_key0, res_key1
        else:
            metric, source = res_key1, res_key0

        match = re.search(pattern, source, re.MULTILINE)
        if not match:
            return "error", "Invalid", "", ""

        time, value = value.split(":")
        return str(match.group(1)), metric, value, time

    @classmethod
    def parse_hugepages(cls, key, value):
        return cls.parse_simple_resource(key, value)

    @classmethod
    def parse_dpdkstat(cls, key, value):
        return cls.parse_simple_resource(key, value)

    @classmethod
    def parse_virt(cls, key, value):
        return cls.parse_simple_resource(key, value)

    @classmethod
    def parse_ovs_stats(cls, key, value):
        return cls.parse_simple_resource(key, value)

    def parse_collectd_result(self, metrics, core_list):
        """ convert collectd data into a result dict keyed by resource type """
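        # expected shapes, inferred from the parsing below: metric keys are
        # "/"-separated with any "nsb_stats" components dropped, and values
        # carry a "timestamp:value" pair, e.g.
        #   {"nsb_stats/cpu-0/cpu-idle": "1452077372.2:95.0"}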
        result = {
            "cpu": {},
            "memory": {},
            "hugepages": {},
            "dpdkstat": {},
            "virt": {},
            "ovs_stats": {},
        }
        testcase = ""

        for key, value in metrics.items():
            key_split = key.split("/")
            res_key_iter = (key for key in key_split if "nsb_stats" not in key)
            res_key0 = next(res_key_iter)
            res_key1 = next(res_key_iter)

            if "cpu" in res_key0 or "intel_rdt" in res_key0:
                cpu_key, name, metric, testcase = \
                    self.get_cpu_data(res_key0, res_key1, value)
                if cpu_key in core_list:
                    result["cpu"].setdefault(cpu_key, {}).update({name: metric})

            elif "memory" in res_key0:
                result["memory"].update({res_key1: value.split(":")[0]})

            elif "hugepages" in res_key0:
                result["hugepages"].update(self.parse_hugepages(key_split, value))

            elif "dpdkstat" in res_key0:
                result["dpdkstat"].update(self.parse_dpdkstat(key_split, value))

            elif "virt" in res_key1:
                result["virt"].update(self.parse_virt(key_split, value))

            elif "ovs_stats" in res_key0:
                result["ovs_stats"].update(self.parse_ovs_stats(key_split, value))

        result["timestamp"] = testcase

        return result

    def amqp_process_for_nfvi_kpi(self):
        """ start the amqp consumer process that collects nfvi kpis """
        if self.amqp_client is None and self.enable:
            self.amqp_client = \
                multiprocessing.Process(target=self.run_collectd_amqp)
            self.amqp_client.start()

    def amqp_collect_nfvi_kpi(self):
        """ collect and return nfvi kpis gathered by the amqp consumer """
        if not self.enable:
            return {}

        metric = {}
        while not self._queue.empty():
            metric.update(self._queue.get())
        msg = self.parse_collectd_result(metric, self.cores)
        return msg

    def _provide_config_file(self, bin_path, nfvi_cfg, kwargs):
        # render the local template and upload the result to the same path
        # on the target node
        with open(os.path.join(bin_path, nfvi_cfg), 'r') as cfg:
            template = cfg.read()
        cfg_fd, cfg_path = tempfile.mkstemp()
        with os.fdopen(cfg_fd, "w+") as cfg_file:
            cfg_file.write(template.format(**kwargs))
        self.connection.put(cfg_path, os.path.join(bin_path, nfvi_cfg))

    def _prepare_collectd_conf(self, bin_path):
        """ Prepare collectd conf """
        loadplugin = "\n".join("LoadPlugin {0}".format(plugin)
                               for plugin in LIST_PLUGINS_ENABLED)

        interfaces = "\n".join("PortName '{0[name]}'".format(interface)
                               for interface in self.interfaces)
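        # when substituted into the collectd.conf template these expand to
        # lines such as "LoadPlugin cpu" and "PortName 'xe0'" (interface
        # names come from the test context)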

        kwargs = {
            "interval": '25',
            "loadplugin": loadplugin,
            "dpdk_interface": interfaces,
        }

        self._provide_config_file(bin_path, 'collectd.conf', kwargs)

    def _start_collectd(self, connection, bin_path):
        connection.execute('sudo pkill -9 collectd')
        # the NSB option always wins over the supplied bin_path argument
        bin_path = get_nsb_option("bin_path")
        collectd_path = os.path.join(bin_path, "collectd", "collectd")
        exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
        if exit_status != 0:
            LOG.warning("%s is not present, disabling collectd", collectd_path)
            # disable auto-provisioning because it requires Internet access
            # collectd_installer = os.path.join(bin_path, "collectd.sh")
            # provision_tool(connection, collectd)
            # http_proxy = os.environ.get('http_proxy', '')
            # https_proxy = os.environ.get('https_proxy', '')
            # connection.execute("sudo %s '%s' '%s'" % (
            #     collectd_installer, http_proxy, https_proxy))
            return
        LOG.debug("Starting collectd to collect NFVi stats")
        self._prepare_collectd_conf(bin_path)

        # reset the amqp broker to a clean state
        LOG.debug("Resetting and setting up RabbitMQ to collect data from collectd")
        connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
        connection.execute("sudo service rabbitmq-server start")
        connection.execute("sudo rabbitmqctl stop_app")
        connection.execute("sudo rabbitmqctl reset")
        connection.execute("sudo rabbitmqctl start_app")
        connection.execute("sudo service rabbitmq-server restart")

        LOG.debug("Starting collectd service...")
        connection.execute("sudo %s" % collectd_path)
        LOG.debug("Done")

    def initiate_systemagent(self, bin_path):
        """ Start system agent for NFVi collection on host """
        if self.enable:
            self._start_collectd(self.connection, bin_path)

    def start(self):
        """ start nfvi collection """
        if self.enable:
            LOG.debug("Start NFVi metric collection...")

    def stop(self):
        """ stop nfvi collection """
        if not self.enable:
            return

        agent = "collectd"
        LOG.debug("Stop resource monitor...")

        if self.amqp_client is not None:
            self.amqp_client.terminate()

        status, pid = self.check_if_sa_running(agent)
        if not status:
            # collectd is not running, nothing to kill
            return

        self.connection.execute('sudo kill -9 %s' % pid)
        self.connection.execute('sudo pkill -9 %s' % agent)
        self.connection.execute('sudo service rabbitmq-server stop')
        self.connection.execute("sudo rabbitmqctl stop_app")