[ansible][fedora] Update package name
[barometer.git] / baro_tests / config_server.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License"); you may
6 # not use this file except in compliance with the License. You may obtain
7 # a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 # License for the specific language governing permissions and limitations
15 # under the License.
16 #
17
18 """Classes used by collectd.py"""
19
20 import time
21 import os.path
22 import os
23 import re
24 import yaml
25
26 from opnfv.deployment import factory
27 import paramiko
28 from functest.utils import constants
29
# Paths and credentials used throughout the collectd test helpers.
ID_RSA_PATH = '/root/.ssh/id_rsa'
SSH_KEYS_SCRIPT = '/home/opnfv/barometer/baro_utils/get_ssh_keys.sh'
DEF_PLUGIN_INTERVAL = 10
COLLECTD_CONF = '/etc/collectd.conf'
COLLECTD_CONF_DIR = '/etc/collectd/collectd.conf.d'
NOTIFICATION_FILE = '/var/log/python-notifications.dump'
COLLECTD_NOTIFICATION = '/etc/collectd_notification_dump.py'
# bug fix: default to '' so a missing INSTALLER_IP environment variable
# yields an empty string instead of AttributeError on None at import time.
APEX_IP = os.getenv("INSTALLER_IP", "").rstrip('\n')
APEX_USER = 'root'
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
TEST_VM_IMAGE = 'cirros-0.4.0-x86_64-disk.img'
TEST_VM_IMAGE_PATH = '/home/opnfv/functest/images/' + TEST_VM_IMAGE
43
44
class Node(object):
    """Node configuration parsed from one row of `nova list` table output."""

    def __init__(self, attrs):
        """Store the table columns of one node row.

        Keyword arguments:
        attrs -- list of cell values in `nova list` column order:
                 [<empty>, id, name, status, task-state, power-state,
                  '<network>=<ip>' string]
        """
        self.__null = attrs[0]
        self.__id = attrs[1]
        self.__name = attrs[2]
        self.__status = attrs[3] if attrs[3] else None
        self.__taskState = attrs[4]
        self.__pwrState = attrs[5]
        # strip the leading 'ctlplane=' style network prefix from the IP
        self.__ip = re.sub('^[a-z]+=', '', attrs[6])
        # bug fix: get_roles() previously referenced self.__roles which was
        # never assigned, so it always raised AttributeError.  Roles are not
        # part of the `nova list` table, so default to an empty list.
        self.__roles = []

    def get_name(self):
        """Get node name"""
        return self.__name

    def get_id(self):
        """Get node ID"""
        return self.__id

    def get_ip(self):
        """Get node IP address"""
        return self.__ip

    def get_roles(self):
        """Get node roles (always [] -- roles are not parsed from the table)"""
        return self.__roles
71
72
def get_apex_nodes():
    """Return the list of overcloud nodes known to the Apex installer."""
    deployment = factory.Factory.get_handler(
        'apex', APEX_IP, APEX_USER_STACK, APEX_PKEY)
    return deployment.get_nodes()
80
81
class ConfigServer(object):
    """Class to get env configuration"""

    def __init__(self, host, user, logger, priv_key=None):
        """Open an SSH session to the undercloud and parse the node list.

        Keyword arguments:
        host -- undercloud host to connect to
        user -- SSH user name
        logger -- logger instance used for all diagnostics
        priv_key -- optional paramiko private key object

        Raises IOError when the private key file is missing and OSError
        when listing the nodes keeps failing after all retries.
        """
        self.__host = host
        self.__user = user
        self.__passwd = None
        self.__priv_key = priv_key
        self.__nodes = list()
        self.__logger = logger

        self.__private_key_file = ID_RSA_PATH
        if not os.path.isfile(self.__private_key_file):
            self.__logger.error(
                "Private key file '{}'".format(self.__private_key_file)
                + " not found.")
            raise IOError("Private key file '{}' not found.".format(
                self.__private_key_file))

        # get list of available nodes
        ssh, sftp = self.__open_sftp_session(
            self.__host, self.__user, self.__passwd)
        attempt = 1
        fuel_node_passed = False

        # `nova list` on the undercloud is flaky right after deployment;
        # retry up to 10 times before giving up.
        while (attempt <= 10) and not fuel_node_passed:
            stdin, stdout, stderr = ssh.exec_command(
                "source stackrc; nova list")
            stderr_lines = stderr.readlines()
            if stderr_lines:
                self.__logger.warning(
                    "'Apex node' command failed (try {}):".format(attempt))
                for line in stderr_lines:
                    self.__logger.debug(line.strip())
            else:
                fuel_node_passed = True
                if attempt > 1:
                    self.__logger.info(
                        "'Apex node' command passed (try {})".format(attempt))
            attempt += 1
        if not fuel_node_passed:
            self.__logger.error(
                "'Apex node' command failed. This was the last try.")
            raise OSError(
                "'Apex node' command failed. This was the last try.")
        # bug fix: dropped a stray trailing backslash line-continuation here
        node_table = stdout.readlines()

        # skip table title and parse table values
        for entry in node_table[3:]:
            if entry[0] == '+' or entry[0] == '\n':
                # table border or blank row -- nothing to parse
                # (bug fix: replaced a leftover py2 debug `print entry`)
                self.__logger.debug(entry)
            else:
                self.__nodes.append(
                    Node([str(x.strip(' \n')) for x in entry.split('|')]))
138     def get_controllers(self):
139         # Get list of controllers
140         print self.__nodes[0]._Node__ip
141         return (
142             [node for node in self.__nodes if 'controller' in node.get_name()])
143
144     def get_computes(self):
145         # Get list of computes
146         return (
147             [node for node in self.__nodes if 'compute' in node.get_name()])
148
    def get_nodes(self):
        """Return the list of Node objects discovered in __init__."""
        # Get list of nodes
        return self.__nodes
152
153     def __open_sftp_session(self, host, user, passwd=None):
154         # Connect to given host.
155         """Keyword arguments:
156         host -- host to connect
157         user -- user to use
158         passwd -- password to use
159
160         Return tuple of SSH and SFTP client instances.
161         """
162         # create SSH client
163         ssh = paramiko.SSHClient()
164         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
165
166         # try a direct access using password or private key
167         if not passwd and not self.__priv_key:
168             # get private key
169             self.__priv_key = paramiko.RSAKey.from_private_key_file(
170                 self.__private_key_file)
171
172         # connect to the server
173         ssh.connect(
174             host, username=user, password=passwd, pkey=self.__priv_key)
175         sftp = ssh.open_sftp()
176
177         # return SFTP client instance
178         return ssh, sftp
179
180     def get_plugin_interval(self, compute, plugin):
181         """Find the plugin interval in collectd configuration.
182
183         Keyword arguments:
184         compute -- compute node instance
185         plugin -- plug-in name
186
187         If found, return interval value, otherwise the default value"""
188         default_interval = DEF_PLUGIN_INTERVAL
189         compute_name = compute.get_name()
190         nodes = get_apex_nodes()
191         for node in nodes:
192             if compute_name == node.get_dict()['name']:
193                 stdout = node.run_cmd(
194                     'cat /etc/collectd/collectd.conf.d/{}.conf'.format(plugin))
195                 if stdout is None:
196                     return default_interval
197                 for line in stdout.split('\n'):
198                     if 'Interval' in line:
199                         return 1
200         return default_interval
201
202     def get_plugin_config_values(self, compute, plugin, parameter):
203         """Get parameter values from collectd config file.
204
205         Keyword arguments:
206         compute -- compute node instance
207         plugin -- plug-in name
208         parameter -- plug-in parameter
209
210         Return list of found values."""
211         default_values = []
212         compute_name = compute.get_name()
213         nodes = get_apex_nodes()
214         for node in nodes:
215             if compute_name == node.get_dict()['name']:
216                 stdout = node.run_cmd(
217                     'cat /etc/collectd/collectd.conf.d/{}.conf' .format(plugin))
218                 if stdout is None:
219                     return default_values
220                 for line in stdout.split('\n'):
221                     if 'Interfaces' in line:
222                         return line.split(' ', 1)[1]
223                     elif 'Bridges' in line:
224                         return line.split(' ', 1)[1]
225                     elif 'Cores' in line:
226                         return line.split(' ', 1)[1]
227                     else:
228                         pass
229         return default_values
230
231     def execute_command(self, command, host_ip=None, ssh=None):
232         """Execute command on node and return list of lines of standard output.
233
234         Keyword arguments:
235         command -- command
236         host_ip -- IP of the node
237         ssh -- existing open SSH session to use
238
239         One of host_ip or ssh must not be None. If both are not None,
240         existing ssh session is used.
241         """
242         if host_ip is None and ssh is None:
243             raise ValueError('One of host_ip or ssh must not be None.')
244         if ssh is None:
245             ssh, sftp = self.__open_sftp_session(host_ip, 'root', 'opnfvapex')
246         stdin, stdout, stderr = ssh.exec_command(command)
247         return stdout.readlines()
248
249     def get_ovs_interfaces(self, compute):
250         """Get list of configured OVS interfaces
251
252         Keyword arguments:
253         compute -- compute node instance
254         """
255         compute_name = compute.get_name()
256         nodes = get_apex_nodes()
257         for node in nodes:
258             if compute_name == node.get_dict()['name']:
259                 stdout = node.run_cmd('sudo ovs-vsctl list-br')
260         return stdout
261
262     def is_gnocchi_running(self, controller):
263         """Check whether Gnocchi is running on controller.
264
265         Keyword arguments:
266         controller -- controller node instance
267
268         Return boolean value whether Gnocchi is running.
269         """
270         gnocchi_present = False
271         controller_name = controller.get_name()
272         nodes = get_apex_nodes()
273         for node in nodes:
274             if controller_name == node.get_dict()['name']:
275                 node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
276                 stdout = node.run_cmd(
277                     "source overcloudrc.v3;"
278                     + "openstack catalog list | grep gnocchi")
279                 if stdout is None:
280                     return False
281                 elif 'gnocchi' in stdout:
282                     gnocchi_present = True
283                     return gnocchi_present
284                 else:
285                     return False
286         return gnocchi_present
287
288     def is_aodh_running(self, controller):
289         """Check whether aodh service is running on controller
290         """
291         aodh_present = False
292         controller_name = controller.get_name()
293         nodes = get_apex_nodes()
294         for node in nodes:
295             if controller_name == node.get_dict()['name']:
296                 node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
297                 stdout = node.run_cmd(
298                     "source overcloudrc.v3;"
299                     + "openstack catalog list | grep aodh")
300                 if stdout is None:
301                     return False
302                 elif 'aodh' in stdout:
303                     aodh_present = True
304                     return aodh_present
305                 else:
306                     return False
307         return aodh_present
308
309     def is_redis_running(self, compute):
310         """Check whether redis service is running on compute"""
311         compute_name = compute.get_name()
312         nodes = get_apex_nodes()
313         for node in nodes:
314             if compute_name == node.get_dict()['name']:
315                 stdout = node.run_cmd('sudo systemctl status docker'
316                                       '&& sudo docker ps'
317                                       '| grep barometer-redis')
318                 if stdout and 'barometer-redis' in stdout:
319                     self.__logger.info(
320                         'Redis is running in node {}'.format(
321                          compute_name))
322                     return True
323         self.__logger.info(
324             'Redis is *not* running in node {}'.format(
325              compute_name))
326         return False
327
328     def is_dma_server_running(self, compute):
329         """Check whether DMA server is running on compute"""
330         compute_name = compute.get_name()
331         nodes = get_apex_nodes()
332         for node in nodes:
333             if compute_name == node.get_dict()['name']:
334                 stdout = node.run_cmd('sudo systemctl status docker'
335                                       '&& sudo docker ps'
336                                       '| grep opnfv/barometer-dma')
337                 if stdout and '/server' in stdout:
338                     self.__logger.info(
339                         'DMA Server is running in node {}'.format(
340                          compute_name))
341                     return True
342         self.__logger.info(
343             'DMA Server is *not* running in node {}'.format(
344              compute_name))
345         return False
346
347     def is_dma_infofetch_running(self, compute):
348         """Check whether DMA infofetch is running on compute"""
349         compute_name = compute.get_name()
350         nodes = get_apex_nodes()
351         for node in nodes:
352             if compute_name == node.get_dict()['name']:
353                 stdout = node.run_cmd('sudo systemctl status docker'
354                                       '&& sudo docker ps'
355                                       '| grep opnfv/barometer-dma')
356                 if stdout and '/infofetch' in stdout:
357                     self.__logger.info(
358                         'DMA InfoFetch is running in node {}'.format(
359                          compute_name))
360                     return True
361         self.__logger.info(
362             'DMA InfoFetch is *not* running in node {}'.format(
363              compute_name))
364         return False
365
366     def get_dma_config(self, compute):
367         """Get config values of DMA"""
368         compute_name = compute.get_name()
369         nodes = get_apex_nodes()
370         for node in nodes:
371             if compute_name == node.get_dict()['name']:
372                 # We use following after functest accept python-toml
373                 #     stdout = node.run_cmd(
374                 #         'cat /etc/barometer-dma/config.toml')
375                 #     try:
376                 #         agent_conf = toml.loads(stdout)
377                 #     except (TypeError, TomlDecodeError) as e:
378                 #         self.__logger.error(
379                 #             'DMA config error: {}'.format(e))
380                 #         agent_conf = None
381                 #     finally:
382                 #         return agent_conf
383                 readcmd = (
384                     'egrep "listen_port|amqp_"'
385                     ' /etc/barometer-dma/config.toml'
386                     '| sed -e "s/#.*$//" | sed -e "s/=/:/"'
387                     )
388                 stdout = node.run_cmd(readcmd)
389                 agent_conf = {"server": yaml.safe_load(stdout)}
390
391                 pingcmd = (
392                     'ping -n -c1 ' + agent_conf["server"]["amqp_host"] +
393                     '| sed -ne "s/^.*bytes from //p" | sed -e "s/:.*//"'
394                     )
395                 agent_conf["server"]["amqp_host"] = node.run_cmd(pingcmd)
396
397                 return agent_conf
398         return None
399
400     def is_mcelog_installed(self, compute, package):
401         """Check whether package exists on compute node.
402
403         Keyword arguments:
404         compute -- compute node instance
405         package -- Linux package to search for
406
407         Return boolean value whether package is installed.
408         """
409         compute_name = compute.get_name()
410         nodes = get_apex_nodes()
411         for node in nodes:
412             if compute_name == node.get_dict()['name']:
413                 stdout = node.run_cmd(
414                     'rpm -qa | grep mcelog')
415                 if stdout is None:
416                     return 0
417                 elif 'mcelog' in stdout:
418                     return 1
419                 else:
420                     return 0
421
422     def is_rdt_available(self, compute):
423         """Check whether the compute node is a virtual machine."""
424         compute_name = compute.get_name()
425         nodes = get_apex_nodes()
426         for node in nodes:
427             if compute_name == node.get_dict()['name']:
428                 stdout = node.run_cmd('cat /proc/cpuinfo | grep hypervisor')
429                 if 'hypervisor' in stdout:
430                     return False
431         return True
432
433     def is_libpqos_on_node(self, compute):
434         """Check whether libpqos is present on compute node"""
435
436         compute_name = compute.get_name()
437         nodes = get_apex_nodes()
438         for node in nodes:
439             if compute_name == node.get_dict()['name']:
440                 stdout = node.run_cmd('ls /usr/local/lib/ | grep libpqos')
441                 if 'libpqos' in stdout:
442                     return True
443         return False
444
445     def check_aodh_plugin_included(self, compute):
446         """Check if aodh plugin is included in collectd.conf file.
447         If not, try to enable it.
448
449         Keyword arguments:
450         compute -- compute node instance
451
452         Return boolean value whether AODH plugin is included
453         or it's enabling was successful.
454         """
455         compute_name = compute.get_name()
456         nodes = get_apex_nodes()
457         for node in nodes:
458             if compute_name == node.get_dict()['name']:
459                 aodh_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
460                 if 'aodh.conf' not in aodh_conf:
461                     self.__logger.info(
462                         "AODH Plugin not included in {}".format(compute_name))
463                     return False
464                 else:
465                     self.__logger.info(
466                         "AODH plugin present in compute node {}" .format(
467                             compute_name))
468                     return True
469         return True
470
471     def check_gnocchi_plugin_included(self, compute):
472         """Check if gnocchi plugin is included in collectd.conf file.
473         If not, try to enable it.
474
475         Keyword arguments:
476         compute -- compute node instance
477
478         Return boolean value whether gnocchi plugin is included
479         or it's enabling was successful.
480         """
481         compute_name = compute.get_name()
482         nodes = get_apex_nodes()
483         for node in nodes:
484             if compute_name == node.get_dict()['name']:
485                 gnocchi_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
486                 if 'collectd-ceilometer-plugin.conf' not in gnocchi_conf:
487                     self.__logger.info(
488                         "Gnocchi Plugin not included in node {}".format(
489                             compute_name))
490                     return False
491                 else:
492                     self.__logger.info(
493                         "Gnocchi plugin available in compute node {}" .format(
494                             compute_name))
495                     return True
496         return True
497
498     def check_snmp_plugin_included(self, compute):
499         """Check if SNMP plugin is active in compute node.
500         """
501         snmp_mib = '/usr/share/snmp/mibs/Intel-Rdt.txt'
502         snmp_string = 'INTEL-RDT-MIB::intelRdt'
503         compute_name = compute.get_name()
504         nodes = get_apex_nodes()
505         for node in nodes:
506             if compute_name == node.get_dict()['name']:
507                 stdout = node.run_cmd(
508                     'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
509                         snmp_mib, snmp_string))
510                 self.__logger.info("snmp output = {}" .format(stdout))
511                 if 'OID' in stdout:
512                     return False
513                 else:
514                     return True
515
516     def enable_plugins(
517             self, compute, plugins, error_plugins, create_backup=True):
518         """Enable plugins on compute node
519
520         Keyword arguments:
521         compute -- compute node instance
522         plugins -- list of plugins to be enabled
523
524         Return boolean value indicating whether function was successful.
525         """
526         csv_file = os.path.dirname(os.path.realpath(__file__)) + '/csv.conf'
527         plugins = sorted(plugins)
528         compute_name = compute.get_name()
529         nodes = get_apex_nodes()
530         for node in nodes:
531             if compute_name == node.get_dict()['name']:
532                 node.put_file(csv_file, 'csv.conf')
533                 node.run_cmd(
534                     'sudo cp csv.conf '
535                     + '/etc/collectd/collectd.conf.d/csv.conf')
536         return True
537
538     def restart_collectd(self, compute):
539         """Restart collectd on compute node.
540
541         Keyword arguments:
542         compute -- compute node instance
543
544         Retrun tuple with boolean indicating success and list of warnings
545         received during collectd start.
546         """
547         compute_name = compute.get_name()
548         nodes = get_apex_nodes()
549
550         def get_collectd_processes(compute_node):
551             """Get number of running collectd processes.
552
553             Keyword arguments:
554             ssh_session -- instance of SSH session in which to check
555                 for processes
556             """
557             stdout = compute_node.run_cmd("pgrep collectd")
558             return len(stdout)
559
560         for node in nodes:
561             if compute_name == node.get_dict()['name']:
562                 # node.run_cmd('su; "opnfvapex"')
563                 self.__logger.info('Stopping collectd service...')
564                 node.run_cmd('sudo systemctl stop collectd')
565                 time.sleep(10)
566                 if get_collectd_processes(node):
567                     self.__logger.error('Collectd is still running...')
568                     return False, []
569                 self.__logger.info('Starting collectd service...')
570                 stdout = node.run_cmd('sudo systemctl start collectd')
571                 time.sleep(10)
572                 warning = [
573                     output.strip() for output in stdout if 'WARN: ' in output]
574                 if get_collectd_processes(node) == 0:
575                     self.__logger.error('Collectd is still not running...')
576                     return False, warning
577         return True, warning
578
579     def trigger_alarm_update(self, alarm, compute_node):
580         # TODO: move these actions to main, with criteria lists so that we can reference that
581         # i.e. test_plugin_with_aodh(self, compute, plugin.., logger, criteria_list, alarm_action)
582         if alarm == 'mcelog':
583             compute_node.run_cmd('sudo modprobe mce-inject')
584             compute_node.run_cmd('sudo ./mce-inject_ea < corrected')
585         if alarm == 'ovs_events':
586             compute_node.run_cmd('sudo ifconfig -a | grep br0')
587             compute_node.run_cmd('sudo ifconfig br0 down; sudo ifconfig br0 up')
588
    def test_plugins_with_aodh(
            self, compute, plugin_interval, logger,
            criteria_list=[]):
        """Check that an AODH alarm for *criteria_list* on *compute* updates.

        Triggers the alarm, reads its state_timestamp, triggers it again,
        waits 12 seconds and compares; returns True when the timestamp
        changed, False otherwise.
        NOTE(review): `criteria_list=[]` is a mutable default (never
        mutated here, but fragile) and is passed where a single alarm name
        appears to be expected -- confirm against callers.
        """
        metric_id = {}
        timestamps1 = {}
        timestamps2 = {}
        nodes = get_apex_nodes()
        compute_node = [node for node in nodes if node.get_dict()['name'] == compute][0]
        for node in nodes:
            if node.is_controller():
                self.__logger.info('Getting AODH Alarm list on {}' .format(
                    (node.get_dict()['name'])))
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                self.trigger_alarm_update(criteria_list, compute_node)
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "aodh alarm list | grep {0} | grep {1}"
                    .format(criteria_list, compute))
                if stdout is None:
                    self.__logger.info("aodh alarm list was empty")
                    return False
                for line in stdout.splitlines():
                    # strip CLI table borders before splitting on whitespace
                    line = line.replace('|', "")
                    metric_id = line.split()[0]
                    stdout = node.run_cmd(
                        'source overcloudrc.v3; aodh alarm show {}' .format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("aodh alarm list was empty")
                        return False
                    # rows [3:-1] skip the table header and footer
                    for line in stdout.splitlines()[3: -1]:
                        line = line.replace('|', "")
                        if line.split()[0] == 'state_timestamp':
                            timestamps1 = line.split()[1]
                    # second trigger; the timestamp should advance after it
                    self.trigger_alarm_update(criteria_list, compute_node)
                    time.sleep(12)
                    stdout = node.run_cmd(
                        "source overcloudrc.v3; aodh alarm show {}" .format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("aodh alarm list was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        line = line.replace('|', "")
                        if line.split()[0] == 'state_timestamp':
                            timestamps2 = line.split()[1]
                    if timestamps1 == timestamps2:
                        self.__logger.info(
                            "Data not updated after interval of 12 seconds")
                        return False
                    else:
                        self.__logger.info("PASS")
                        return True
643
    def test_plugins_with_gnocchi(
            self, compute, plugin_interval, logger,
            criteria_list=[]):
        """Check that gnocchi measures for *criteria_list* on *compute*
        keep updating.

        Reads a measure's newest timestamp, sleeps for roughly two plugin
        intervals (minimum 30 s), reads again and compares; returns True
        when the timestamp advanced, False otherwise.
        NOTE(review): `criteria_list=[]` is a mutable default -- harmless
        here (never mutated) but fragile.
        """
        metric_id = {}
        timestamps1 = {}
        timestamps2 = {}
        nodes = get_apex_nodes()
        # wait at least two intervals, with a 30 s floor, so a new sample
        # has time to arrive
        if plugin_interval > 15:
            sleep_time = plugin_interval*2
        else:
            sleep_time = 30

        for node in nodes:
            if node.is_controller():
                self.__logger.info('Getting gnocchi metric list on {}' .format(
                    (node.get_dict()['name'])))
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "gnocchi metric list | grep {0} | grep {1}"
                    .format(criteria_list, compute))
                if stdout is None:
                        self.__logger.info("gnocchi list was empty")
                        return False
                for line in stdout.splitlines():
                    # strip CLI table borders before splitting on whitespace
                    line = line.replace('|', "")
                    metric_id = line.split()[0]
                    stdout = node.run_cmd(
                        'source overcloudrc.v3;gnocchi measures show {}'.format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("gnocchi list was empty")
                        return False
                    # rows [3:-1] skip the table header and footer; the
                    # last data row wins, i.e. the newest measure
                    for line in stdout.splitlines()[3: -1]:
                        if line[0] == '+':
                            pass
                        else:
                            timestamps1 = line.replace('|', "")
                            timestamps1 = timestamps1.split()[0]
                    time.sleep(sleep_time)
                    stdout = node.run_cmd(
                        "source overcloudrc.v3;gnocchi measures show {}".format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("gnocchi measures was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        if line[0] == '+':
                            pass
                        else:
                            timestamps2 = line.replace('|', "")
                            timestamps2 = timestamps2.split()[0]
                    if timestamps1 == timestamps2:
                        self.__logger.info(
                            "Plugin Interval is {}" .format(plugin_interval))
                        self.__logger.info(
                            "Data not updated after {} seconds".format(
                                sleep_time))
                        return False
                    else:
                        self.__logger.info("PASS")
                        return True
        return False
708
    def test_plugins_with_snmp(
            self, compute, plugin_interval, logger, plugin, snmp_mib_files=[],
            snmp_mib_strings=[], snmp_in_commands=[]):
        """Check that SNMP counters for *plugin* on *compute* keep changing.

        Performs an snmpwalk, waits 10 seconds, walks again (with one
        retry after a failed second walk) and compares the sampled counter
        values; returns True when the counter changed, False otherwise.
        NOTE(review): the mutable-list defaults are never mutated here;
        `counter2` can be referenced unbound if both the second and third
        walks return no output -- confirm against callers.
        """
        if plugin in ('hugepages', 'intel_rdt', 'mcelog'):
            nodes = get_apex_nodes()
            for node in nodes:
                if compute == node.get_dict()['name']:
                    stdout = node.run_cmd(
                        'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                            snmp_mib_files, snmp_mib_strings))
                    self.__logger.info("{}" .format(stdout))
                    if stdout is None:
                        self.__logger.info("No output from snmpwalk")
                        return False
                    elif 'OID' in stdout:
                        self.__logger.info("SNMP query failed")
                        return False
                    else:
                        # fourth whitespace-separated token holds the value
                        counter1 = stdout.split()[3]
                    time.sleep(10)
                    # second sample; on failure wait and retry once more
                    stdout = node.run_cmd(
                        'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                            snmp_mib_files, snmp_mib_strings))
                    self.__logger.info("{}" .format(stdout))
                    if stdout is None:
                        self.__logger.info("No output from snmpwalk")
                    elif 'OID' in stdout:
                        self.__logger.info(
                            "SNMP query failed during second check")
                        self.__logger.info("waiting for 10 sec")
                        time.sleep(10)
                    stdout = node.run_cmd(
                        'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                            snmp_mib_files, snmp_mib_strings))
                    self.__logger.info("{}" .format(stdout))
                    if stdout is None:
                        self.__logger.info("No output from snmpwalk")
                    elif 'OID' in stdout:
                        self.__logger.info("SNMP query failed again")
                        self.__logger.info("Failing this test case")
                        return False
                    else:
                        counter2 = stdout.split()[3]

                    # a static counter means the plugin is not reporting
                    if counter1 == counter2:
                        return False
                    else:
                        return True
        else:
            return False
760
761     def check_dma_dummy_included(self, compute, name):
762         """Check if dummy collectd config by DMA
763            is included in collectd.conf file.
764
765         Keyword arguments:
766         compute -- compute node instance
767         name -- config file name
768         """
769         compute_name = compute.get_name()
770         nodes = get_apex_nodes()
771         for node in nodes:
772             if compute_name == node.get_dict()['name']:
773                 dummy_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
774                 if name + '.conf' not in dummy_conf:
775                     self.__logger.error('check conf FAIL')
776                     return False
777                 else:
778                     self.__logger.info('check conf PASS')
779                     fullpath = '/etc/collectd/collectd.conf.d/{}'.format(
780                                name + '.conf')
781                     self.__logger.info('Delete file {}'.format(fullpath))
782                     node.run_cmd('sudo rm -f ' + fullpath)
783                     return True
784         self.__logger.error('Some panic, compute not found')
785         return False
786
787     def create_testvm(self, compute_node, test_name):
788         nodes = get_apex_nodes()
789         compute_name = compute_node.get_name()
790
791         controller_node = None
792         for node in nodes:
793             if node.is_controller():
794                 controller_node = node
795                 break
796
797         self.__logger.debug('Creating Test VM on {}' .format(compute_name))
798         self.__logger.debug('Create command is executed in {}' .format(
799             (controller_node.get_dict()['name'])))
800
801         node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
802         node.put_file(TEST_VM_IMAGE_PATH, TEST_VM_IMAGE)
803         image = controller_node.run_cmd(
804             'source overcloudrc.v3;'
805             'openstack image create -f value -c id'
806             ' --disk-format qcow2 --file {0} {1}'
807             .format(TEST_VM_IMAGE, test_name))
808         flavor = controller_node.run_cmd(
809             'source overcloudrc.v3;'
810             'openstack flavor create -f value -c id {}'
811             .format(test_name))
812         host = controller_node.run_cmd(
813             'source overcloudrc.v3;'
814             'openstack hypervisor list -f value -c "Hypervisor Hostname"'
815             ' | grep "^{}\\."'
816             .format(compute_name))
817         server = controller_node.run_cmd(
818             'source overcloudrc.v3;'
819             'openstack server create -f value -c id'
820             ' --image {0} --flavor {1} --availability-zone {2} {3}'
821             .format(image, flavor, 'nova:' + host, test_name))
822
823         resources = {"image": image, "flavor": flavor, "server": server}
824
825         if server:
826             self.__logger.debug('VM created')
827         self.__logger.debug('VM info: {}'.format(resources))
828
829         return resources
830
831     def delete_testvm(self, resources):
832         nodes = get_apex_nodes()
833
834         controller_node = None
835         for node in nodes:
836             if node.is_controller():
837                 controller_node = node
838                 break
839
840         self.__logger.debug('Deleteing Test VM')
841         self.__logger.debug('VM to be deleted info: {}'.format(resources))
842         self.__logger.debug('Delete command is executed in {}' .format(
843             (controller_node.get_dict()['name'])))
844
845         server = resources.get('server', None)
846         flavor = resources.get('flavor', None)
847         image = resources.get('image', None)
848         if server:
849             controller_node.run_cmd(
850                 'source overcloudrc.v3;'
851                 'openstack server delete {}'.format(server))
852         if flavor:
853             controller_node.run_cmd(
854                 'source overcloudrc.v3;'
855                 'openstack flavor delete {}'.format(flavor))
856         if image:
857             controller_node.run_cmd(
858                 'source overcloudrc.v3;'
859                 'openstack image delete {}'.format(image))
860
861         self.__logger.debug('VM and other OpenStack resources deleted')
862
863     def test_dma_infofetch_get_data(self, compute, test_name):
864         compute_name = compute.get_name()
865         nodes = get_apex_nodes()
866         for node in nodes:
867             if compute_name == node.get_dict()['name']:
868                 stdout = node.run_cmd(
869                     'redis-cli keys "barometer-dma/vm/*/vminfo"'
870                     ' | while read k; do redis-cli get $k; done'
871                     ' | grep {}'.format(test_name))
872                 self.__logger.debug('InfoFetch data: {}'.format(stdout))
873                 if stdout and test_name in stdout:
874                     self.__logger.info('PASS')
875                     return True
876                 else:
877                     self.__logger.info('No test vm info')
878
879         self.__logger.info('FAIL')
880         return False