Fix wrong image name in test code
[barometer.git] / baro_tests / config_server.py
# -*- coding: utf-8 -*-
#
# Copyright 2017 OPNFV
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Classes used by collectd.py"""

import time
import os.path
import os
import re
import yaml

from opnfv.deployment import factory
import paramiko
from functest.utils import constants

ID_RSA_PATH = '/root/.ssh/id_rsa'
SSH_KEYS_SCRIPT = '/home/opnfv/barometer/baro_utils/get_ssh_keys.sh'
DEF_PLUGIN_INTERVAL = 10
COLLECTD_CONF = '/etc/collectd.conf'
COLLECTD_CONF_DIR = '/etc/collectd/collectd.conf.d'
NOTIFICATION_FILE = '/var/log/python-notifications.dump'
COLLECTD_NOTIFICATION = '/etc/collectd_notification_dump.py'
APEX_IP = os.getenv("INSTALLER_IP").rstrip('\n')
APEX_USER = 'root'
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
TEST_VM_IMAGE = 'cirros-0.4.0-x86_64-disk.img'
TEST_VM_IMAGE_PATH = '/home/opnfv/functest/images/' + TEST_VM_IMAGE


class Node(object):
    """Node configuration class"""
    def __init__(self, attrs):
        self.__null = attrs[0]
        self.__id = attrs[1]
        self.__name = attrs[2]
        self.__status = attrs[3] if attrs[3] else None
        self.__taskState = attrs[4]
        self.__pwrState = attrs[5]
        self.__ip = re.sub('^[a-z]+=', '', attrs[6])
        # 'nova list' does not report roles, so they are derived from
        # the node name (e.g. 'overcloud-novacompute-0'), matching how
        # ConfigServer filters controllers and computes below
        self.__roles = [
            role for role in ('controller', 'compute')
            if role in self.__name]

    def get_name(self):
        """Get node name"""
        return self.__name

    def get_id(self):
        """Get node ID"""
        return self.__id

    def get_ip(self):
        """Get node IP address"""
        return self.__ip

    def get_roles(self):
        """Get node roles"""
        return self.__roles


def get_apex_nodes():
    """Return the list of overcloud nodes known to the Apex installer."""
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    return nodes


class ConfigServer(object):
    """Class to get env configuration"""
    def __init__(self, host, user, logger, priv_key=None):
        self.__host = host
        self.__user = user
        self.__passwd = None
        self.__priv_key = priv_key
        self.__nodes = list()
        self.__logger = logger

        self.__private_key_file = ID_RSA_PATH
        if not os.path.isfile(self.__private_key_file):
            self.__logger.error(
                "Private key file '{}'".format(self.__private_key_file)
                + " not found.")
            raise IOError("Private key file '{}' not found.".format(
                self.__private_key_file))

        # get list of available nodes
        ssh, sftp = self.__open_sftp_session(
            self.__host, self.__user, self.__passwd)
        attempt = 1
        apex_node_passed = False

        while (attempt <= 10) and not apex_node_passed:
            stdin, stdout, stderr = ssh.exec_command(
                "source stackrc; nova list")
            stderr_lines = stderr.readlines()
            if stderr_lines:
                self.__logger.warning(
                    "'Apex node' command failed (try {}):".format(attempt))
                for line in stderr_lines:
                    self.__logger.debug(line.strip())
            else:
                apex_node_passed = True
                if attempt > 1:
                    self.__logger.info(
                        "'Apex node' command passed (try {})".format(attempt))
            attempt += 1
        if not apex_node_passed:
            self.__logger.error(
                "'Apex node' command failed. This was the last try.")
            raise OSError(
                "'Apex node' command failed. This was the last try.")
        node_table = stdout.readlines()

        # skip the table header and separator rows, parse the rest
        for entry in node_table[3:]:
            if entry[0] == '+' or entry[0] == '\n':
                continue
            self.__nodes.append(
                Node([str(x.strip(' \n')) for x in entry.split('|')]))

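    # For reference, each data row of the 'nova list' output looks
    # roughly like this (a sketch; real IDs, names and networks vary):
    #
    #   | <id> | overcloud-novacompute-0 | ACTIVE | - | Running | ctlplane=192.30.9.8 |
    #
    # Splitting such a row on '|' yields the attrs list consumed by
    # Node(): [null, id, name, status, task state, power state,
    # network]; Node.__init__() then strips the 'ctlplane=' prefix to
    # obtain the IP address.
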
    def get_controllers(self):
        """Get list of controller nodes"""
        return (
            [node for node in self.__nodes if 'controller' in node.get_name()])

    def get_computes(self):
        """Get list of compute nodes"""
        return (
            [node for node in self.__nodes if 'compute' in node.get_name()])

    def get_nodes(self):
        """Get list of all nodes"""
        return self.__nodes

    def __open_sftp_session(self, host, user, passwd=None):
        """Connect to the given host.

        Keyword arguments:
        host -- host to connect to
        user -- user to use
        passwd -- password to use

        Return tuple of SSH and SFTP client instances.
        """
        # create SSH client
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # if neither password nor private key is given, fall back to
        # the private key file configured for this server
        if not passwd and not self.__priv_key:
            self.__priv_key = paramiko.RSAKey.from_private_key_file(
                self.__private_key_file)

        # connect to the server
        ssh.connect(
            host, username=user, password=passwd, pkey=self.__priv_key)
        sftp = ssh.open_sftp()

        # return SSH and SFTP client instances
        return ssh, sftp

    def get_plugin_interval(self, compute, plugin):
        """Find the plugin interval in collectd configuration.

        Keyword arguments:
        compute -- compute node instance
        plugin -- plug-in name

        If found, return the interval value, otherwise the default value."""
        default_interval = DEF_PLUGIN_INTERVAL
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'cat /etc/collectd/collectd.conf.d/{}.conf'.format(plugin))
                if stdout is None:
                    return default_interval
                for line in stdout.split('\n'):
                    if 'Interval' in line:
                        return int(line.split()[1].strip('"'))
        return default_interval

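    # For reference, a collectd plugin sets its own interval in a
    # LoadPlugin block (a sketch; plugin name and value are
    # illustrative):
    #
    #   <LoadPlugin hugepages>
    #     Interval 10
    #   </LoadPlugin>
    #
    # get_plugin_interval() returns the value of such an 'Interval'
    # line, or DEF_PLUGIN_INTERVAL if the plugin file has none.
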
    def get_plugin_config_values(self, compute, plugin, parameter):
        """Get parameter values from collectd config file.

        Keyword arguments:
        compute -- compute node instance
        plugin -- plug-in name
        parameter -- plug-in parameter

        Return the found values (the remainder of the matching config
        line), otherwise an empty list."""
        default_values = []
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'cat /etc/collectd/collectd.conf.d/{}.conf'.format(plugin))
                if stdout is None:
                    return default_values
                # note: only the Interfaces, Bridges and Cores
                # parameters are currently recognized, regardless of
                # the 'parameter' argument
                for line in stdout.split('\n'):
                    if 'Interfaces' in line:
                        return line.split(' ', 1)[1]
                    elif 'Bridges' in line:
                        return line.split(' ', 1)[1]
                    elif 'Cores' in line:
                        return line.split(' ', 1)[1]
        return default_values

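    # For reference, the parameters matched above appear in the plugin
    # config files like this (a sketch of ovs_events.conf; interface
    # names are illustrative):
    #
    #   <Plugin ovs_events>
    #     Interfaces "br0" "br-ex"
    #   </Plugin>
    #
    # so the returned value is the remainder of the matching line,
    # e.g. '"br0" "br-ex"'.
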
    def execute_command(self, command, host_ip=None, ssh=None):
        """Execute command on node and return list of lines of standard output.

        Keyword arguments:
        command -- command
        host_ip -- IP of the node
        ssh -- existing open SSH session to use

        One of host_ip or ssh must not be None. If both are not None,
        the existing ssh session is used.
        """
        if host_ip is None and ssh is None:
            raise ValueError('One of host_ip or ssh must not be None.')
        if ssh is None:
            ssh, sftp = self.__open_sftp_session(host_ip, 'root', 'opnfvapex')
        stdin, stdout, stderr = ssh.exec_command(command)
        return stdout.readlines()

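    # Usage sketch (hypothetical node address; 'opnfvapex' is the root
    # password used above):
    #
    #   config_server = ConfigServer(APEX_IP, APEX_USER, logger)
    #   lines = config_server.execute_command(
    #       'uname -a', host_ip='192.30.9.8')
    #   kernel_info = lines[0].strip()
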
    def get_ovs_interfaces(self, compute):
        """Get list of configured OVS interfaces.

        Keyword arguments:
        compute -- compute node instance

        Note: 'ovs-vsctl list-br' lists bridges rather than interfaces.
        """
        stdout = None
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('sudo ovs-vsctl list-br')
        return stdout

    def is_gnocchi_running(self, controller):
        """Check whether Gnocchi is running on controller.

        Keyword arguments:
        controller -- controller node instance

        Return boolean value whether Gnocchi is running.
        """
        gnocchi_present = False
        controller_name = controller.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if controller_name == node.get_dict()['name']:
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "openstack catalog list | grep gnocchi")
                if stdout is None:
                    return False
                elif 'gnocchi' in stdout:
                    gnocchi_present = True
                    return gnocchi_present
                else:
                    return False
        return gnocchi_present

    def is_aodh_running(self, controller):
        """Check whether the aodh service is running on controller."""
        aodh_present = False
        controller_name = controller.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if controller_name == node.get_dict()['name']:
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "openstack catalog list | grep aodh")
                if stdout is None:
                    return False
                elif 'aodh' in stdout:
                    aodh_present = True
                    return aodh_present
                else:
                    return False
        return aodh_present

    def is_redis_running(self, compute):
        """Check whether the redis service is running on compute."""
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('sudo systemctl status docker '
                                      '&& sudo docker ps '
                                      '| grep barometer-redis')
                if stdout and 'barometer-redis' in stdout:
                    self.__logger.info(
                        'Redis is running in node {}'.format(compute_name))
                    return True
        self.__logger.info(
            'Redis is *not* running in node {}'.format(compute_name))
        return False

    def is_dma_server_running(self, compute):
        """Check whether the DMA server is running on compute."""
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('sudo systemctl status docker '
                                      '&& sudo docker ps '
                                      '| grep opnfv/barometer-dma')
                if stdout and '/server' in stdout:
                    self.__logger.info(
                        'DMA Server is running in node {}'.format(
                            compute_name))
                    return True
        self.__logger.info(
            'DMA Server is *not* running in node {}'.format(compute_name))
        return False

    def is_dma_infofetch_running(self, compute):
        """Check whether DMA infofetch is running on compute."""
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('sudo systemctl status docker '
                                      '&& sudo docker ps '
                                      '| grep opnfv/barometer-dma')
                if stdout and '/infofetch' in stdout:
                    self.__logger.info(
                        'DMA InfoFetch is running in node {}'.format(
                            compute_name))
                    return True
        self.__logger.info(
            'DMA InfoFetch is *not* running in node {}'.format(compute_name))
        return False

    def get_dma_config(self, compute):
        """Get config values of DMA"""
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                # Use the following once functest accepts python-toml:
                #     stdout = node.run_cmd(
                #         'cat /etc/barometer-dma/config.toml')
                #     try:
                #         agent_conf = toml.loads(stdout)
                #     except (TypeError, TomlDecodeError) as e:
                #         self.__logger.error(
                #             'DMA config error: {}'.format(e))
                #         agent_conf = None
                #     finally:
                #         return agent_conf
                readcmd = (
                    'egrep "listen_port|amqp_"'
                    ' /etc/barometer-dma/config.toml'
                    ' | sed -e "s/#.*$//" | sed -e "s/=/:/"'
                    )
                stdout = node.run_cmd(readcmd)
                agent_conf = {"server": yaml.safe_load(stdout)}

                # resolve the configured amqp_host name to an IP address
                pingcmd = (
                    'ping -n -c1 ' + agent_conf["server"]["amqp_host"] +
                    ' | sed -ne "s/^.*bytes from //p" | sed -e "s/:.*//"'
                    )
                agent_conf["server"]["amqp_host"] = node.run_cmd(pingcmd)

                return agent_conf
        return None

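    # For reference, the egrep/sed pipeline in get_dma_config() turns
    # config.toml lines such as (a sketch; values are illustrative):
    #
    #   listen_port = 12345
    #   amqp_host = "localhost"   # AMQP server
    #
    # into YAML-parsable 'key : value' lines:
    #
    #   listen_port : 12345
    #   amqp_host : "localhost"
    #
    # which yaml.safe_load() then converts into the agent_conf dict.
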
    def is_mcelog_installed(self, compute, package):
        """Check whether package exists on compute node.

        Keyword arguments:
        compute -- compute node instance
        package -- Linux package to search for

        Return boolean value whether package is installed.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'rpm -qa | grep mcelog')
                if stdout is None:
                    return False
                elif 'mcelog' in stdout:
                    return True
                else:
                    return False
        return False

    def is_rdt_available(self, compute):
        """Check whether RDT is available, i.e. the compute node
        is not a virtual machine."""
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('cat /proc/cpuinfo | grep hypervisor')
                if stdout and 'hypervisor' in stdout:
                    return False
        return True

    def is_libpqos_on_node(self, compute):
        """Check whether libpqos is present on compute node"""
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('ls /usr/local/lib/ | grep libpqos')
                if stdout and 'libpqos' in stdout:
                    return True
        return False

    def check_aodh_plugin_included(self, compute):
        """Check if the aodh plugin is included in the collectd.conf file.

        Keyword arguments:
        compute -- compute node instance

        Return boolean value indicating whether the AODH plugin is
        included.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                aodh_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
                if not aodh_conf or 'aodh.conf' not in aodh_conf:
                    self.__logger.info(
                        "AODH Plugin not included in {}".format(compute_name))
                    return False
                else:
                    self.__logger.info(
                        "AODH plugin present in compute node {}".format(
                            compute_name))
                    return True
        return True

    def check_gnocchi_plugin_included(self, compute):
        """Check if the gnocchi plugin is included in the collectd.conf file.

        Keyword arguments:
        compute -- compute node instance

        Return boolean value indicating whether the gnocchi plugin is
        included.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                gnocchi_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
                if (not gnocchi_conf or
                        'collectd-ceilometer-plugin.conf' not in gnocchi_conf):
                    self.__logger.info(
                        "Gnocchi Plugin not included in node {}".format(
                            compute_name))
                    return False
                else:
                    self.__logger.info(
                        "Gnocchi plugin available in compute node {}".format(
                            compute_name))
                    return True
        return True

    def check_snmp_plugin_included(self, compute):
        """Check if the SNMP plugin is active on the compute node."""
        snmp_mib = '/usr/share/snmp/mibs/Intel-Rdt.txt'
        snmp_string = 'INTEL-RDT-MIB::intelRdt'
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}'.format(
                        snmp_mib, snmp_string))
                self.__logger.info("snmp output = {}".format(stdout))
                # snmpwalk error messages contain the string 'OID'
                # (e.g. "No Such Object available on this agent at
                # this OID"), so its presence indicates failure
                if 'OID' in stdout:
                    return False
                else:
                    return True
        return False

    def enable_plugins(
            self, compute, plugins, error_plugins, create_backup=True):
        """Enable plugins on compute node

        Keyword arguments:
        compute -- compute node instance
        plugins -- list of plugins to be enabled

        Return boolean value indicating whether function was successful.
        """
        # error_plugins and create_backup are currently unused
        csv_file = os.path.dirname(os.path.realpath(__file__)) + '/csv.conf'
        plugins = sorted(plugins)
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                node.put_file(csv_file, 'csv.conf')
                node.run_cmd(
                    'sudo cp csv.conf '
                    + '/etc/collectd/collectd.conf.d/csv.conf')
        return True

    def restart_collectd(self, compute):
        """Restart collectd on compute node.

        Keyword arguments:
        compute -- compute node instance

        Return tuple with boolean indicating success and list of warnings
        received during collectd start.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()

        def get_collectd_processes(compute_node):
            """Get number of running collectd processes.

            Keyword arguments:
            compute_node -- node on which to check for collectd
                processes
            """
            stdout = compute_node.run_cmd("pgrep collectd")
            return len(stdout)

        warning = []
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                self.__logger.info('Stopping collectd service...')
                node.run_cmd('sudo systemctl stop collectd')
                time.sleep(10)
                if get_collectd_processes(node):
                    self.__logger.error('Collectd is still running...')
                    return False, []
                self.__logger.info('Starting collectd service...')
                stdout = node.run_cmd('sudo systemctl start collectd')
                time.sleep(10)
                warning = [
                    output.strip() for output in stdout if 'WARN: ' in output]
                if get_collectd_processes(node) == 0:
                    self.__logger.error('Collectd is still not running...')
                    return False, warning
        return True, warning

    def trigger_alarm_update(self, alarm, compute_node):
        # TODO: move these actions to main, with criteria lists so that
        # we can reference them, i.e. test_plugin_with_aodh(self,
        # compute, plugin.., logger, criteria_list, alarm_action)
        if alarm == 'mcelog':
            compute_node.run_cmd('sudo modprobe mce-inject')
            compute_node.run_cmd('sudo ./mce-inject_ea < corrected')
        if alarm == 'ovs_events':
            compute_node.run_cmd('sudo ifconfig -a | grep br0')
            compute_node.run_cmd(
                'sudo ifconfig br0 down; sudo ifconfig br0 up')

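    # For reference, 'corrected' above is an input file for the
    # mce-inject tool prepared on the compute node; it describes a
    # corrected machine check error, roughly of the form (a sketch;
    # see the mce-inject syntax for the exact format):
    #
    #   CPU 0 BANK 2
    #   STATUS CORRECTED
    #
    # Injecting it should make the collectd mcelog plugin raise a
    # notification, which in turn updates the AODH alarm checked below.
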
    def test_plugins_with_aodh(
            self, compute, plugin_interval, logger,
            criteria_list=[]):

        metric_id = {}
        timestamps1 = {}
        timestamps2 = {}
        nodes = get_apex_nodes()
        compute_node = [
            node for node in nodes
            if node.get_dict()['name'] == compute][0]
        for node in nodes:
            if node.is_controller():
                self.__logger.info('Getting AODH Alarm list on {}'.format(
                    (node.get_dict()['name'])))
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                self.trigger_alarm_update(criteria_list, compute_node)
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "aodh alarm list | grep {0} | grep {1}"
                    .format(criteria_list, compute))
                if stdout is None:
                    self.__logger.info("aodh alarm list was empty")
                    return False
                for line in stdout.splitlines():
                    line = line.replace('|', "")
                    metric_id = line.split()[0]
                    stdout = node.run_cmd(
                        'source overcloudrc.v3; aodh alarm show {}'.format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("aodh alarm show was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        line = line.replace('|', "")
                        if line.split()[0] == 'state_timestamp':
                            timestamps1 = line.split()[1]
                    self.trigger_alarm_update(criteria_list, compute_node)
                    time.sleep(12)
                    stdout = node.run_cmd(
                        "source overcloudrc.v3; aodh alarm show {}".format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("aodh alarm show was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        line = line.replace('|', "")
                        if line.split()[0] == 'state_timestamp':
                            timestamps2 = line.split()[1]
                    if timestamps1 == timestamps2:
                        self.__logger.info(
                            "Data not updated after interval of 12 seconds")
                        return False
                    else:
                        self.__logger.info("PASS")
                        return True
        return False

    def test_plugins_with_gnocchi(
            self, compute, plugin_interval, logger,
            criteria_list=[]):

        metric_id = {}
        timestamps1 = {}
        timestamps2 = {}
        nodes = get_apex_nodes()
        if plugin_interval > 15:
            sleep_time = plugin_interval * 2
        else:
            sleep_time = 30

        for node in nodes:
            if node.is_controller():
                self.__logger.info('Getting gnocchi metric list on {}'.format(
                    (node.get_dict()['name'])))
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "gnocchi metric list | grep {0} | grep {1}"
                    .format(criteria_list, compute))
                if stdout is None:
                    self.__logger.info("gnocchi metric list was empty")
                    return False
                for line in stdout.splitlines():
                    line = line.replace('|', "")
                    metric_id = line.split()[0]
                    stdout = node.run_cmd(
                        'source overcloudrc.v3;gnocchi measures show {}'.format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("gnocchi measures show was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        if not line or line[0] == '+':
                            continue
                        timestamps1 = line.replace('|', "").split()[0]
                    time.sleep(sleep_time)
                    stdout = node.run_cmd(
                        "source overcloudrc.v3;gnocchi measures show {}".format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("gnocchi measures show was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        if not line or line[0] == '+':
                            continue
                        timestamps2 = line.replace('|', "").split()[0]
                    if timestamps1 == timestamps2:
                        self.__logger.info(
                            "Plugin Interval is {}".format(plugin_interval))
                        self.__logger.info(
                            "Data not updated after {} seconds".format(
                                sleep_time))
                        return False
                    else:
                        self.__logger.info("PASS")
                        return True
        return False

    def test_plugins_with_snmp(
            self, compute, plugin_interval, logger, plugin, snmp_mib_files=[],
            snmp_mib_strings=[], snmp_in_commands=[]):

        if plugin not in ('hugepages', 'intel_rdt', 'mcelog'):
            return False
        nodes = get_apex_nodes()
        for node in nodes:
            if compute == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}'.format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}".format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                    return False
                elif 'OID' in stdout:
                    self.__logger.info("SNMP query failed")
                    return False
                else:
                    counter1 = stdout.split()[3]
                time.sleep(10)
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}'.format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}".format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                elif 'OID' in stdout:
                    self.__logger.info(
                        "SNMP query failed during second check")
                    self.__logger.info("waiting for 10 sec")
                    time.sleep(10)
                # third query; the counter must differ from the first
                # reading for the test to pass
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}'.format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}".format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                    return False
                elif 'OID' in stdout:
                    self.__logger.info("SNMP query failed again")
                    self.__logger.info("Failing this test case")
                    return False
                else:
                    counter2 = stdout.split()[3]

                if counter1 == counter2:
                    return False
                else:
                    return True
        return False

    def check_dma_dummy_included(self, compute, name):
        """Check if the dummy collectd config created by DMA
        is included in the collectd.conf.d directory.

        Keyword arguments:
        compute -- compute node instance
        name -- config file name
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                dummy_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
                if not dummy_conf or name + '.conf' not in dummy_conf:
                    self.__logger.error('check conf FAIL')
                    return False
                else:
                    self.__logger.info('check conf PASS')
                    fullpath = '/etc/collectd/collectd.conf.d/{}'.format(
                        name + '.conf')
                    self.__logger.info('Delete file {}'.format(fullpath))
                    node.run_cmd('sudo rm -f ' + fullpath)
                    return True
        self.__logger.error('Compute node {} not found'.format(compute_name))
        return False

    def create_testvm(self, compute_node, test_name):
        nodes = get_apex_nodes()
        compute_name = compute_node.get_name()

        controller_node = None
        for node in nodes:
            if node.is_controller():
                controller_node = node
                break

        self.__logger.debug('Creating Test VM on {}'.format(compute_name))
        self.__logger.debug('Create command is executed in {}'.format(
            (controller_node.get_dict()['name'])))

        # the credentials and the image are needed on the controller,
        # where the OpenStack commands are executed
        controller_node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
        controller_node.put_file(TEST_VM_IMAGE_PATH, TEST_VM_IMAGE)
        image = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack image create -f value -c id'
            ' --disk-format qcow2 --file {0} {1}'
            .format(TEST_VM_IMAGE, test_name))
        flavor = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack flavor create -f value -c id {}'
            .format(test_name))
        host = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack hypervisor list -f value -c "Hypervisor Hostname"'
            ' | grep "^{}\\."'
            .format(compute_name))
        # 'nova:<hypervisor hostname>' in --availability-zone pins the
        # server to the chosen compute host
        server = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack server create -f value -c id'
            ' --image {0} --flavor {1} --availability-zone {2} {3}'
            .format(image, flavor, 'nova:' + host, test_name))

        resources = {"image": image, "flavor": flavor, "server": server}

        if server:
            self.__logger.debug('VM created')
        self.__logger.debug('VM info: {}'.format(resources))

        return resources

    def delete_testvm(self, resources):
        nodes = get_apex_nodes()

        controller_node = None
        for node in nodes:
            if node.is_controller():
                controller_node = node
                break

        self.__logger.debug('Deleting Test VM')
        self.__logger.debug('VM to be deleted info: {}'.format(resources))
        self.__logger.debug('Delete command is executed in {}'.format(
            (controller_node.get_dict()['name'])))

        server = resources.get('server', None)
        flavor = resources.get('flavor', None)
        image = resources.get('image', None)
        if server:
            controller_node.run_cmd(
                'source overcloudrc.v3;'
                'openstack server delete {}'.format(server))
        if flavor:
            controller_node.run_cmd(
                'source overcloudrc.v3;'
                'openstack flavor delete {}'.format(flavor))
        if image:
            controller_node.run_cmd(
                'source overcloudrc.v3;'
                'openstack image delete {}'.format(image))

        self.__logger.debug('VM and other OpenStack resources deleted')

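    # Usage sketch for the two helpers above (hypothetical caller with
    # a ConfigServer instance and a compute Node):
    #
    #   resources = config_server.create_testvm(compute_node, 'dma-test')
    #   assert config_server.test_dma_infofetch_get_data(
    #       compute_node, 'dma-test')
    #   config_server.delete_testvm(resources)
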
    def test_dma_infofetch_get_data(self, compute, test_name):
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'redis-cli keys "barometer-dma/vm/*/vminfo"'
                    ' | while read k; do redis-cli get $k; done'
                    ' | grep {}'.format(test_name))
                self.__logger.debug('InfoFetch data: {}'.format(stdout))
                if stdout and test_name in stdout:
                    self.__logger.info('PASS')
                    return True
                else:
                    self.__logger.info('No test vm info')

        self.__logger.info('FAIL')
        return False