+
def trigger_alarm_update(self, alarm, compute_node):
    """Run the shell commands on *compute_node* that force the given
    alarm source to produce a fresh event.

    Keyword arguments:
    alarm -- alarm/plugin identifier ('mcelog' or 'ovs_events'); any
        other value is a silent no-op
    compute_node -- compute node instance the commands are executed on
    """
    # TODO: move these actions to main, with criteria lists so that we
    # can reference that, i.e. test_plugin_with_aodh(
    #     self, compute, plugin.., logger, criteria_list, alarm_action)
    trigger_commands = {
        'mcelog': [
            'sudo modprobe mce-inject',
            'sudo ./mce-inject_ea < corrected',
        ],
        'ovs_events': [
            'sudo ifconfig -a | grep br0',
            'sudo ifconfig br0 down; sudo ifconfig br0 up',
        ],
    }
    # Unknown alarm names fall through to an empty command list,
    # matching the original if-chain behaviour.
    for command in trigger_commands.get(alarm, []):
        compute_node.run_cmd(command)
+
def test_plugins_with_aodh(
        self, compute, plugin_interval, logger,
        criteria_list=None):
    """Verify that the AODH alarm state timestamp gets updated.

    Triggers the alarm twice on the compute node and checks, from a
    controller node, that the alarm's ``state_timestamp`` changed
    between the two triggers.

    Keyword arguments:
    compute -- name of the compute node under test
    plugin_interval -- plugin collection interval (kept for signature
        consistency with the other test_plugins_* methods)
    logger -- logger instance (kept for signature consistency)
    criteria_list -- alarm/plugin name used to filter the alarm list

    Return boolean: True if the timestamp was updated, False otherwise.
    """
    # Fix: avoid a shared mutable default argument ([]).
    if criteria_list is None:
        criteria_list = []
    metric_id = {}
    timestamps1 = {}
    timestamps2 = {}
    nodes = get_apex_nodes()
    compute_node = [
        node for node in nodes
        if node.get_dict()['name'] == compute][0]
    for node in nodes:
        if node.is_controller():
            self.__logger.info('Getting AODH Alarm list on {}' .format(
                (node.get_dict()['name'])))
            node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
            self.trigger_alarm_update(criteria_list, compute_node)
            stdout = node.run_cmd(
                "source overcloudrc.v3;"
                + "aodh alarm list | grep {0} | grep {1}"
                .format(criteria_list, compute))
            if stdout is None:
                self.__logger.info("aodh alarm list was empty")
                return False
            for line in stdout.splitlines():
                line = line.replace('|', "")
                # Keep the id of the last matching alarm.
                metric_id = line.split()[0]
            stdout = node.run_cmd(
                'source overcloudrc.v3; aodh alarm show {}' .format(
                    metric_id))
            if stdout is None:
                # Fix: message previously claimed the *list* was empty.
                self.__logger.info("aodh alarm show output was empty")
                return False
            # Skip the table header (first 3 lines) and footer.
            for line in stdout.splitlines()[3: -1]:
                line = line.replace('|', "")
                if line.split()[0] == 'state_timestamp':
                    timestamps1 = line.split()[1]
            self.trigger_alarm_update(criteria_list, compute_node)
            time.sleep(12)
            stdout = node.run_cmd(
                "source overcloudrc.v3; aodh alarm show {}" .format(
                    metric_id))
            if stdout is None:
                self.__logger.info("aodh alarm show output was empty")
                return False
            for line in stdout.splitlines()[3:-1]:
                line = line.replace('|', "")
                if line.split()[0] == 'state_timestamp':
                    timestamps2 = line.split()[1]
            if timestamps1 == timestamps2:
                self.__logger.info(
                    "Data not updated after interval of 12 seconds")
                return False
            else:
                self.__logger.info("PASS")
                return True
    # Fix: no controller node was found; fail explicitly instead of
    # returning None (consistent with test_plugins_with_gnocchi).
    return False
+
def test_plugins_with_gnocchi(
        self, compute, plugin_interval, logger,
        criteria_list=None):
    """Verify that Gnocchi measures for a plugin are refreshed.

    Reads the latest measure timestamp for the first matching metric,
    waits roughly two collection intervals, then reads it again and
    checks that it changed.

    Keyword arguments:
    compute -- name of the compute node under test
    plugin_interval -- plugin collection interval in seconds
    logger -- logger instance (kept for signature consistency)
    criteria_list -- metric/plugin name used to filter the metric list

    Return boolean: True if a new measure appeared, False otherwise.
    """
    # Fix: avoid a shared mutable default argument ([]).
    if criteria_list is None:
        criteria_list = []
    metric_id = {}
    timestamps1 = {}
    timestamps2 = {}
    nodes = get_apex_nodes()
    # Wait at least two intervals (minimum 30s) so that a new measure
    # has time to be published.
    if plugin_interval > 15:
        sleep_time = plugin_interval*2
    else:
        sleep_time = 30

    for node in nodes:
        if node.is_controller():
            self.__logger.info('Getting gnocchi metric list on {}' .format(
                (node.get_dict()['name'])))
            node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
            stdout = node.run_cmd(
                "source overcloudrc.v3;"
                + "gnocchi metric list | grep {0} | grep {1}"
                .format(criteria_list, compute))
            if stdout is None:
                self.__logger.info("gnocchi list was empty")
                return False
            for line in stdout.splitlines():
                line = line.replace('|', "")
                # Keep the id of the last matching metric.
                metric_id = line.split()[0]
            stdout = node.run_cmd(
                'source overcloudrc.v3;gnocchi measures show {}'.format(
                    metric_id))
            if stdout is None:
                self.__logger.info("gnocchi list was empty")
                return False
            # Skip the table header (first 3 lines) and footer;
            # '+' rows are table borders.
            for line in stdout.splitlines()[3: -1]:
                if line[0] == '+':
                    pass
                else:
                    timestamps1 = line.replace('|', "")
                    timestamps1 = timestamps1.split()[0]
            time.sleep(sleep_time)
            stdout = node.run_cmd(
                "source overcloudrc.v3;gnocchi measures show {}".format(
                    metric_id))
            if stdout is None:
                self.__logger.info("gnocchi measures was empty")
                return False
            for line in stdout.splitlines()[3:-1]:
                if line[0] == '+':
                    pass
                else:
                    timestamps2 = line.replace('|', "")
                    timestamps2 = timestamps2.split()[0]
            if timestamps1 == timestamps2:
                self.__logger.info(
                    "Plugin Interval is {}" .format(plugin_interval))
                self.__logger.info(
                    "Data not updated after {} seconds".format(
                        sleep_time))
                return False
            else:
                self.__logger.info("PASS")
                return True
    # No controller node was found.
    return False
+
def test_plugins_with_snmp(
        self, compute, plugin_interval, logger, plugin, snmp_mib_files=None,
        snmp_mib_strings=None, snmp_in_commands=None):
    """Verify that a plugin's SNMP counter is being updated.

    Queries the counter via snmpwalk on the compute node, waits 10
    seconds, queries again and checks that the value changed.  A
    failed second query ('OID' in the output) is retried once.

    Keyword arguments:
    compute -- name of the compute node under test
    plugin_interval -- plugin collection interval (kept for signature
        consistency with the other test_plugins_* methods)
    logger -- logger instance (kept for signature consistency)
    plugin -- plugin name; only 'hugepages', 'intel_rdt' and 'mcelog'
        are supported
    snmp_mib_files -- MIB file passed to snmpwalk -m
    snmp_mib_strings -- OID/MIB string to query
    snmp_in_commands -- unused, kept for signature compatibility

    Return boolean: True if the counter changed, False otherwise.
    """
    # Fix: avoid shared mutable default arguments ([]).
    if snmp_mib_files is None:
        snmp_mib_files = []
    if snmp_mib_strings is None:
        snmp_mib_strings = []
    if snmp_in_commands is None:
        snmp_in_commands = []

    if plugin in ('hugepages', 'intel_rdt', 'mcelog'):
        nodes = get_apex_nodes()
        for node in nodes:
            if compute == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}" .format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                    return False
                elif 'OID' in stdout:
                    self.__logger.info("SNMP query failed")
                    return False
                else:
                    counter1 = stdout.split()[3]
                time.sleep(10)
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}" .format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                    # Fix: previously fell through and compared an
                    # undefined counter2, raising NameError.
                    return False
                elif 'OID' in stdout:
                    self.__logger.info(
                        "SNMP query failed during second check")
                    self.__logger.info("waiting for 10 sec")
                    time.sleep(10)
                    stdout = node.run_cmd(
                        'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                            snmp_mib_files, snmp_mib_strings))
                    self.__logger.info("{}" .format(stdout))
                    if stdout is None:
                        self.__logger.info("No output from snmpwalk")
                        # Fix: same undefined-counter2 fall-through as
                        # above; fail the test explicitly.
                        self.__logger.info("Failing this test case")
                        return False
                    elif 'OID' in stdout:
                        self.__logger.info("SNMP query failed again")
                        self.__logger.info("Failing this test case")
                        return False
                    else:
                        counter2 = stdout.split()[3]
                else:
                    counter2 = stdout.split()[3]

                # The counter must have moved between the two reads.
                if counter1 == counter2:
                    return False
                else:
                    return True
        # Compute node not found.
        return False
    else:
        return False
+
def check_dma_dummy_included(self, compute, name):
    """Check if dummy collectd config by DMA
    is included in collectd.conf file.

    The matching config file is deleted after a successful check so
    that the node is left clean for the next run.

    Keyword arguments:
    compute -- compute node instance
    name -- config file name

    Return boolean: True if the config was found (and removed),
    False otherwise.
    """
    compute_name = compute.get_name()
    nodes = get_apex_nodes()
    for node in nodes:
        if compute_name == node.get_dict()['name']:
            dummy_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
            # Fix: run_cmd can return None (handled everywhere else in
            # this class); 'in None' would raise TypeError.
            if dummy_conf is None or name + '.conf' not in dummy_conf:
                self.__logger.error('check conf FAIL')
                return False
            else:
                self.__logger.info('check conf PASS')
                fullpath = '/etc/collectd/collectd.conf.d/{}'.format(
                    name + '.conf')
                self.__logger.info('Delete file {}'.format(fullpath))
                node.run_cmd('sudo rm -f ' + fullpath)
                return True
    self.__logger.error('Some panic, compute not found')
    return False
+
def create_testvm(self, compute_node, test_name):
    """Create a test VM on the given compute node via a controller.

    Uploads the test image and overcloudrc to a controller node, then
    creates an image, a flavor and a server pinned to *compute_node*.

    Keyword arguments:
    compute_node -- compute node instance the VM is scheduled onto
    test_name -- base name used for the image, flavor and server

    Return dict with the created 'image', 'flavor' and 'server' ids.
    """
    nodes = get_apex_nodes()
    compute_name = compute_node.get_name()

    controller_node = None
    for node in nodes:
        if node.is_controller():
            controller_node = node
            break

    self.__logger.debug('Creating Test VM on {}' .format(compute_name))
    self.__logger.debug('Create command is executed in {}' .format(
        (controller_node.get_dict()['name'])))

    # Fix: files must go to the controller the commands run on;
    # previously this used the loop-leftover 'node' variable, which
    # only happened to be the controller because of the break above.
    controller_node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
    controller_node.put_file(TEST_VM_IMAGE_PATH, TEST_VM_IMAGE)
    image = controller_node.run_cmd(
        'source overcloudrc.v3;'
        'openstack image create -f value -c id'
        ' --disk-format qcow2 --file {0} {1}'
        .format(TEST_VM_IMAGE, test_name))
    flavor = controller_node.run_cmd(
        'source overcloudrc.v3;'
        'openstack flavor create -f value -c id {}'
        .format(test_name))
    host = controller_node.run_cmd(
        'source overcloudrc.v3;'
        'openstack hypervisor list -f value -c "Hypervisor Hostname"'
        ' | grep "^{}\\."'
        .format(compute_name))
    server = controller_node.run_cmd(
        'source overcloudrc.v3;'
        'openstack server create -f value -c id'
        ' --image {0} --flavor {1} --availability-zone {2} {3}'
        .format(image, flavor, 'nova:' + host, test_name))

    resources = {"image": image, "flavor": flavor, "server": server}

    if server:
        self.__logger.debug('VM created')
    self.__logger.debug('VM info: {}'.format(resources))

    return resources
+
def delete_testvm(self, resources):
    """Delete the OpenStack resources created by create_testvm.

    Keyword arguments:
    resources -- dict with optional 'server', 'flavor' and 'image'
        ids; missing or falsy entries are skipped
    """
    nodes = get_apex_nodes()

    controller_node = None
    for node in nodes:
        if node.is_controller():
            controller_node = node
            break

    # Fix: typo in the log message ('Deleteing').
    self.__logger.debug('Deleting Test VM')
    self.__logger.debug('VM to be deleted info: {}'.format(resources))
    self.__logger.debug('Delete command is executed in {}' .format(
        (controller_node.get_dict()['name'])))

    server = resources.get('server', None)
    flavor = resources.get('flavor', None)
    image = resources.get('image', None)
    # Delete in reverse order of creation: server, flavor, image.
    if server:
        controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack server delete {}'.format(server))
    if flavor:
        controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack flavor delete {}'.format(flavor))
    if image:
        controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack image delete {}'.format(image))

    self.__logger.debug('VM and other OpenStack resources deleted')
+
def test_dma_infofetch_get_data(self, compute, test_name):
    """Check that DMA InfoFetch stored the test VM's info in redis.

    Queries redis on the compute node for barometer-dma vminfo keys
    and looks for *test_name* in the stored values.

    Keyword arguments:
    compute -- compute node instance
    test_name -- VM name expected to appear in the vminfo data

    Return boolean: True if the VM info was found, False otherwise.
    """
    compute_name = compute.get_name()
    for node in get_apex_nodes():
        if compute_name != node.get_dict()['name']:
            continue
        stdout = node.run_cmd(
            'redis-cli keys "barometer-dma/vm/*/vminfo"'
            ' | while read k; do redis-cli get $k; done'
            ' | grep {}'.format(test_name))
        self.__logger.debug('InfoFetch data: {}'.format(stdout))
        if stdout and test_name in stdout:
            self.__logger.info('PASS')
            return True
        self.__logger.info('No test vm info')

    self.__logger.info('FAIL')
    return False