X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=baro_tests%2Fcollectd.py;h=c1a05afbbad5302d3a7b3fafb8ac2defc383a9ac;hb=HEAD;hp=2878d508c14c858d6376a96c76b2e377a0b01a46;hpb=151a5d6db45763a4c130a37d69f120704fac8039;p=barometer.git diff --git a/baro_tests/collectd.py b/baro_tests/collectd.py index 2878d508..c1a05afb 100644 --- a/baro_tests/collectd.py +++ b/baro_tests/collectd.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- - +# +# Copyright 2017 OPNFV +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -11,6 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +# Patch on October 10 2017 """Executing test of plugins""" @@ -22,15 +25,16 @@ import time import logging import config_server import tests -import subprocess +import dma +from distutils import version from opnfv.deployment import factory AODH_NAME = 'aodh' GNOCCHI_NAME = 'gnocchi' ID_RSA_SRC = '/root/.ssh/id_rsa' -ID_RSA_DST_DIR = '/home/opnfv/.ssh' +ID_RSA_DST_DIR = '/root/.ssh' ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa' -APEX_IP = subprocess.check_output("echo $INSTALLER_IP", shell=True) +APEX_IP = os.getenv("INSTALLER_IP").rstrip('\n') APEX_USER = 'root' APEX_USER_STACK = 'stack' APEX_PKEY = '/root/.ssh/id_rsa' @@ -65,6 +69,15 @@ class InvalidResponse(KeystoneException): "Invalid response", exc, response) +def get_apex_nodes(): + handler = factory.Factory.get_handler('apex', + APEX_IP, + APEX_USER_STACK, + APEX_PKEY) + nodes = handler.get_nodes() + return nodes + + class GnocchiClient(object): # Gnocchi Client to authenticate and request meters def __init__(self): @@ -164,31 +177,6 @@ class AodhClient(object): logger.warning('Aodh is not registered in service catalog') -class SNMPClient(object): - """Client to request SNMP meters""" - def __init__(self, conf, compute_node): - """ - Keyword arguments: - conf -- ConfigServer instance - compute_node -- Compute node object - """ - self.conf = conf - self.compute_node = compute_node - - def get_snmp_metrics(self, compute_node, mib_file, mib_strings): - snmp_output = {} - if mib_file is None: - cmd = "snmpwalk -v 2c -c public localhost IF-MIB::interfaces" - ip = compute_node.get_ip() - snmp_output = self.conf.execute_command(cmd, ip) - else: - for mib_string in mib_strings: - snmp_output[mib_string] = self.conf.execute_command( - "snmpwalk -v2c -m {} -c public localhost {}".format( - mib_file, mib_string), compute_node.get_ip()) - return snmp_output - - class CSVClient(object): """Client to request CSV meters""" def __init__(self, conf): @@ -209,30 +197,48 @@ class CSVClient(object): Return list of metrics. 
""" - stdout = self.conf.execute_command( - "date '+%Y-%m-%d'", compute_node.get_ip()) - date = stdout[0].strip() - metrics = [] - for plugin_subdir in plugin_subdirectories: - for meter_category in meter_categories: - stdout = self.conf.execute_command( - "tail -2 /var/lib/collectd/csv/" - + "{0}.jf.intel.com/{1}/{2}-{3}".format( - compute_node.get_name(), plugin_subdir, meter_category, - date), - compute_node.get_ip()) - # Storing last two values - values = stdout - if len(values) < 2: - logger.error( - 'Getting last two CSV entries of meter category ' - + '{0} in {1} subdir failed'.format( - meter_category, plugin_subdir)) - else: - old_value = int(values[0][0:values[0].index('.')]) - new_value = int(values[1][0:values[1].index('.')]) - metrics.append(( - plugin_subdir, meter_category, old_value, new_value)) + compute_name = compute_node.get_name() + nodes = get_apex_nodes() + for node in nodes: + if compute_name == node.get_dict()['name']: + date = node.run_cmd( + "date '+%Y-%m-%d'") + hostname = node.run_cmd('hostname -A') + hostname = hostname.split()[0] + metrics = [] + for plugin_subdir in plugin_subdirectories: + for meter_category in meter_categories: + stdout1 = node.run_cmd( + "tail -2 /var/lib/collectd/csv/" + + "{0}/{1}/{2}-{3}".format( + hostname, plugin_subdir, + meter_category, date)) + stdout2 = node.run_cmd( + "tail -1 /var/lib/collectd/csv/" + + "{0}/{1}/{2}-{3}".format( + hostname, plugin_subdir, + meter_category, date)) + # Storing last two values + values = stdout1 + values2 = stdout2 + if values is None: + logger.error( + 'Getting last two CSV entries of meter category' + + ' {0} in {1} subdir failed'.format( + meter_category, plugin_subdir)) + elif values2 is None: + logger.error( + 'Getting last CSV entries of meter category' + + ' {0} in {1} subdir failed'.format( + meter_category, plugin_subdir)) + else: + values = values.split(',') + old_value = float(values[0]) + values2 = values2.split(',') + new_value = float(values2[0]) + metrics.append(( + plugin_subdir, meter_category, old_value, + new_value)) return metrics @@ -253,7 +259,7 @@ def get_csv_categories_for_ipmi(conf, compute_node): return [category.strip()[:-11] for category in categories] -def _process_result(compute_node, test, result, results_list): +def _process_result(compute_node, out_plugin, test, result, results_list, node): """Print test result and append it to results list. 
    Keyword arguments:
@@ -263,13 +269,13 @@ def _process_result(compute_node, test, result, results_list):
     """
     if result:
         logger.info(
-            'Compute node {0} test case {1} PASSED.'.format(
-                compute_node, test))
+            'Test case for {0} with {1} PASSED on {2}.'.format(
+                test, out_plugin, node))
     else:
         logger.error(
-            'Compute node {0} test case {1} FAILED.'.format(
-                compute_node, test))
-    results_list.append((compute_node, test, result))
+            'Test case for {0} with {1} FAILED on {2}.'.format(
+                test, out_plugin, node))
+    results_list.append((compute_node, out_plugin, test, result))
 
 
 def _print_label(label):
@@ -314,22 +320,41 @@ def _print_final_result_of_plugin(
     """
     print_line = ''
     for id in compute_ids:
-        if out_plugins[id] == out_plugin:
-            if (id, plugin, True) in results:
-                print_line += ' PASS |'
-            elif (id, plugin, False) in results \
-                    and out_plugins[id] == out_plugin:
-                print_line += ' FAIL |'
-            else:
-                print_line += ' NOT EX |'
-        elif out_plugin == 'Gnocchi':
-            print_line += ' NOT EX |'
+        # Every output plug-in uses the same lookup on the
+        # (id, out_plugin, plugin, result) tuples, so one branch covers all.
+        if out_plugin in ('Gnocchi', 'AODH', 'SNMP', 'CSV'):
+            if (id, out_plugin, plugin, True) in results:
+                print_line += ' PASS |'
+            elif (id, out_plugin, plugin, False) in results:
+                print_line += ' FAIL |'
+            else:
+                print_line += ' SKIP |'
         else:
-            print_line += ' NOT EX |'
+            print_line += ' SKIP |'
     return print_line
 
 
-def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
+def print_overall_summary(
+        compute_ids, tested_plugins, aodh_plugins, results, out_plugins):
     """Print overall summary table.
 
    Keyword arguments:
@@ -340,7 +365,6 @@ def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
     """
     compute_node_names = ['Node-{}'.format(i) for i in range(
         len((compute_ids)))]
-    # compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
     all_computes_in_line = ''
     for compute in compute_node_names:
         all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
@@ -358,46 +382,60 @@ def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
     logger.info(line_of_nodes)
     logger.info(
         '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
-    out_plugins_print = ['Gnocchi']
-    if 'SNMP' in out_plugins.values():
-        out_plugins_print.append('SNMP')
-    if 'AODH' in out_plugins.values():
-        out_plugins_print.append('AODH')
-    if 'CSV' in out_plugins.values():
-        out_plugins_print.append('CSV')
+    # Collect, without duplicates, every output plug-in that is active
+    # on at least one compute node.
+    out_plugins_print = []
+    for plugins in out_plugins.values():
+        for out_plugin in ('Gnocchi', 'AODH', 'SNMP', 'CSV'):
+            if out_plugin in plugins and out_plugin not in out_plugins_print:
+                out_plugins_print.append(out_plugin)
     for out_plugin in out_plugins_print:
         output_plugins_line = ''
         for id in compute_ids:
-            out_plugin_result = 'FAIL'
-            if out_plugin == 'Gnocchi':
-                out_plugin_result = \
-                    'PASS' if out_plugins[id] == out_plugin else 'FAIL'
-            if out_plugin == 'AODH':
-                if out_plugins[id] == out_plugin:
-                    out_plugin_result = \
-                        'PASS' if out_plugins[id] == out_plugin else 'FAIL'
-            if out_plugin == 'SNMP':
-                if out_plugins[id] == out_plugin:
-                    out_plugin_result = \
-                        'PASS' if out_plugins[id] == out_plugin else 'FAIL'
-            if out_plugin == 'CSV':
-                if out_plugins[id] == out_plugin:
-                    out_plugin_result = \
-                        'PASS' if [
-                            plugin for comp_id, plugin, res in results
-                            if comp_id == id and res] else 'FAIL'
-                else:
-                    out_plugin_result = 'SKIP'
+            # Gnocchi, AODH and SNMP are verified on the controllers, so
+            # they are reported as PASS here; CSV is verified per compute
+            # node through the results list.
+            if out_plugin in ('Gnocchi', 'AODH', 'SNMP'):
+                out_plugin_result = 'PASS'
+            elif out_plugin == 'CSV':
+                out_plugin_result = \
+                    'PASS' if [
+                        plugin for comp_id, out_pl, plugin, res in results
+                        if comp_id == id and res] else 'FAIL'
+            else:
+                out_plugin_result = 'FAIL'
             output_plugins_line += '| ' + out_plugin_result + ' '
         logger.info(
             '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
             + output_plugins_line + '|')
-        for plugin in sorted(tested_plugins.values()):
+
+        # AODH only supports a subset of the tested plug-ins, so pick the
+        # label dictionary matching the output plug-in.
+        if out_plugin == 'AODH':
+            plugins_to_print = aodh_plugins.values()
+        else:
+            plugins_to_print = tested_plugins.values()
+        for plugin in sorted(plugins_to_print):
             line_plugin = _print_final_result_of_plugin(
                 plugin, compute_ids, results, out_plugins, out_plugin)
             logger.info(
                 '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
                 + '|' + line_plugin)
     logger.info(
         '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
 
 
@@ -405,8 +443,8 @@ def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
 
 def _exec_testcase(
-        test_labels, name, gnocchi_running, aodh_running, snmp_running,
-        controllers, compute_node, conf, results, error_plugins, out_plugins):
+        test_labels, name, out_plugin, controllers, compute_node,
+        conf, results, error_plugins, out_plugins):
     """Execute the testcase.
 
     Keyword arguments:
@@ -434,11 +472,8 @@ def _exec_testcase(
         bridge for bridge in ovs_interfaces
         if bridge in ovs_configured_bridges]
     plugin_prerequisites = {
-        'intel_rdt': [(
-            conf.is_libpqos_on_node(compute_node),
-            'libpqos must be installed.')],
         'mcelog': [(
-            conf.is_installed(compute_node, 'mcelog'),
+            conf.is_mcelog_installed(compute_node, 'mcelog'),
             'mcelog must be installed.')],
         'ovs_events': [(
             len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
             'Interfaces must be configured.')],
         'ovs_stats': [(
             len(ovs_existing_configured_bridges) > 0,
             'Bridges must be configured.')]}
     gnocchi_criteria_lists = {
-        'hugepages': ['hugepages'],
-        'mcelog': ['mcelog'],
-        'ovs_events': ['interface-ovs-system'],
-        'ovs_stats': ['ovs_stats-br0.br0']}
+        'hugepages': 'hugepages',
+        'intel_rdt': 'rdt',
+        'mcelog': 'mcelog',
+        'ovs_events': 'interface-ovs-system',
+        'ovs_stats': 'ovs_stats-br0.br0'}
     aodh_criteria_lists = {
-        'mcelog': ['mcelog.errors'],
-        'ovs_events': ['ovs_events.gauge']}
+        'mcelog': 'mcelog',
+        'ovs_events': 'ovs_events'}
     snmp_mib_files = {
         'intel_rdt': '/usr/share/snmp/mibs/Intel-Rdt.txt',
         'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
         'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
     snmp_mib_strings = {
-        'intel_rdt': [
-            'INTEL-RDT-MIB::rdtLlc.1',
-            'INTEL-RDT-MIB::rdtIpc.1',
-            'INTEL-RDT-MIB::rdtMbmRemote.1',
-            'INTEL-RDT-MIB::rdtMbmLocal.1'],
-        'hugepages': [
-            'INTEL-HUGEPAGES-MIB::hugepagesPageFree'],
-        'mcelog': [
-            'INTEL-MCELOG-MIB::memoryCorrectedErrors.1',
-            'INTEL-MCELOG-MIB::memoryCorrectedErrors.2']}
+        'intel_rdt': 'INTEL-RDT-MIB::rdtLlc.1',
+        'hugepages': 'INTEL-HUGEPAGES-MIB::hugepagesPageFree',
+        'mcelog': 'INTEL-MCELOG-MIB::memoryCorrectedErrors.1'}
     nr_hugepages = int(time.time()) % 10000
     snmp_in_commands = {
         'intel_rdt': None,
 
         'mcelog': '/root/mce-inject_df < /root/corrected'}
     csv_subdirs = {
         'intel_rdt': [
-            'intel_rdt-{}'.format(core)
-            for core in conf.get_plugin_config_values(
-                compute_node, 'intel_rdt', 'Cores')],
+            'intel_rdt-0-2'],
         'hugepages': [
-            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',
-            'hugepages-node1-2048Kb', 'hugepages-mm-1048576Kb',
-            'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
-        'ipmi': ['ipmi'],
+            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb'],
+        # 'ipmi': ['ipmi'],
         'mcelog': [
             'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
             'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
         'ovs_stats': [
-            'ovs_stats-{0}.{0}'.format(interface)
-            for interface in ovs_existing_configured_bridges],
+            'ovs_stats-br0.br0'],
         'ovs_events': [
-            'ovs_events-{}'.format(interface)
-            for interface in (
-                ovs_existing_configured_int
-                if len(ovs_existing_configured_int) > 0 else ovs_interfaces)]}
-    csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf, compute_node)
+            'ovs_events-br0']}
+    # csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf,
+    #                                                         compute_node)
     csv_meter_categories = {
         'intel_rdt': [
-            'bytes-llc', 'ipc', 'memory_bandwidth-local',
-            'memory_bandwidth-remote'],
+            'bytes-llc', 'ipc'],
         'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
-        'ipmi': csv_meter_categories_ipmi,
+        # 'ipmi': csv_meter_categories_ipmi,
         'mcelog': [
             'errors-corrected_memory_errors',
-            'errors-uncorrected_memory_errors',
-            'errors-corrected_memory_errors_in_24h',
-            'errors-uncorrected_memory_errors_in_24h'],
+            'errors-uncorrected_memory_errors'],
         'ovs_stats': [
-            'if_collisions', 'if_dropped', 'if_errors', 'if_packets',
-            'if_rx_errors-crc', 'if_rx_errors-frame', 'if_rx_errors-over',
-            'if_rx_octets', 'if_tx_octets'],
+            'if_dropped', 'if_errors', 'if_packets'],
         'ovs_events': ['gauge-link_status']}
 
     _print_plugin_label(
@@ -525,7 +542,8 @@ def _exec_testcase(
         for error in plugin_critical_errors:
             logger.error(' * ' + error)
         _process_result(
-            compute_node.get_id(), test_labels[name], False, results)
+            compute_node.get_id(), out_plugin, test_labels[name], False,
+            results, compute_node.get_name())
     else:
         plugin_errors = [
             error for plugin, error, critical in error_plugins
@@ -546,36 +564,53 @@ def _exec_testcase(
                 + 'following prerequisites failed:')
            for prerequisite in failed_prerequisites:
                logger.error(' * {}'.format(prerequisite))
+        # intel_rdt is an optional plug-in: it cannot run on virtual
+        # deployments, so report the test as skipped (passed) there.
+        elif name == "intel_rdt" and not conf.is_rdt_available(compute_node):
+            logger.info(
+                "RDT is not available on virtual nodes, skipping test.")
+            res = True
+            logger.debug("Results for {}, pre-processing: {}".format(
+                test_labels[name], results))
+            _process_result(
+                compute_node.get_id(), out_plugin, test_labels[name],
+                res, results, compute_node.get_name())
+            logger.debug("Results for {}, post-processing: {}".format(
+                test_labels[name], results))
         else:
-            if gnocchi_running:
-                plugin_interval = conf.get_plugin_interval(compute_node, name)
+            plugin_interval = conf.get_plugin_interval(compute_node, name)
+            if out_plugin == 'Gnocchi':
                 res = conf.test_plugins_with_gnocchi(
-                    compute_node.get_id(), plugin_interval, logger,
-                    criteria_list=gnocchi_criteria_lists[name])
-            elif aodh_running:
+                    compute_node.get_name(), plugin_interval,
+                    logger, criteria_list=gnocchi_criteria_lists[name])
+            elif out_plugin == 'AODH':
                 res = conf.test_plugins_with_aodh(
-                    compute_node.get_id(), plugin_interval,
-                    logger, creteria_list=aodh_criteria_lists[name])
-            elif snmp_running:
+                    compute_node.get_name(), plugin_interval,
+                    logger, criteria_list=aodh_criteria_lists[name])
+            elif out_plugin == 'SNMP':
                 res = \
                     name in snmp_mib_files and name in snmp_mib_strings \
-                    and tests.test_snmp_sends_data(
-                        compute_node,
-                        conf.get_plugin_interval(compute_node, name), logger,
-                        SNMPClient(conf, compute_node), snmp_mib_files[name],
-                        snmp_mib_strings[name], snmp_in_commands[name], conf)
-            else:
+                    and conf.test_plugins_with_snmp(
+                        compute_node.get_name(), plugin_interval, logger,
+                        name, snmp_mib_files[name], snmp_mib_strings[name],
+                        snmp_in_commands[name])
+            elif out_plugin == 'CSV':
                 res = tests.test_csv_handles_plugin_data(
                     compute_node, conf.get_plugin_interval(compute_node, name),
                     name, csv_subdirs[name], csv_meter_categories[name],
                     logger, CSVClient(conf))
+
             if res and plugin_errors:
                 logger.info(
-                    'Test works, but will be reported as failure,'
+                    'Test works, but will be reported as failure, '
                     + 'because of non-critical errors.')
                 res = False
+            logger.debug("Results for {}, pre-processing: {}".format(
+                test_labels[name], results))
             _process_result(
-                compute_node.get_id(), test_labels[name], res, results)
+                compute_node.get_id(), out_plugin, test_labels[name],
+                res, results, compute_node.get_name())
+            logger.debug("Results for {}, post-processing: {}".format(
+                test_labels[name], results))
 
 
 def get_results_for_ovs_events(
@@ -591,51 +626,67 @@ def get_results_for_ovs_events(
     logger.info("Results for OVS Events = {}"
                 .format(results))
 
 
+def create_ovs_bridge():
+    """Create OVS bridges on compute nodes."""
+    nodes = get_apex_nodes()
+    logger.info("Creating OVS bridges on compute nodes")
+    for node in nodes:
+        if node.is_compute():
+            node.run_cmd('sudo ovs-vsctl add-br br0')
+            node.run_cmd('sudo ovs-vsctl set-manager ptcp:6640')
+    logger.info('OVS bridges created on compute nodes')
+
+
 def mcelog_install():
     """Install mcelog on compute nodes."""
-    _print_label('Enabling mcelog on compute nodes')
+    _print_label('Enabling mcelog and OVS bridges on compute nodes')
     handler = factory.Factory.get_handler('apex',
                                           APEX_IP,
                                           APEX_USER_STACK,
                                           APEX_PKEY)
     nodes = handler.get_nodes()
+    mce_bin = os.path.dirname(os.path.realpath(__file__)) + '/mce-inject_ea'
     for node in nodes:
         if node.is_compute():
             centos_release = node.run_cmd('uname -r')
-            if '3.10.0-514.26.2.el7.x86_64' not in centos_release:
+            # Kernels older than the CentOS 7.4 release kernel do not
+            # support the mce-inject interface used below.
+            if (version.LooseVersion(centos_release) <
+                    version.LooseVersion('3.10.0-514.26.2.el7.x86_64')):
                 logger.info(
-                    'Mcelog will not be enabled '
-                    + 'on node-{0}, '.format(node.get_dict()['id'])
-                    + 'unsupported CentOS release found ({1}).'.format(
-                        centos_release))
+                    'Mcelog will NOT be enabled on node-{}.'
+                    ' Unsupported CentOS release found ({}).'.format(
+                        node.get_dict()['name'], centos_release))
             else:
                 logger.info(
                     'Checking if mcelog is enabled'
-                    + ' on node-{}...'.format(node.get_dict()['id']))
+                    + ' on node-{}...'.format(node.get_dict()['name']))
                 res = node.run_cmd('ls')
-                if 'mce-inject_ea' and 'corrected' in res:
-                    logger.info(
-                        'Mcelog seems to be already installed '
-                        + 'on node-{}.'.format(node.get_dict()['id']))
-                    node.run_cmd('modprobe mce-inject_ea')
-                    node.run_cmd('mce-inject_ea < corrected')
-                else:
-                    logger.info(
-                        'Mcelog will be enabled on node-{}...'.format(
-                            node.get_dict()['id']))
-                    node.put_file(
-                        '/usr/local/lib/python2.7/dist-packages/baro_tests/'
-                        + 'mce-inject_ea', 'mce-inject_ea')
-                    node.run_cmd('chmod a+x mce-inject_ea')
-                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
-                    node.run_cmd(
-                        'echo "STATUS 0xcc00008000010090" >>'
-                        + ' corrected')
-                    node.run_cmd(
-                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
-                    node.run_cmd('modprobe mce-inject')
-                    node.run_cmd('mce-inject_ea < corrected')
-    logger.info('Mcelog is installed on all compute nodes')
+                if 'mce-inject_ea' in res and 'corrected' in res:
+                    logger.info(
+                        'Mcelog seems to be already installed '
+                        + 'on node-{}.'.format(node.get_dict()['name']))
+                    node.run_cmd('sudo modprobe mce-inject')
+                    node.run_cmd('sudo ./mce-inject_ea < corrected')
+                else:
+                    logger.info(
+                        'Mcelog will be enabled '
+                        + 'on node-{}...'.format(node.get_dict()['name']))
+                    node.put_file(mce_bin, 'mce-inject_ea')
+                    node.run_cmd('chmod a+x mce-inject_ea')
+                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
+                    node.run_cmd(
+                        'echo "STATUS 0xcc00008000010090" >>'
+                        + ' corrected')
+                    node.run_cmd(
+                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
+                    node.run_cmd('sudo modprobe mce-inject')
+                    node.run_cmd('sudo ./mce-inject_ea < corrected')
+                    logger.info(
+                        'Mcelog was installed '
+                        + 'on node-{}.'.format(node.get_dict()['name']))
 
 
 def mcelog_delete():
@@ -650,7 +701,7 @@ def mcelog_delete():
             node.run_cmd('rm mce-inject_ea')
         if 'corrected' in output:
             node.run_cmd('rm corrected')
-        node.run_cmd('systemctl restart mcelog')
+        node.run_cmd('sudo systemctl restart mcelog')
     logger.info('Mcelog is deleted from all compute nodes')
@@ -704,42 +755,24 @@ def main(bt_logger=None):
     _print_label(
         'Display of Control and Compute nodes available in the set up')
 
-    logger.info('controllers: {}'.format([('{0}: {1} ({2})'.format(
-        node.get_id(), node.get_name(),
-        node.get_ip())) for node in controllers]))
-    logger.info('computes: {}'.format([('{0}: {1} ({2})'.format(
-        node.get_id(), node.get_name(), node.get_ip()))
-        for node in computes]))
+    logger.info('controllers: {}'.format([('{0}: {1}'.format(
+        node.get_name(), node.get_ip())) for node in controllers]))
+    logger.info('computes: {}'.format([('{0}: {1}'.format(
+        node.get_name(), node.get_ip())) for node in computes]))
 
     mcelog_install()
+    create_ovs_bridge()
     gnocchi_running_on_con = False
     aodh_running_on_con = False
+    # Disabling SNMP write plug-in
     snmp_running = False
-    _print_label('Testing Gnocchi, AODH and SNMP on controller nodes')
+    _print_label('Testing Gnocchi and AODH plugins on nodes')
 
     for controller in controllers:
-        gnocchi_client = GnocchiClient()
-        gnocchi_client.auth_token()
-        gnocchi_running = (
-            gnocchi_running_on_con and conf.is_gnocchi_running(controller))
-        aodh_client = AodhClient()
-        aodh_client.auth_token()
-        aodh_running = (
-            aodh_running_on_con and conf.is_aodh_running(controller))
-        if gnocchi_running:
-            logger.info("Gnocchi is running on controller.")
-        elif aodh_running:
-            logger.error("Gnocchi is not running on controller.")
-            logger.info("AODH is running on controller.")
-        elif snmp_running:
-            logger.error("Gnocchi is not running on Controller")
-            logger.error("AODH is not running on controller.")
-            logger.info("SNMP is running on controller.")
-        else:
-            logger.error("Gnocchi is not running on Controller")
-            logger.error("AODH is not running on controller.")
-            logger.error("SNMP is not running on controller.")
-            logger.info("CSV will be enabled on compute nodes.")
+        # Aggregate across controllers: a service counts as running if it
+        # is found on any one of them.
+        gnocchi_running_on_con = (
+            gnocchi_running_on_con or conf.is_gnocchi_running(controller))
+        aodh_running_on_con = (
+            aodh_running_on_con or conf.is_aodh_running(controller))
+    gnocchi_running = gnocchi_running_on_con
+    aodh_running = aodh_running_on_con
 
     compute_ids = []
     compute_node_names = []
@@ -751,119 +784,104 @@ def main(bt_logger=None):
         'mcelog': 'Mcelog',
         'ovs_stats': 'OVS stats',
         'ovs_events': 'OVS events'}
-    out_plugins = {
-        'gnocchi': 'Gnocchi',
-        'aodh': 'AODH',
-        'snmp': 'SNMP',
-        'csv': 'CSV'}
+    aodh_plugin_labels = {
+        'mcelog': 'Mcelog',
+        'ovs_events': 'OVS events'}
+    out_plugins = {}
     for compute_node in computes:
         node_id = compute_node.get_id()
         node_name = compute_node.get_name()
-        out_plugins[node_id] = 'CSV'
+        out_plugins[node_id] = []
        compute_ids.append(node_id)
        compute_node_names.append(node_name)
        plugins_to_enable = []
-        _print_label('NODE {}: Test Gnocchi Plug-in'.format(node_name))
-        logger.info('Checking if gnocchi plug-in is included in compute nodes.')
-        if not conf.check_gnocchi_plugin_included(compute_node):
-            logger.error('Gnocchi plug-in is not included.')
-            logger.info(
-                'Testcases on node {} will not be executed'.format(node_name))
-        else:
-            collectd_restarted, collectd_warnings = \
-                conf.restart_collectd(compute_node)
-            sleep_time = 5
-            logger.info(
-                'Sleeping for {} seconds after collectd restart...'.format(
-                    sleep_time))
-            time.sleep(sleep_time)
-            if not collectd_restarted:
-                for warning in collectd_warnings:
-                    logger.warning(warning)
-                logger.error(
-                    'Restart of collectd on node {} failed'.format(node_name))
-                logger.info(
-                    'Testcases on node {} will not be executed'.format(
-                        node_name))
-            else:
-                for warning in collectd_warnings:
-                    logger.warning(warning)
-
-            if gnocchi_running:
-                out_plugins[node_id] = 'Gnocchi'
-                logger.info("Gnocchi is active and collecting data")
-            elif aodh_running:
-                out_plugins[node_id] = 'AODH'
-                logger.info("AODH withh be tested")
-                _print_label('Node {}: Test AODH' .format(node_name))
-                logger.info("Checking if AODH is running")
-                logger.info("AODH is running")
-            elif snmp_running:
-                out_plugins[node_id] = 'SNMP'
-                logger.info("SNMP will be tested.")
-                _print_label('NODE {}: Test SNMP'.format(node_id))
-                logger.info("Checking if SNMP is running.")
-                logger.info("SNMP is running.")
-            else:
-                plugins_to_enable.append('csv')
-                out_plugins[node_id] = 'CSV'
-                logger.error("Gnocchi, AODH, SNMP are not running")
-                logger.info(
-                    "CSV will be enabled for verification "
-                    + "of test plugins.")
-            if plugins_to_enable:
-                _print_label(
-                    'NODE {}: Enabling Test Plug-in '.format(node_name)
-                    + 'and Test case execution')
-                error_plugins = []
-                if plugins_to_enable and not conf.enable_plugins(
-                        compute_node, plugins_to_enable, error_plugins,
-                        create_backup=False):
-                    logger.error(
-                        'Failed to test plugins on node {}.'.format(node_id))
-                    logger.info(
-                        'Testcases on node {} will not be executed'.format(
-                            node_id))
-                else:
-                    if plugins_to_enable:
-                        collectd_restarted, collectd_warnings = \
-                            conf.restart_collectd(compute_node)
-                        sleep_time = 30
-                        logger.info(
-                            'Sleeping for {} seconds'.format(sleep_time)
-                            + ' after collectd restart...')
-                        time.sleep(sleep_time)
-                    if plugins_to_enable and not collectd_restarted:
-                        for warning in collectd_warnings:
-                            logger.warning(warning)
-                        logger.error(
-                            'Restart of collectd on node {} failed'.format(
-                                node_id))
-                        logger.info(
-                            'Testcases on node {}'.format(node_id)
-                            + ' will not be executed.')
-                    else:
-                        if collectd_warnings:
-                            for warning in collectd_warnings:
-                                logger.warning(warning)
-
-                        for plugin_name in sorted(plugin_labels.keys()):
-                            _exec_testcase(
-                                plugin_labels, plugin_name, gnocchi_running,
-                                aodh_running, snmp_running, controllers,
-                                compute_node, conf, results, error_plugins,
-                                out_plugins[node_id])
-
-        _print_label('NODE {}: Restoring config file'.format(node_name))
-        conf.restore_config(compute_node)
-    mcelog_delete()
-    print_overall_summary(compute_ids, plugin_labels, results, out_plugins)
-
-    if ((len([res for res in results if not res[2]]) > 0)
-            or (len(results) < len(computes) * len(plugin_labels))):
-        logger.error('Some tests have failed or have not been executed')
-        return 1
-    return 0
+        error_plugins = []
+        gnocchi_running_com = (
+            gnocchi_running and conf.check_gnocchi_plugin_included(
+                compute_node))
+        aodh_running_com = (
+            aodh_running and conf.check_aodh_plugin_included(compute_node))
+        # logger.info("SNMP enabled on {}" .format(node_name))
+        if gnocchi_running_com:
+            out_plugins[node_id].append("Gnocchi")
+        if aodh_running_com:
+            out_plugins[node_id].append("AODH")
+        if snmp_running:
+            out_plugins[node_id].append("SNMP")
+
+        # CSV is only enabled (and verified) alongside Gnocchi.
+        if 'Gnocchi' in out_plugins[node_id]:
+            plugins_to_enable.append('csv')
+            out_plugins[node_id].append("CSV")
+        if plugins_to_enable:
+            _print_label(
+                'NODE {}: Enabling Test Plug-in '.format(node_name)
+                + 'and Test case execution')
+        if plugins_to_enable and not conf.enable_plugins(
+                compute_node, plugins_to_enable, error_plugins,
+                create_backup=False):
+            logger.error(
+                'Failed to test plugins on node {}.'.format(node_id))
+            logger.info(
+                'Testcases on node {} will not be executed'.format(
+                    node_id))
+
+        for i in out_plugins[node_id]:
+            if i == 'AODH':
+                for plugin_name in sorted(aodh_plugin_labels.keys()):
+                    _exec_testcase(
+                        aodh_plugin_labels, plugin_name, i,
+                        controllers, compute_node, conf, results,
+                        error_plugins, out_plugins[node_id])
+            elif i == 'CSV':
+                _print_label("Node {}: Executing CSV Testcases".format(
+                    node_name))
+                logger.info("Restarting collectd for CSV tests")
+                collectd_restarted, collectd_warnings = \
+                    conf.restart_collectd(compute_node)
+                sleep_time = 10
+                logger.info(
+                    'Sleeping for {} seconds'.format(sleep_time)
+                    + ' after collectd restart...')
+                time.sleep(sleep_time)
+                if not collectd_restarted:
+                    for warning in collectd_warnings:
+                        logger.warning(warning)
+                    logger.error(
+                        'Restart of collectd on node {} failed'.format(
+                            node_name))
+                    logger.info(
+                        'CSV Testcases on node {}'.format(node_name)
+                        + ' will not be executed.')
+                else:
+                    for plugin_name in sorted(plugin_labels.keys()):
+                        _exec_testcase(
+                            plugin_labels, plugin_name, i,
+                            controllers, compute_node, conf, results,
+                            error_plugins, out_plugins[node_id])
+            else:
+                for plugin_name in sorted(plugin_labels.keys()):
+                    _exec_testcase(
+                        plugin_labels, plugin_name, i,
+                        controllers, compute_node, conf, results,
+                        error_plugins, out_plugins[node_id])
+
+    mcelog_delete()
+    print_overall_summary(
+        compute_ids, plugin_labels, aodh_plugin_labels, results, out_plugins)
+
+    res_overall = 0
+    if any(not res[3] for res in results):
+        logger.error('Some tests have failed or have not been executed')
+        logger.error('Overall Result is Fail')
+        res_overall = 1
+
+    _print_label('Testing DMA on compute nodes')
+    res_agent = dma.dma_main(logger, conf, computes)
+
+    return 0 if res_overall == 0 and res_agent == 0 else 1
 
 
 if __name__ == '__main__':