# -*- coding: utf-8 -*-
-
+#
+# Copyright 2017 OPNFV
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+# Patch on October 10 2017
"""Executing test of plugins"""
import logging
import config_server
import tests
-import subprocess
+import os
+import dma
+from distutils import version
from opnfv.deployment import factory
AODH_NAME = 'aodh'
ID_RSA_SRC = '/root/.ssh/id_rsa'
ID_RSA_DST_DIR = '/root/.ssh'
ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'
-APEX_IP = subprocess.check_output("echo $INSTALLER_IP", shell=True)
+APEX_IP = os.getenv("INSTALLER_IP", "").rstrip('\n')
APEX_USER = 'root'
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
logger.warning('Aodh is not registered in service catalog')
-class SNMPClient(object):
- """Client to request SNMP meters"""
- def __init__(self, conf, compute_node):
- """
- Keyword arguments:
- conf -- ConfigServer instance
- compute_node -- Compute node object
- """
- self.conf = conf
- self.compute_node = compute_node
-
- def get_snmp_metrics(self, compute_node, mib_file, mib_strings):
- snmp_output = {}
- if mib_file is None:
- cmd = "snmpwalk -v 2c -c public localhost IF-MIB::interfaces"
- ip = compute_node.get_ip()
- snmp_output = self.conf.execute_command(cmd, ip)
- else:
- for mib_string in mib_strings:
- snmp_output[mib_string] = self.conf.execute_command(
- "snmpwalk -v2c -m {} -c public localhost {}".format(
- mib_file, mib_string), compute_node.get_ip())
- return snmp_output
-
-
class CSVClient(object):
"""Client to request CSV meters"""
def __init__(self, conf):
meter_category, date))
stdout2 = node.run_cmd(
"tail -1 /var/lib/collectd/csv/"
- + "{0}.jf.intel.com/{1}/{2}-{3}".format(
- compute_node.get_name(), plugin_subdir,
+ + "{0}/{1}/{2}-{3}".format(
+ hostname, plugin_subdir,
meter_category, date))
- # Storing last two values
+ # Storing last two values
values = stdout1
+ values2 = stdout2
if values is None:
logger.error(
'Getting last two CSV entries of meter category'
+ ' {0} in {1} subdir failed'.format(
meter_category, plugin_subdir))
+ elif values2 is None:
+ logger.error(
+ 'Getting last CSV entries of meter category'
+ + ' {0} in {1} subdir failed'.format(
+ meter_category, plugin_subdir))
else:
values = values.split(',')
old_value = float(values[0])
- stdout2 = stdout2.split(',')
- new_value = float(stdout2[0])
+ values2 = values2.split(',')
+ new_value = float(values2[0])
metrics.append((
plugin_subdir, meter_category, old_value,
new_value))
return [category.strip()[:-11] for category in categories]
-def _process_result(compute_node, out_plugin, test, result, results_list):
+def _process_result(compute_node, out_plugin, test, result, results_list, node):
"""Print test result and append it to results list.
Keyword arguments:
"""
if result:
logger.info(
- 'Test case {0} PASSED with {1}.'.format(
- test, out_plugin))
+ 'Test case for {0} with {1} PASSED on {2}.'.format(
+ node, out_plugin, test))
else:
logger.error(
- 'Test case {0} FAILED with {1}.'.format(
- test, out_plugin))
+ 'Test case for {0} with {1} FAILED on {2}.'.format(
+ node, out_plugin, test))
results_list.append((compute_node, out_plugin, test, result))
elif (id, out_plugin, plugin, False) in results:
print_line += ' FAIL |'
else:
- print_line += ' NOT EX |'
+ print_line += ' SKIP |'
elif out_plugin == 'AODH':
if (id, out_plugin, plugin, True) in results:
print_line += ' PASS |'
elif (id, out_plugin, plugin, False) in results:
print_line += ' FAIL |'
else:
- print_line += ' NOT EX |'
+ print_line += ' SKIP |'
+ elif out_plugin == 'SNMP':
+ if (id, out_plugin, plugin, True) in results:
+ print_line += ' PASS |'
+ elif (id, out_plugin, plugin, False) in results:
+ print_line += ' FAIL |'
+ else:
+ print_line += ' SKIP |'
elif out_plugin == 'CSV':
if (id, out_plugin, plugin, True) in results:
print_line += ' PASS |'
elif (id, out_plugin, plugin, False) in results:
print_line += ' FAIL |'
else:
- print_line += ' NOT EX |'
+ print_line += ' SKIP |'
else:
print_line += ' SKIP |'
return print_line
out_plugin_result = '----'
if out_plugin == 'Gnocchi':
out_plugin_result = \
- 'PASS' if 'Gnocchi' in out_plugins_print else 'FAIL'
- if out_plugin == 'AODH':
+ 'PASS'
+ elif out_plugin == 'AODH':
out_plugin_result = \
- 'PASS' if out_plugin in out_plugins_print else 'FAIL'
- if out_plugin == 'SNMP':
+ 'PASS'
+ elif out_plugin == 'SNMP':
out_plugin_result = \
- 'PASS' if [
- plugin for comp_id, out_pl, plugin, res in results
- if comp_id == id and res] else 'FAIL'
- if out_plugin == 'CSV':
+ 'PASS'
+ elif out_plugin == 'CSV':
out_plugin_result = \
'PASS' if [
plugin for comp_id, out_pl, plugin, res in results
if comp_id == id and res] else 'FAIL'
else:
- out_plugin_result = 'FAIL'
+ out_plugin_result = \
+ 'FAIL'
output_plugins_line += '| ' + out_plugin_result + ' '
logger.info(
'| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
bridge for bridge in ovs_interfaces
if bridge in ovs_configured_bridges]
plugin_prerequisites = {
- 'intel_rdt': [(
- conf.is_libpqos_on_node(compute_node),
- 'libpqos must be installed.')],
'mcelog': [(
conf.is_mcelog_installed(compute_node, 'mcelog'),
'mcelog must be installed.')],
'Bridges must be configured.')]}
gnocchi_criteria_lists = {
'hugepages': 'hugepages',
+ 'intel_rdt': 'rdt',
'mcelog': 'mcelog',
'ovs_events': 'interface-ovs-system',
'ovs_stats': 'ovs_stats-br0.br0'}
'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
snmp_mib_strings = {
- 'intel_rdt': [
- 'INTEL-RDT-MIB::rdtLlc.1',
- 'INTEL-RDT-MIB::rdtIpc.1',
- 'INTEL-RDT-MIB::rdtMbmRemote.1',
- 'INTEL-RDT-MIB::rdtMbmLocal.1'],
- 'hugepages': [
- 'INTEL-HUGEPAGES-MIB::hugepagesPageFree'],
- 'mcelog': [
- 'INTEL-MCELOG-MIB::memoryCorrectedErrors.1',
- 'INTEL-MCELOG-MIB::memoryCorrectedErrors.2']}
+ 'intel_rdt': 'INTEL-RDT-MIB::rdtLlc.1',
+ 'hugepages': 'INTEL-HUGEPAGES-MIB::hugepagesPageFree',
+ 'mcelog': 'INTEL-MCELOG-MIB::memoryCorrectedErrors.1'}
nr_hugepages = int(time.time()) % 10000
snmp_in_commands = {
'intel_rdt': None,
'mcelog': '/root/mce-inject_df < /root/corrected'}
csv_subdirs = {
'intel_rdt': [
- 'intel_rdt-{}'.format(core)
- for core in conf.get_plugin_config_values(
- compute_node, 'intel_rdt', 'Cores')],
+ 'intel_rdt-0-2'],
'hugepages': [
- 'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',
- 'hugepages-node1-2048Kb'],
+ 'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',],
# 'ipmi': ['ipmi'],
'mcelog': [
'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
# compute_node)
csv_meter_categories = {
'intel_rdt': [
- 'bytes-llc', 'ipc', 'memory_bandwidth-local',
- 'memory_bandwidth-remote'],
+ 'bytes-llc', 'ipc'],
'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
# 'ipmi': csv_meter_categories_ipmi,
'mcelog': [
logger.error(' * ' + error)
_process_result(
compute_node.get_id(), out_plugin, test_labels[name], False,
- results)
+ results, compute_node.get_name())
else:
plugin_errors = [
error for plugin, error, critical in error_plugins
+ 'following prerequisites failed:')
for prerequisite in failed_prerequisites:
logger.error(' * {}'.format(prerequisite))
+ # optional plugin
+ elif "intel_rdt" == name and not conf.is_rdt_available(compute_node):
+            logger.info("RDT is not available on virtual nodes, skipping test.")
+ res = True
+ print("Results for {}, pre-processing".format(str(test_labels[name])))
+ print(results)
+ _process_result(
+ compute_node.get_id(), out_plugin, test_labels[name],
+ res, results, compute_node.get_name())
+ print("Results for {}, post-processing".format(str(test_labels[name])))
+ print(results)
else:
plugin_interval = conf.get_plugin_interval(compute_node, name)
if out_plugin == 'Gnocchi':
if out_plugin == 'SNMP':
res = \
name in snmp_mib_files and name in snmp_mib_strings \
- and tests.test_snmp_sends_data(
- compute_node,
- plugin_interval, logger,
- SNMPClient(conf, compute_node), snmp_mib_files[name],
- snmp_mib_strings[name], snmp_in_commands[name], conf)
+ and conf.test_plugins_with_snmp(
+ compute_node.get_name(), plugin_interval, logger, name,
+ snmp_mib_files[name], snmp_mib_strings[name],
+ snmp_in_commands[name])
if out_plugin == 'CSV':
res = tests.test_csv_handles_plugin_data(
compute_node, conf.get_plugin_interval(compute_node, name),
'Test works, but will be reported as failure,'
+ 'because of non-critical errors.')
res = False
+ print("Results for {}, pre-processing".format(str(test_labels[name])))
+ print(results)
_process_result(
compute_node.get_id(), out_plugin, test_labels[name],
- res, results)
+ res, results, compute_node.get_name())
+ print("Results for {}, post-processing".format(str(test_labels[name])))
+ print(results)
def get_results_for_ovs_events(
APEX_USER_STACK,
APEX_PKEY)
nodes = handler.get_nodes()
+ logger.info("Creating OVS bridges on computes nodes")
for node in nodes:
if node.is_compute():
node.run_cmd('sudo ovs-vsctl add-br br0')
def mcelog_install():
"""Install mcelog on compute nodes."""
- _print_label('Enabling mcelog on compute nodes')
+ _print_label('Enabling mcelog and OVS bridges on compute nodes')
handler = factory.Factory.get_handler('apex',
APEX_IP,
APEX_USER_STACK,
APEX_PKEY)
nodes = handler.get_nodes()
+ mce_bin = os.path.dirname(os.path.realpath(__file__)) + '/mce-inject_ea'
for node in nodes:
if node.is_compute():
centos_release = node.run_cmd('uname -r')
- if '3.10.0-514.26.2.el7.x86_64' not in centos_release:
+            kernel = version.LooseVersion(centos_release.strip())
+            supported = version.LooseVersion('3.10.0-514.26.2.el7.x86_64')
+            if kernel < supported:
logger.info(
- 'Mcelog will not be enabled '
- + 'on node-{0}, '.format(node.get_dict()['name'])
- + 'unsupported CentOS release found ({1}).'.format(
- centos_release))
+                    'Mcelog will NOT be enabled on node-{0}. '
+                    'Unsupported CentOS release found ({1}).'.format(
+                        node.get_dict()['name'], centos_release))
else:
logger.info(
- 'Checking if mcelog is enabled'
+ 'Checking if mcelog is enabled'
+ ' on node-{}...'.format(node.get_dict()['name']))
res = node.run_cmd('ls')
- if 'mce-inject_ea' and 'corrected' in res:
- logger.info(
- 'Mcelog seems to be already installed '
- + 'on node-{}.'.format(node.get_dict()['name']))
- node.run_cmd('sudo modprobe mce-inject')
- node.run_cmd('sudo ./mce-inject_ea < corrected')
- else:
- logger.info(
- 'Mcelog will be enabled on node-{}...'.format(
- node.get_dict()['id']))
- node.put_file(
- '/usr/local/lib/python2.7/dist-packages/baro_tests/'
- + 'mce-inject_ea', 'mce-inject_ea')
- node.run_cmd('chmod a+x mce-inject_ea')
- node.run_cmd('echo "CPU 0 BANK 0" > corrected')
- node.run_cmd(
- 'echo "STATUS 0xcc00008000010090" >>'
- + ' corrected')
- node.run_cmd(
- 'echo "ADDR 0x0010FFFFFFF" >> corrected')
- node.run_cmd('sudo modprobe mce-inject')
- node.run_cmd('sudo ./mce-inject_ea < corrected')
- logger.info('Mcelog is installed on all compute nodes')
+                if 'mce-inject_ea' in res and 'corrected' in res:
+ logger.info(
+ 'Mcelog seems to be already installed '
+ + 'on node-{}.'.format(node.get_dict()['name']))
+ node.run_cmd('sudo modprobe mce-inject')
+ node.run_cmd('sudo ./mce-inject_ea < corrected')
+ else:
+ logger.info(
+ 'Mcelog will be enabled '
+ + 'on node-{}...'.format(node.get_dict()['name']))
+ node.put_file(mce_bin, 'mce-inject_ea')
+ node.run_cmd('chmod a+x mce-inject_ea')
+ node.run_cmd('echo "CPU 0 BANK 0" > corrected')
+ node.run_cmd(
+ 'echo "STATUS 0xcc00008000010090" >>'
+ + ' corrected')
+ node.run_cmd(
+ 'echo "ADDR 0x0010FFFFFFF" >> corrected')
+ node.run_cmd('sudo modprobe mce-inject')
+ node.run_cmd('sudo ./mce-inject_ea < corrected')
+ logger.info(
+ 'Mcelog was installed '
+ + 'on node-{}.'.format(node.get_dict()['name']))
+
def mcelog_delete():
create_ovs_bridge()
gnocchi_running_on_con = False
aodh_running_on_con = False
+ # Disabling SNMP write plug-in
snmp_running = False
- _print_label('Testing Gnocchi, AODH and SNMP on nodes')
+ _print_label('Testing Gnocchi and AODH plugins on nodes')
for controller in controllers:
gnocchi_running = (
- gnocchi_running_on_con and conf.is_gnocchi_running(controller))
+ gnocchi_running_on_con or conf.is_gnocchi_running(controller))
aodh_running = (
aodh_running_on_con or conf.is_aodh_running(controller))
'mcelog': 'Mcelog',
'ovs_events': 'OVS events'}
out_plugins = {}
- out_plugins_to_test = []
for compute_node in computes:
node_id = compute_node.get_id()
node_name = compute_node.get_name()
compute_node_names.append(node_name)
plugins_to_enable = []
error_plugins = []
- gnocchi_running = (
- gnocchi_running or conf.check_gnocchi_plugin_included(
+ gnocchi_running_com = (
+ gnocchi_running and conf.check_gnocchi_plugin_included(
compute_node))
- aodh_running = (
+ aodh_running_com = (
aodh_running and conf.check_aodh_plugin_included(compute_node))
- if gnocchi_running:
+ # logger.info("SNMP enabled on {}" .format(node_name))
+ if gnocchi_running_com:
out_plugins[node_id].append("Gnocchi")
- if aodh_running:
+ if aodh_running_com:
out_plugins[node_id].append("AODH")
if snmp_running:
- out_plugins_to_test.append("SNMP")
+ out_plugins[node_id].append("SNMP")
- if 'gnocchi' not in out_plugins[node_id]:
- logger.info("CSV will be enabled for verification")
+ if 'Gnocchi' in out_plugins[node_id]:
plugins_to_enable.append('csv')
out_plugins[node_id].append("CSV")
if plugins_to_enable:
logger.info(
'Testcases on node {} will not be executed'.format(
node_id))
- else:
- if plugins_to_enable:
- collectd_restarted, collectd_warnings = \
- conf.restart_collectd(compute_node)
- sleep_time = 10
- logger.info(
- 'Sleeping for {} seconds'.format(sleep_time)
- + ' after collectd restart...')
- time.sleep(sleep_time)
- if plugins_to_enable and not collectd_restarted:
+
+ for i in out_plugins[node_id]:
+ if i == 'AODH':
+ for plugin_name in sorted(aodh_plugin_labels.keys()):
+ _exec_testcase(
+ aodh_plugin_labels, plugin_name, i,
+ controllers, compute_node, conf, results,
+ error_plugins, out_plugins[node_id])
+ elif i == 'CSV':
+ _print_label("Node {}: Executing CSV Testcases".format(
+ node_name))
+ logger.info("Restarting collectd for CSV tests")
+ collectd_restarted, collectd_warnings = \
+ conf.restart_collectd(compute_node)
+ sleep_time = 10
+ logger.info(
+ 'Sleeping for {} seconds'.format(sleep_time)
+ + ' after collectd restart...')
+ time.sleep(sleep_time)
+ if not collectd_restarted:
for warning in collectd_warnings:
logger.warning(warning)
logger.error(
'Restart of collectd on node {} failed'.format(
- node_id))
+ compute_node))
logger.info(
- 'Testcases on node {}'.format(node_id)
+ 'CSV Testcases on node {}'.format(compute_node)
+ ' will not be executed.')
- else:
- if collectd_warnings:
- for warning in collectd_warnings:
- logger.warning(warning)
-
- for i in out_plugins[node_id]:
- if i == 'AODH':
- for plugin_name in sorted(aodh_plugin_labels.keys()):
+ for plugin_name in sorted(plugin_labels.keys()):
_exec_testcase(
- aodh_plugin_labels, plugin_name, i,
+ plugin_labels, plugin_name, i,
controllers, compute_node, conf, results,
error_plugins, out_plugins[node_id])
+
else:
for plugin_name in sorted(plugin_labels.keys()):
_exec_testcase(
print_overall_summary(
compute_ids, plugin_labels, aodh_plugin_labels, results, out_plugins)
- if ((len([res for res in results if not res[2]]) > 0)
- or (len(results) < len(computes) * len(plugin_labels))):
- logger.error('Some tests have failed or have not been executed')
- return 1
- return 0
+ res_overall = 0
+ for res in results:
+ if not res[3]:
+ logger.error('Some tests have failed or have not been executed')
+ logger.error('Overall Result is Fail')
+ res_overall = 1
+ else:
+ pass
+
+ _print_label('Testing DMA on compute nodes')
+ res_agent = dma.dma_main(logger, conf, computes)
+
+ return 0 if res_overall == 0 and res_agent == 0 else 1
if __name__ == '__main__':