[ansible][fedora] Update package name
diff --git a/baro_tests/collectd.py b/baro_tests/collectd.py
index cd436df..c1a05af 100644
--- a/baro_tests/collectd.py
+++ b/baro_tests/collectd.py
@@ -1,6 +1,7 @@
-"""Executing test of plugins"""
 # -*- coding: utf-8 -*-
-
+#
+# Copyright 2017 OPNFV
+#
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+# Patch on October 10 2017
+
+"""Executing test of plugins"""
 
 import requests
 from keystoneclient.v3 import client
 import os
+import sys
 import time
 import logging
-from config_server import *
-from tests import *
+import config_server
+import tests
+import dma
+from distutils import version
 from opnfv.deployment import factory
-from functest.utils import functest_utils
-from functest.utils.constants import CONST
 
-CEILOMETER_NAME = 'ceilometer'
+AODH_NAME = 'aodh'
+GNOCCHI_NAME = 'gnocchi'
 ID_RSA_SRC = '/root/.ssh/id_rsa'
-ID_RSA_DST_DIR = '/home/opnfv/.ssh'
+ID_RSA_DST_DIR = '/root/.ssh'
 ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'
-INSTALLER_PARAMS_YAML = os.path.join(CONST.dir_repo_functest, 'functest/ci/installer_params.yaml')
-FUEL_IP = functest_utils.get_parameter_from_yaml('fuel.ip', INSTALLER_PARAMS_YAML)
-FUEL_USER = functest_utils.get_parameter_from_yaml('fuel.user', INSTALLER_PARAMS_YAML)
-FUEL_PW = functest_utils.get_parameter_from_yaml('fuel.password', INSTALLER_PARAMS_YAML)
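+# Apex installer (undercloud) connection details; INSTALLER_IP is expected
+# to be set in the environment.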
+APEX_IP = os.getenv("INSTALLER_IP").rstrip('\n')
+APEX_USER = 'root'
+APEX_USER_STACK = 'stack'
+APEX_PKEY = '/root/.ssh/id_rsa'
 
 
 class KeystoneException(Exception):
@@ -63,53 +69,53 @@ class InvalidResponse(KeystoneException):
             "Invalid response", exc, response)
 
 
-class CeilometerClient(object):
-    """Ceilometer Client to authenticate and request meters"""
-    def __init__(self, bc_logger):
-        """
-        Keyword arguments:
-        bc_logger - logger instance
-        """
+def get_apex_nodes():
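+    # Return the node handlers reported by the Apex installer.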
+    handler = factory.Factory.get_handler('apex',
+                                          APEX_IP,
+                                          APEX_USER_STACK,
+                                          APEX_PKEY)
+    nodes = handler.get_nodes()
+    return nodes
+
+
+class GnocchiClient(object):
+    # Gnocchi Client to authenticate and request meters
+    def __init__(self):
         self._auth_token = None
-        self._ceilometer_url = None
+        self._gnocchi_url = None
         self._meter_list = None
-        self._logger = bc_logger
 
     def auth_token(self):
-        """Get auth token"""
+        # Get auth token
         self._auth_server()
         return self._auth_token
 
-    def get_ceilometer_url(self):
-        """Get Ceilometer URL"""
-        return self._ceilometer_url
+    def get_gnocchi_url(self):
+        # Get Gnocchi URL
+        return self._gnocchi_url
 
-    def get_ceil_metrics(self, criteria=None):
-        """Get Ceilometer metrics for given criteria
-
-        Keyword arguments:
-        criteria -- criteria for ceilometer meter list
-        """
+    def get_gnocchi_metrics(self, criteria=None):
+        # Subject to change if metric gathering is different for gnocchi
         self._request_meters(criteria)
         return self._meter_list
 
     def _auth_server(self):
-        """Request token in authentication server"""
-        self._logger.debug('Connecting to the auth server {}'.format(os.environ['OS_AUTH_URL']))
+        # Request token in authentication server
+        logger.debug('Connecting to the auth server {}'.format(
+            os.environ['OS_AUTH_URL']))
         keystone = client.Client(username=os.environ['OS_USERNAME'],
                                  password=os.environ['OS_PASSWORD'],
-                                 tenant_name=os.environ['OS_TENANT_NAME'],
+                                 tenant_name=os.environ['OS_USERNAME'],
                                  auth_url=os.environ['OS_AUTH_URL'])
         self._auth_token = keystone.auth_token
         for service in keystone.service_catalog.get_data():
-            if service['name'] == CEILOMETER_NAME:
+            if service['name'] == GNOCCHI_NAME:
                 for service_type in service['endpoints']:
                     if service_type['interface'] == 'internal':
-                        self._ceilometer_url = service_type['url']
-                        break
+                        self._gnocchi_url = service_type['url']
 
-        if self._ceilometer_url is None:
-            self._logger.warning('Ceilometer is not registered in service catalog')
+        if self._gnocchi_url is None:
+            logger.warning('Gnocchi is not registered in service catalog')
 
     def _request_meters(self, criteria):
         """Request meter list values from ceilometer
@@ -118,9 +124,10 @@ class CeilometerClient(object):
         criteria -- criteria for ceilometer meter list
         """
         if criteria is None:
-            url = self._ceilometer_url + ('/v2/samples?limit=400')
+            url = self._gnocchi_url + ('/v2/metric?limit=400')
         else:
-            url = self._ceilometer_url + ('/v2/meters/%s?q.field=resource_id&limit=400' % criteria)
+            url = self._gnocchi_url \
+                + ('/v3/metric/%s?q.field=metric&limit=400' % criteria)
         headers = {'X-Auth-Token': self._auth_token}
         resp = requests.get(url, headers=headers)
         try:
@@ -130,18 +137,57 @@ class CeilometerClient(object):
             raise InvalidResponse(err, resp)
 
 
+class AodhClient(object):
+    # Aodh Client to authenticate and request meters
+    def __init__(self):
+        self._auth_token = None
+        self._aodh_url = None
+        self._meter_list = None
+
+    def auth_token(self):
+        # Get auth token
+        self._auth_server()
+        return self._auth_token
+
+    def get_aodh_url(self):
+        # Get Aodh URL
+        return self._aodh_url
+
+    def get_aodh_metrics(self, criteria=None):
+        # Subject to change if metric gathering is different for aodh
+        self._request_meters(criteria)
+        return self._meter_list
+
+    def _auth_server(self):
+        # Request token in authentication server
+        logger.debug('Connecting to the AODH auth server {}'.format(
+            os.environ['OS_AUTH_URL']))
+        keystone = client.Client(username=os.environ['OS_USERNAME'],
+                                 password=os.environ['OS_PASSWORD'],
+                                 tenant_name=os.environ['OS_USERNAME'],
+                                 auth_url=os.environ['OS_AUTH_URL'])
+        self._auth_token = keystone.auth_token
+        for service in keystone.service_catalog.get_data():
+            if service['name'] == AODH_NAME:
+                for service_type in service['endpoints']:
+                    if service_type['interface'] == 'internal':
+                        self._aodh_url = service_type['url']
+
+        if self._aodh_url is None:
+            logger.warning('Aodh is not registered in service catalog')
+
+
 class CSVClient(object):
     """Client to request CSV meters"""
-    def __init__(self, bc_logger, conf):
+    def __init__(self, conf):
         """
         Keyword arguments:
-        bc_logger - logger instance
         conf -- ConfigServer instance
         """
-        self._logger = bc_logger
         self.conf = conf
 
-    def get_csv_metrics(self, compute_node, plugin_subdirectories, meter_categories):
+    def get_csv_metrics(
+            self, compute_node, plugin_subdirectories, meter_categories):
         """Get CSV metrics.
 
         Keyword arguments:
@@ -151,37 +197,69 @@ class CSVClient(object):
 
         Return list of metrics.
         """
-        stdout = self.conf.execute_command("date '+%Y-%m-%d'", compute_node.get_ip())
-        date = stdout[0].strip()
-        metrics = []
-        for plugin_subdir in plugin_subdirectories:
-            for meter_category in meter_categories:
-                stdout = self.conf.execute_command(
-                    "tail -2 /var/lib/collectd/csv/node-"
-                    + "{0}.domain.tld/{1}/{2}-{3}".format(
-                        compute_node.get_id(), plugin_subdir, meter_category, date),
-                    compute_node.get_ip())
-                # Storing last two values
-                values = stdout
-                if len(values) < 2:
-                    self._logger.error(
-                        'Getting last two CSV entries of meter category '
-                        + '{0} in {1} subdir failed'.format(meter_category, plugin_subdir))
-                else:
-                    old_value = int(values[0][0:values[0].index('.')])
-                    new_value = int(values[1][0:values[1].index('.')])
-                    metrics.append((plugin_subdir, meter_category, old_value, new_value))
+        metrics = []
+        compute_name = compute_node.get_name()
+        nodes = get_apex_nodes()
+        for node in nodes:
+            if compute_name == node.get_dict()['name']:
+                date = node.run_cmd(
+                    "date '+%Y-%m-%d'")
+                hostname = node.run_cmd('hostname -A')
+                hostname = hostname.split()[0]
+                metrics = []
+                for plugin_subdir in plugin_subdirectories:
+                    for meter_category in meter_categories:
+                        stdout1 = node.run_cmd(
+                            "tail -2 /var/lib/collectd/csv/"
+                            + "{0}/{1}/{2}-{3}".format(
+                                hostname, plugin_subdir,
+                                meter_category, date))
+                        stdout2 = node.run_cmd(
+                            "tail -1 /var/lib/collectd/csv/"
+                            + "{0}/{1}/{2}-{3}".format(
+                                hostname, plugin_subdir,
+                                meter_category, date))
+                        # Storing last two values
+                        values = stdout1
+                        values2 = stdout2
+                        if values is None:
+                            logger.error(
+                                'Getting last two CSV entries of meter category'
+                                + ' {0} in {1} subdir failed'.format(
+                                    meter_category, plugin_subdir))
+                        elif values2 is None:
+                            logger.error(
+                                'Getting last CSV entries of meter category'
+                                + ' {0} in {1} subdir failed'.format(
+                                    meter_category, plugin_subdir))
+                        else:
+                            values = values.split(',')
+                            old_value = float(values[0])
+                            values2 = values2.split(',')
+                            new_value = float(values2[0])
+                            metrics.append((
+                                plugin_subdir, meter_category, old_value,
+                                new_value))
         return metrics
 
 
-def _check_logger():
-    """Check whether there is global logger available and if not, define one."""
-    if 'logger' not in globals():
-        global logger
-        logger = logger.Logger("barometercollectd").getLogger()
+def get_csv_categories_for_ipmi(conf, compute_node):
+    """Get CSV metrics.
 
+    Keyword arguments:
+    compute_node -- compute node instance
 
-def _process_result(compute_node, test, result, results_list):
+    Return list of categories.
+    """
+    stdout = conf.execute_command(
+        "date '+%Y-%m-%d'", compute_node.get_ip())
+    date = stdout[0].strip()
+    categories = conf.execute_command(
+        "ls /var/lib/collectd/csv/{0}.jf.intel.com/ipmi | grep {1}".format(
+            compute_node.get_name(), date), compute_node.get_ip())
+    return [category.strip()[:-11] for category in categories]
+
+
+def _process_result(
+        compute_node, out_plugin, test, result, results_list, node):
     """Print test result and append it to results list.
 
     Keyword arguments:
@@ -190,10 +268,14 @@ def _process_result(compute_node, test, result, results_list):
     results_list -- results list
     """
     if result:
-        logger.info('Compute node {0} test case {1} PASSED.'.format(compute_node, test))
+        logger.info(
+            'Test case for {0} with {1} PASSED on {2}.'.format(
+                node, out_plugin, test))
     else:
-        logger.error('Compute node {0} test case {1} FAILED.'.format(compute_node, test))
-    results_list.append((compute_node, test, result))
+        logger.error(
+            'Test case for {0} with {1} FAILED on {2}.'.format(
+                node, out_plugin, test))
+    results_list.append((compute_node, out_plugin, test, result))
 
 
 def _print_label(label):
@@ -214,17 +296,19 @@ def _print_label(label):
     logger.info(('=' * length1) + label + ('=' * length2))
 
 
-def _print_plugin_label(plugin, node_id):
+def _print_plugin_label(plugin, node_name):
     """Print plug-in label.
 
     Keyword arguments:
     plugin -- plug-in name
     node_id -- node ID
     """
-    _print_label('Node {0}: Plug-in {1} Test case execution'.format(node_id, plugin))
+    _print_label(
+        'Node {0}: Plug-in {1} Test case execution'.format(node_name, plugin))
 
 
-def _print_final_result_of_plugin(plugin, compute_ids, results, out_plugins, out_plugin):
+def _print_final_result_of_plugin(
+        plugin, compute_ids, results, out_plugins, out_plugin):
     """Print final results of plug-in.
 
     Keyword arguments:
@@ -236,21 +320,41 @@ def _print_final_result_of_plugin(plugin, compute_ids, results, out_plugins, out
     """
     print_line = ''
     for id in compute_ids:
-        if out_plugins[id] == out_plugin:
-            if (id, plugin, True) in results:
+        if out_plugin in ('Gnocchi', 'AODH', 'SNMP', 'CSV'):
+            if (id, out_plugin, plugin, True) in results:
                 print_line += ' PASS   |'
-            elif (id, plugin, False) in results and out_plugins[id] == out_plugin:
+            elif (id, out_plugin, plugin, False) in results:
                 print_line += ' FAIL   |'
             else:
-                print_line += ' NOT EX |'
-        elif out_plugin == 'Ceilometer':
-            print_line += ' NOT EX |'
+                print_line += ' SKIP   |'
         else:
             print_line += ' SKIP   |'
     return print_line
 
 
-def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
+def print_overall_summary(
+        compute_ids, tested_plugins, aodh_plugins, results, out_plugins):
     """Print overall summary table.
 
     Keyword arguments:
@@ -259,51 +363,88 @@ def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
     results -- results list
     out_plugins --  list of used out plug-ins
     """
-    compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
+    compute_node_names = ['Node-{}'.format(i) for i in range(
+        len(compute_ids))]
     all_computes_in_line = ''
     for compute in compute_node_names:
-        all_computes_in_line = all_computes_in_line + '| ' + compute + (' ' * (7 - len(compute)))
+        all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
     line_of_nodes = '| Test           ' + all_computes_in_line + '|'
     logger.info('=' * 70)
     logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
     logger.info(
-        '|' + ' ' * ((9*len(compute_node_names))/2) + ' OVERALL SUMMARY'
-        + ' ' * (9*len(compute_node_names) - (9*len(compute_node_names))/2) + '|')
-    logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+        '|' + ' ' * ((9*len(compute_node_names))/2)
+        + ' OVERALL SUMMARY'
+        + ' ' * (
+            9*len(compute_node_names) - (9*len(compute_node_names))/2)
+        + '|')
+    logger.info(
+        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
     logger.info(line_of_nodes)
-    logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
-    out_plugins_print = ['Ceilometer']
-    if 'CSV' in out_plugins.values():
-        out_plugins_print.append('CSV')
+    logger.info(
+        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+    out_plugins_print = []
+    out_plugins_print1 = []
+    for key in out_plugins.keys():
+        if 'Gnocchi' in out_plugins[key]:
+            out_plugins_print1.append('Gnocchi')
+        if 'AODH' in out_plugins[key]:
+            out_plugins_print1.append('AODH')
+        if 'SNMP' in out_plugins[key]:
+            out_plugins_print1.append('SNMP')
+        if 'CSV' in out_plugins[key]:
+            out_plugins_print1.append('CSV')
+    for i in out_plugins_print1:
+        if i not in out_plugins_print:
+            out_plugins_print.append(i)
     for out_plugin in out_plugins_print:
         output_plugins_line = ''
         for id in compute_ids:
             out_plugin_result = '----'
-            if out_plugin == 'Ceilometer':
-                out_plugin_result = 'PASS' if out_plugins[id] == out_plugin else 'FAIL'
-            if out_plugin == 'CSV':
-                if out_plugins[id] == out_plugin:
-                    out_plugin_result = \
-                        'PASS' if [
-                            plugin for comp_id, plugin,
-                            res in results if comp_id == id and res] else 'FAIL'
-                else:
-                    out_plugin_result = 'SKIP'
+            if out_plugin in ('Gnocchi', 'AODH', 'SNMP'):
+                out_plugin_result = 'PASS'
+            elif out_plugin == 'CSV':
+                out_plugin_result = \
+                    'PASS' if [
+                        plugin for comp_id, out_pl, plugin, res in results
+                        if comp_id == id and res] else 'FAIL'
+            else:
+                out_plugin_result = 'FAIL'
             output_plugins_line += '| ' + out_plugin_result + '   '
         logger.info(
             '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
             + output_plugins_line + '|')
-        for plugin in sorted(tested_plugins.values()):
-            line_plugin = _print_final_result_of_plugin(
-                plugin, compute_ids, results, out_plugins, out_plugin)
-            logger.info('|  IN:{}'.format(plugin) + (' ' * (11-len(plugin))) + '|' + line_plugin)
-        logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+
+        if out_plugin == 'AODH':
+            for plugin in sorted(aodh_plugins.values()):
+                line_plugin = _print_final_result_of_plugin(
+                    plugin, compute_ids, results, out_plugins, out_plugin)
+                logger.info(
+                    '|  IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
+                    + '|' + line_plugin)
+        else:
+            for plugin in sorted(tested_plugins.values()):
+                line_plugin = _print_final_result_of_plugin(
+                    plugin, compute_ids, results, out_plugins, out_plugin)
+                logger.info(
+                    '|  IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
+                    + '|' + line_plugin)
+        logger.info(
+            '+' + ('-' * 16) + '+'
+            + (('-' * 8) + '+') * len(compute_node_names))
     logger.info('=' * 70)
 
 
 def _exec_testcase(
-        test_labels, name, ceilometer_running, compute_node,
-        conf, results, error_plugins):
+        test_labels, name, out_plugin, controllers, compute_node,
+        conf, results, error_plugins, out_plugins):
     """Execute the testcase.
 
     Keyword arguments:
@@ -313,7 +454,8 @@ def _exec_testcase(
     compute_node -- compute node ID
     conf -- ConfigServer instance
     results -- results list
-    error_plugins -- list of tuples with plug-in errors (plugin, error_description, is_critical):
+    error_plugins -- list of tuples with plug-in errors
+        (plugin, error_description, is_critical):
         plugin -- plug-in ID, key of test_labels dictionary
         error_decription -- description of the error
         is_critical -- boolean value indicating whether error is critical
@@ -321,46 +463,91 @@ def _exec_testcase(
     ovs_interfaces = conf.get_ovs_interfaces(compute_node)
     ovs_configured_interfaces = conf.get_plugin_config_values(
         compute_node, 'ovs_events', 'Interfaces')
+    ovs_configured_bridges = conf.get_plugin_config_values(
+        compute_node, 'ovs_stats', 'Bridges')
     ovs_existing_configured_int = [
         interface for interface in ovs_interfaces
         if interface in ovs_configured_interfaces]
+    ovs_existing_configured_bridges = [
+        bridge for bridge in ovs_interfaces
+        if bridge in ovs_configured_bridges]
     plugin_prerequisites = {
-        'mcelog': [(conf.is_installed(compute_node, 'mcelog'), 'mcelog must be installed.')],
+        'mcelog': [(
+            conf.is_mcelog_installed(compute_node, 'mcelog'),
+            'mcelog must be installed.')],
         'ovs_events': [(
             len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
-            'Interfaces must be configured.')]}
-    ceilometer_criteria_lists = {
-        'hugepages': ['hugepages.vmpage_number'],
-        'mcelog': ['mcelog.errors'],
-        'ovs_events': ['ovs_events.gauge']}
-    ceilometer_substr_lists = {
-        'ovs_events': ovs_existing_configured_int if len(ovs_existing_configured_int) > 0 else ovs_interfaces}
+            'Interfaces must be configured.')],
+        'ovs_stats': [(
+            len(ovs_existing_configured_bridges) > 0,
+            'Bridges must be configured.')]}
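+    # Criteria strings handed to the Gnocchi/AODH checks below, one per
+    # collectd plug-in under test.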
+    gnocchi_criteria_lists = {
+        'hugepages': 'hugepages',
+        'intel_rdt': 'rdt',
+        'mcelog': 'mcelog',
+        'ovs_events': 'interface-ovs-system',
+        'ovs_stats': 'ovs_stats-br0.br0'}
+    aodh_criteria_lists = {
+        'mcelog': 'mcelog',
+        'ovs_events': 'ovs_events'}
+    snmp_mib_files = {
+        'intel_rdt': '/usr/share/snmp/mibs/Intel-Rdt.txt',
+        'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
+        'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
+    snmp_mib_strings = {
+        'intel_rdt': 'INTEL-RDT-MIB::rdtLlc.1',
+        'hugepages': 'INTEL-HUGEPAGES-MIB::hugepagesPageFree',
+        'mcelog': 'INTEL-MCELOG-MIB::memoryCorrectedErrors.1'}
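+    # nr_hugepages is derived from the current time so that each run
+    # writes a different value for the SNMP hugepages check to pick up.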
+    nr_hugepages = int(time.time()) % 10000
+    snmp_in_commands = {
+        'intel_rdt': None,
+        'hugepages': 'echo {} > /sys/kernel/'.format(nr_hugepages)
+                     + 'mm/hugepages/hugepages-2048kB/nr_hugepages',
+        'mcelog': '/root/mce-inject_df < /root/corrected'}
     csv_subdirs = {
+        'intel_rdt': [
+            'intel_rdt-0-2'],
         'hugepages': [
-            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb', 'hugepages-node1-2048Kb',
-            'hugepages-mm-1048576Kb', 'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
-        'mcelog': ['mcelog-SOCKET_0_CHANNEL_0_DIMM_any', 'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
+            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb'],
+        # 'ipmi': ['ipmi'],
+        'mcelog': [
+            'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
+            'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
+        'ovs_stats': [
+            'ovs_stats-br0.br0'],
         'ovs_events': [
-            'ovs_events-{}'.format(interface)
-            for interface in (ovs_existing_configured_int if len(ovs_existing_configured_int) > 0 else ovs_interfaces)]}
+            'ovs_events-br0']}
+    # csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf,
+    # compute_node)
     csv_meter_categories = {
+        'intel_rdt': [
+            'bytes-llc', 'ipc'],
         'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
+        # 'ipmi': csv_meter_categories_ipmi,
         'mcelog': [
-            'errors-corrected_memory_errors', 'errors-uncorrected_memory_errors',
-            'errors-corrected_memory_errors_in_24h', 'errors-uncorrected_memory_errors_in_24h'],
+            'errors-corrected_memory_errors',
+            'errors-uncorrected_memory_errors'],
+        'ovs_stats': [
+            'if_dropped', 'if_errors', 'if_packets'],
         'ovs_events': ['gauge-link_status']}
 
-    _print_plugin_label(test_labels[name] if name in test_labels else name, compute_node.get_id())
+    _print_plugin_label(
+        test_labels[name] if name in test_labels else name,
+        compute_node.get_name())
     plugin_critical_errors = [
-        error for plugin, error, critical in error_plugins if plugin == name and critical]
+        error for plugin, error, critical in error_plugins
+        if plugin == name and critical]
     if plugin_critical_errors:
         logger.error('Following critical errors occurred:'.format(name))
         for error in plugin_critical_errors:
             logger.error(' * ' + error)
-        _process_result(compute_node.get_id(), test_labels[name], False, results)
+        _process_result(
+            compute_node.get_id(), out_plugin, test_labels[name], False,
+            results, compute_node.get_name())
     else:
         plugin_errors = [
-            error for plugin, error, critical in error_plugins if plugin == name and not critical]
+            error for plugin, error, critical in error_plugins
+            if plugin == name and not critical]
         if plugin_errors:
             logger.warning('Following non-critical errors occured:')
             for error in plugin_errors:
@@ -369,94 +556,152 @@ def _exec_testcase(
         if name in plugin_prerequisites:
             failed_prerequisites = [
                 prerequisite_name for prerequisite_passed,
-                prerequisite_name in plugin_prerequisites[name] if not prerequisite_passed]
+                prerequisite_name in plugin_prerequisites[name]
+                if not prerequisite_passed]
         if failed_prerequisites:
             logger.error(
                 '{} test will not be executed, '.format(name)
                 + 'following prerequisites failed:')
             for prerequisite in failed_prerequisites:
                 logger.error(' * {}'.format(prerequisite))
+        # optional plugin
+        elif "intel_rdt" == name and not conf.is_rdt_available(compute_node):
+            #TODO: print log message
+            logger.info("RDT is not available on virtual nodes, skipping test.")
+            res = True
+            print("Results for {}, pre-processing".format(str(test_labels[name])))
+            print(results)
+            _process_result(
+                compute_node.get_id(), out_plugin, test_labels[name],
+                res, results, compute_node.get_name())
+            print("Results for {}, post-processing".format(str(test_labels[name])))
+            print(results)
         else:
-            if ceilometer_running:
-                res = test_ceilometer_node_sends_data(
-                    compute_node.get_id(), conf.get_plugin_interval(compute_node, name),
-                    logger=logger, client=CeilometerClient(logger),
-                    criteria_list=ceilometer_criteria_lists[name],
-                    resource_id_substrings=(ceilometer_substr_lists[name]
-                                            if name in ceilometer_substr_lists else ['']))
-            else:
-                res = test_csv_handles_plugin_data(
-                    compute_node, conf.get_plugin_interval(compute_node, name), name,
-                    csv_subdirs[name], csv_meter_categories[name], logger,
-                    CSVClient(logger, conf))
+            plugin_interval = conf.get_plugin_interval(compute_node, name)
+            if out_plugin == 'Gnocchi':
+                res = conf.test_plugins_with_gnocchi(
+                    compute_node.get_name(), plugin_interval,
+                    logger, criteria_list=gnocchi_criteria_lists[name])
+            if out_plugin == 'AODH':
+                res = conf.test_plugins_with_aodh(
+                    compute_node.get_name(), plugin_interval,
+                    logger, criteria_list=aodh_criteria_lists[name])
+            if out_plugin == 'SNMP':
+                res = \
+                    name in snmp_mib_files and name in snmp_mib_strings \
+                    and conf.test_plugins_with_snmp(
+                        compute_node.get_name(), plugin_interval, logger, name,
+                        snmp_mib_files[name], snmp_mib_strings[name],
+                        snmp_in_commands[name])
+            if out_plugin == 'CSV':
+                res = tests.test_csv_handles_plugin_data(
+                    compute_node, conf.get_plugin_interval(compute_node, name),
+                    name, csv_subdirs[name], csv_meter_categories[name],
+                    logger, CSVClient(conf))
+
             if res and plugin_errors:
                 logger.info(
                     'Test works, but will be reported as failure,'
                     + 'because of non-critical errors.')
                 res = False
-            _process_result(compute_node.get_id(), test_labels[name], res, results)
-
-
-def mcelog_install(logger):
-    """Install mcelog on compute nodes.
-
-    Keyword arguments:
-    logger - logger instance
+            print("Results for {}, pre-processing".format(str(test_labels[name])))
+            print(results)
+            _process_result(
+                compute_node.get_id(), out_plugin, test_labels[name],
+                res, results, compute_node.get_name())
+            print("Results for {}, post-processing".format(str(test_labels[name])))
+            print(results)
+
+
+def get_results_for_ovs_events(
+        plugin_labels, plugin_name, gnocchi_running,
+        compute_node, conf, results, error_plugins):
+    """ Testing OVS Events with python plugin
     """
-    _print_label('Enabling mcelog on compute nodes')
-    handler = factory.Factory.get_handler('fuel', FUEL_IP, FUEL_USER, installer_pwd='')
+    plugin_label = 'OVS events'
+    res = conf.enable_ovs_events(
+        compute_node, plugin_label, error_plugins, create_backup=False)
+    _process_result(
+        compute_node.get_id(), plugin_name, plugin_label, res, results,
+        compute_node.get_name())
+    logger.info("Results for OVS Events = {}".format(results))
+
+
+def create_ovs_bridge():
+    """Create OVS brides on compute nodes"""
+    handler = factory.Factory.get_handler('apex',
+                                          APEX_IP,
+                                          APEX_USER_STACK,
+                                          APEX_PKEY)
     nodes = handler.get_nodes()
-    openstack_version = handler.get_openstack_version()
-    if openstack_version.find('14.') != 0:
-        logger.info('Mcelog will not be installed,'
-                    + ' unsupported Openstack version found ({}).'.format(openstack_version))
-    else:
-        for node in nodes:
-            if node.is_compute():
-                ubuntu_release = node.run_cmd('lsb_release -r')
-                if '16.04' not in ubuntu_release:
-                    logger.info('Mcelog will not be enabled'
-                                + 'on node-{0}, unsupported Ubuntu release found ({1}).'.format(
-                                node.get_dict()['id'], ubuntu_release))
+    logger.info("Creating OVS bridges on computes nodes")
+    for node in nodes:
+        if node.is_compute():
+            node.run_cmd('sudo ovs-vsctl add-br br0')
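+            # Expose OVSDB via a passive TCP listener on port 6640 so the
+            # collectd OVS plug-ins can connect to it.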
+            node.run_cmd('sudo ovs-vsctl set-manager ptcp:6640')
+    logger.info('OVS Bridges created on compute nodes')
+
+
+def mcelog_install():
+    """Install mcelog on compute nodes."""
+    _print_label('Enabling mcelog and OVS bridges on compute nodes')
+    handler = factory.Factory.get_handler('apex',
+                                          APEX_IP,
+                                          APEX_USER_STACK,
+                                          APEX_PKEY)
+    nodes = handler.get_nodes()
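+    # mce-inject_ea ships alongside this test suite; it is copied to each
+    # compute node and used to replay fake corrected-memory-error records.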
+    mce_bin = os.path.dirname(os.path.realpath(__file__)) + '/mce-inject_ea'
+    for node in nodes:
+        if node.is_compute():
+            centos_release = node.run_cmd('uname -r')
+            if (version.LooseVersion(centos_release)
+                    < version.LooseVersion('3.10.0-514.26.2.el7.x86_64')):
+                logger.info(
+                    'Mcelog will NOT be enabled on node-{}.'.format(
+                        node.get_dict()['name'])
+                    + ' Unsupported CentOS release found ({}).'.format(
+                        centos_release))
+            else:
+                logger.info(
+                    'Checking if mcelog is enabled'
+                    + ' on node-{}...'.format(node.get_dict()['name']))
+                res = node.run_cmd('ls')
+                if 'mce-inject_ea' in res and 'corrected' in res:
+                    logger.info(
+                        'Mcelog seems to be already installed '
+                        + 'on node-{}.'.format(node.get_dict()['name']))
+                    node.run_cmd('sudo modprobe mce-inject')
+                    node.run_cmd('sudo ./mce-inject_ea < corrected')
                 else:
-                    logger.info('Checking if  mcelog is enabled on node-{}...'.format(
-                        node.get_dict()['id']))
-                    res = node.run_cmd('ls /root/')
-                    if 'mce-inject_df' and 'corrected' in res:
-                        logger.info('Mcelog seems to be already installed on node-{}.'.format(
-                            node.get_dict()['id']))
-                        res = node.run_cmd('modprobe mce-inject')
-                        res = node.run_cmd('/root/mce-inject_df < /root/corrected')
-                    else:
-                        logger.info('Mcelog will be enabled on node-{}...'.format(
-                            node.get_dict()['id']))
-                        res = node.put_file('/home/opnfv/repos/barometer/baro_utils/mce-inject_df',
-                                            '/root/mce-inject_df')
-                        res = node.run_cmd('chmod a+x /root/mce-inject_df')
-                        res = node.run_cmd('echo "CPU 0 BANK 0" > /root/corrected')
-                        res = node.run_cmd('echo "STATUS 0xcc00008000010090" >> /root/corrected')
-                        res = node.run_cmd('echo "ADDR 0x0010FFFFFFF" >> /root/corrected')
-                        res = node.run_cmd('modprobe mce-inject')
-                        res = node.run_cmd('/root/mce-inject_df < /root/corrected')
-        logger.info('Mcelog is installed on all compute nodes')
-
-
-def mcelog_delete(logger):
-    """Uninstall mcelog from compute nodes.
-
-    Keyword arguments:
-    logger - logger instance
-    """
-    handler = factory.Factory.get_handler('fuel', FUEL_IP, FUEL_USER, installer_pwd='')
+                    logger.info(
+                        'Mcelog will be enabled '
+                        + 'on node-{}...'.format(node.get_dict()['name']))
+                    node.put_file(mce_bin, 'mce-inject_ea')
+                    node.run_cmd('chmod a+x mce-inject_ea')
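+                    # Write a minimal 'corrected' error record for
+                    # mce-inject_ea to replay.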
+                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
+                    node.run_cmd(
+                        'echo "STATUS 0xcc00008000010090" >>'
+                        + ' corrected')
+                    node.run_cmd(
+                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
+                    node.run_cmd('sudo modprobe mce-inject')
+                    node.run_cmd('sudo ./mce-inject_ea < corrected')
+                    logger.info(
+                        'Mcelog was installed '
+                        + 'on node-{}.'.format(node.get_dict()['name']))
+
+
+def mcelog_delete():
+    """Uninstall mcelog from compute nodes."""
+    handler = factory.Factory.get_handler(
+            'apex', APEX_IP, APEX_USER, APEX_PKEY)
     nodes = handler.get_nodes()
     for node in nodes:
         if node.is_compute():
-            output = node.run_cmd('ls /root/')
-            if 'mce-inject_df' in output:
-                res = node.run_cmd('rm /root/mce-inject_df')
+            output = node.run_cmd('ls')
+            if 'mce-inject_ea' in output:
+                node.run_cmd('rm mce-inject_ea')
             if 'corrected' in output:
-                res = node.run_cmd('rm /root/corrected')
-            res = node.run_cmd('systemctl restart mcelog')
+                node.run_cmd('rm corrected')
+            node.run_cmd('sudo systemctl restart mcelog')
     logger.info('Mcelog is deleted from all compute nodes')
 
 
@@ -464,16 +709,26 @@ def get_ssh_keys():
     if not os.path.isdir(ID_RSA_DST_DIR):
         os.makedirs(ID_RSA_DST_DIR)
     if not os.path.isfile(ID_RSA_DST):
-        logger.info("RSA key file {} doesn't exist, it will be downloaded from installer node.".format(ID_RSA_DST))
-        handler = factory.Factory.get_handler('fuel', FUEL_IP, FUEL_USER, installer_pwd=FUEL_PW)
-        fuel = handler.get_installer_node()
-        fuel.get_file(ID_RSA_SRC, ID_RSA_DST)
+        logger.info(
+            "RSA key file {} doesn't exist".format(ID_RSA_DST)
+            + ", it will be downloaded from installer node.")
+        handler = factory.Factory.get_handler(
+            'apex', APEX_IP, APEX_USER, APEX_PKEY)
+        apex = handler.get_installer_node()
+        apex.get_file(ID_RSA_SRC, ID_RSA_DST)
     else:
         logger.info("RSA key file {} exists.".format(ID_RSA_DST))
 
 
+def _check_logger():
+    """Check whether there is global logger available and if not, define one."""
+    if 'logger' not in globals():
+        global logger
+        logger = logger.Logger("barometercollectd").getLogger()
+
+
 def main(bt_logger=None):
-    """Check each compute node sends ceilometer metrics.
+    """Check each compute node sends gnocchi metrics.
 
     Keyword arguments:
     bt_logger -- logger instance
@@ -486,8 +741,9 @@ def main(bt_logger=None):
     else:
         global logger
         logger = bt_logger
+    _print_label("Starting barometer tests suite")
     get_ssh_keys()
-    conf = ConfigServer(FUEL_IP, FUEL_USER, logger)
+    conf = config_server.ConfigServer(APEX_IP, APEX_USER, logger)
     controllers = conf.get_controllers()
     if len(controllers) == 0:
         logger.error('No controller nodes found!')
@@ -497,111 +753,135 @@ def main(bt_logger=None):
         logger.error('No compute nodes found!')
         return 1
 
-    _print_label('Display of Control and Compute nodes available in the set up')
-    logger.info('controllers: {}'.format([('{0}: {1} ({2})'.format(
-        node.get_id(), node.get_name(), node.get_ip())) for node in controllers]))
-    logger.info('computes: {}'.format([('{0}: {1} ({2})'.format(
-        node.get_id(), node.get_name(), node.get_ip())) for node in computes]))
-
-    mcelog_install(logger)  # installation of mcelog
+    _print_label(
+        'Display of Control and Compute nodes available in the set up')
+    logger.info('controllers: {}'.format([('{0}: {1}'.format(
+        node.get_name(), node.get_ip())) for node in controllers]))
+    logger.info('computes: {}'.format([('{0}: {1}'.format(
+        node.get_name(), node.get_ip())) for node in computes]))
+
+    mcelog_install()
+    create_ovs_bridge()
+    gnocchi_running = False
+    aodh_running = False
+    # Disabling SNMP write plug-in
+    snmp_running = False
+    _print_label('Testing Gnocchi and AODH plugins on nodes')
 
-    ceilometer_running_on_con = False
-    _print_label('Test Ceilometer on control nodes')
     for controller in controllers:
-        ceil_client = CeilometerClient(logger)
-        ceil_client.auth_token()
-        ceilometer_running_on_con = (
-            ceilometer_running_on_con or conf.is_ceilometer_running(controller))
-    if ceilometer_running_on_con:
-        logger.info("Ceilometer is running on control node.")
-    else:
-        logger.error("Ceilometer is not running on control node.")
-        logger.info("CSV will be enabled on compute nodes.")
+        gnocchi_running = (
+            gnocchi_running or conf.is_gnocchi_running(controller))
+        aodh_running = (
+            aodh_running or conf.is_aodh_running(controller))
+
     compute_ids = []
+    compute_node_names = []
     results = []
     plugin_labels = {
+        'intel_rdt': 'Intel RDT',
         'hugepages': 'Hugepages',
+        # 'ipmi': 'IPMI',
+        'mcelog': 'Mcelog',
+        'ovs_stats': 'OVS stats',
+        'ovs_events': 'OVS events'}
+    aodh_plugin_labels = {
         'mcelog': 'Mcelog',
         'ovs_events': 'OVS events'}
     out_plugins = {}
     for compute_node in computes:
         node_id = compute_node.get_id()
-        out_plugins[node_id] = 'CSV'
+        node_name = compute_node.get_name()
+        out_plugins[node_id] = []
         compute_ids.append(node_id)
-        # plugins_to_enable = plugin_labels.keys()
+        compute_node_names.append(node_name)
         plugins_to_enable = []
-        _print_label('NODE {}: Test Ceilometer Plug-in'.format(node_id))
-        logger.info('Checking if ceilometer plug-in is included.')
-        if not conf.check_ceil_plugin_included(compute_node):
-            logger.error('Ceilometer plug-in is not included.')
-            logger.info('Testcases on node {} will not be executed'.format(node_id))
-        else:
-            collectd_restarted, collectd_warnings = conf.restart_collectd(compute_node)
-            sleep_time = 30
-            logger.info('Sleeping for {} seconds after collectd restart...'.format(sleep_time))
-            time.sleep(sleep_time)
-            if not collectd_restarted:
-                for warning in collectd_warnings:
-                    logger.warning(warning)
-                logger.error('Restart of collectd on node {} failed'.format(node_id))
-                logger.info('Testcases on node {} will not be executed'.format(node_id))
+        error_plugins = []
+        gnocchi_running_com = (
+            gnocchi_running and conf.check_gnocchi_plugin_included(
+                compute_node))
+        aodh_running_com = (
+            aodh_running and conf.check_aodh_plugin_included(compute_node))
+        # logger.info("SNMP enabled on {}" .format(node_name))
+        if gnocchi_running_com:
+            out_plugins[node_id].append("Gnocchi")
+        if aodh_running_com:
+            out_plugins[node_id].append("AODH")
+        if snmp_running:
+            out_plugins[node_id].append("SNMP")
+
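+        # The CSV write plug-in is only enabled, and its tests only run,
+        # when Gnocchi output was detected on the node.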
+        if 'Gnocchi' in out_plugins[node_id]:
+            plugins_to_enable.append('csv')
+            out_plugins[node_id].append("CSV")
+            if plugins_to_enable:
+                _print_label(
+                    'NODE {}: Enabling Test Plug-in '.format(node_name)
+                    + 'and Test case execution')
+            if plugins_to_enable and not conf.enable_plugins(
+                    compute_node, plugins_to_enable, error_plugins,
+                    create_backup=False):
+                logger.error(
+                    'Failed to test plugins on node {}.'.format(node_id))
+                logger.info(
+                    'Testcases on node {} will not be executed'.format(
+                        node_id))
+
+        for i in out_plugins[node_id]:
+            if i == 'AODH':
+                for plugin_name in sorted(aodh_plugin_labels.keys()):
+                    _exec_testcase(
+                        aodh_plugin_labels, plugin_name, i,
+                        controllers, compute_node, conf, results,
+                        error_plugins, out_plugins[node_id])
+            elif i == 'CSV':
+                _print_label("Node {}: Executing CSV Testcases".format(
+                    node_name))
+                logger.info("Restarting collectd for CSV tests")
+                collectd_restarted, collectd_warnings = \
+                    conf.restart_collectd(compute_node)
+                sleep_time = 10
+                logger.info(
+                    'Sleeping for {} seconds'.format(sleep_time)
+                    + ' after collectd restart...')
+                time.sleep(sleep_time)
+                if not collectd_restarted:
+                    for warning in collectd_warnings:
+                        logger.warning(warning)
+                    logger.error(
+                        'Restart of collectd on node {} failed'.format(
+                            compute_node))
+                    logger.info(
+                        'CSV Testcases on node {}'.format(compute_node)
+                        + ' will not be executed.')
+                for plugin_name in sorted(plugin_labels.keys()):
+                    _exec_testcase(
+                        plugin_labels, plugin_name, i,
+                        controllers, compute_node, conf, results,
+                        error_plugins, out_plugins[node_id])
+
             else:
-                for warning in collectd_warnings:
-                    logger.warning(warning)
-                ceilometer_running = (
-                    ceilometer_running_on_con and test_ceilometer_node_sends_data(
-                        node_id, 10, logger=logger, client=CeilometerClient(logger)))
-                if ceilometer_running:
-                    out_plugins[node_id] = 'Ceilometer'
-                    logger.info("Ceilometer is running.")
-                else:
-                    plugins_to_enable.append('csv')
-                    out_plugins[node_id] = 'CSV'
-                    logger.error("Ceilometer is not running.")
-                    logger.info("CSV will be enabled for verification of test plugins.")
-                if plugins_to_enable:
-                    _print_label(
-                        'NODE {}: Enabling Test Plug-in '.format(node_id)
-                        + 'and Test case execution')
-                error_plugins = []
-                if plugins_to_enable and not conf.enable_plugins(
-                        compute_node, plugins_to_enable, error_plugins, create_backup=False):
-                    logger.error('Failed to test plugins on node {}.'.format(node_id))
-                    logger.info('Testcases on node {} will not be executed'.format(node_id))
-                else:
-                    if plugins_to_enable:
-                        collectd_restarted, collectd_warnings = conf.restart_collectd(compute_node)
-                        sleep_time = 30
-                        logger.info(
-                            'Sleeping for {} seconds after collectd restart...'.format(sleep_time))
-                        time.sleep(sleep_time)
-                    if plugins_to_enable and not collectd_restarted:
-                        for warning in collectd_warnings:
-                            logger.warning(warning)
-                        logger.error('Restart of collectd on node {} failed'.format(node_id))
-                        logger.info('Testcases on node {} will not be executed'.format(node_id))
-                    else:
-                        if collectd_warnings:
-                            for warning in collectd_warnings:
-                                logger.warning(warning)
-
-                        for plugin_name in sorted(plugin_labels.keys()):
-                            _exec_testcase(
-                                plugin_labels, plugin_name, ceilometer_running,
-                                compute_node, conf, results, error_plugins)
-
-            _print_label('NODE {}: Restoring config file'.format(node_id))
-            conf.restore_config(compute_node)
-
-    mcelog_delete(logger)  # uninstalling mcelog from compute nodes
-
-    print_overall_summary(compute_ids, plugin_labels, results, out_plugins)
-
-    if ((len([res for res in results if not res[2]]) > 0)
-            or (len(results) < len(computes) * len(plugin_labels))):
-        logger.error('Some tests have failed or have not been executed')
-        return 1
-    return 0
+                for plugin_name in sorted(plugin_labels.keys()):
+                    _exec_testcase(
+                        plugin_labels, plugin_name, i,
+                        controllers, compute_node, conf, results,
+                        error_plugins, out_plugins[node_id])
+
+    mcelog_delete()
+    print_overall_summary(
+        compute_ids, plugin_labels, aodh_plugin_labels, results, out_plugins)
+
+    res_overall = 0
+    if any(not res[3] for res in results):
+        logger.error('Some tests have failed or have not been executed')
+        logger.error('Overall Result is Fail')
+        res_overall = 1
+
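+    # The dma module tests run last; their result is combined with the
+    # collectd plug-in results in the final return code.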
+    _print_label('Testing DMA on compute nodes')
+    res_agent = dma.dma_main(logger, conf, computes)
+
+    return 0 if res_overall == 0 and res_agent == 0 else 1
 
 
 if __name__ == '__main__':