Added Functest testcases for Barometer project 29/39529/5
authorSharada Shiddibhavi <sharada.shiddibhavi@intel.com>
Wed, 16 Aug 2017 11:32:59 +0000 (11:32 +0000)
committerSharada Shiddibhavi <sharada.shiddibhavi@intel.com>
Wed, 23 Aug 2017 11:04:04 +0000 (11:04 +0000)
Added different method to get user credentials of the installer
nodes instead of reading from installer_params.yaml

Change-Id: I97419c942e1cd9f943a62c36dbce424872a10cb1
Signed-off-by: Sharada Shiddibhavi <sharada.shiddibhavi@intel.com>
baro_tests/collectd.py
baro_tests/config_server.py
baro_tests/mce-inject_ea [new file with mode: 0755]
baro_tests/tests.py

index 5ac3c8f..9e9b3f6 100644 (file)
@@ -1,4 +1,3 @@
-"""Executing test of plugins"""
 # -*- coding: utf-8 -*-
 
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # License for the specific language governing permissions and limitations
 # under the License.
 
+"""Executing test of plugins"""
+
 import requests
 from keystoneclient.v3 import client
 import os
-import pkg_resources
+import sys
 import time
 import logging
-from config_server import *
-from tests import *
+import config_server
+import tests
+import subprocess
 from opnfv.deployment import factory
-from functest.utils import functest_utils
 
-CEILOMETER_NAME = 'ceilometer'
+GNOCCHI_NAME = 'gnocchi'
 ID_RSA_SRC = '/root/.ssh/id_rsa'
 ID_RSA_DST_DIR = '/home/opnfv/.ssh'
 ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'
-INSTALLER_PARAMS_YAML = pkg_resources.resource_filename(
-    'functest', 'ci/installer_params.yaml')
-FUEL_IP = functest_utils.get_parameter_from_yaml('fuel.ip', INSTALLER_PARAMS_YAML)
-FUEL_USER = functest_utils.get_parameter_from_yaml('fuel.user', INSTALLER_PARAMS_YAML)
-FUEL_PW = functest_utils.get_parameter_from_yaml('fuel.password', INSTALLER_PARAMS_YAML)
+APEX_IP = subprocess.check_output("echo $INSTALLER_IP", shell=True)
+APEX_USER = 'root'
+APEX_USER_STACK = 'stack'
+APEX_PKEY = '/root/.ssh/id_rsa'
 
 
 class KeystoneException(Exception):
@@ -64,53 +64,44 @@ class InvalidResponse(KeystoneException):
             "Invalid response", exc, response)
 
 
-class CeilometerClient(object):
-    """Ceilometer Client to authenticate and request meters"""
-    def __init__(self, bc_logger):
-        """
-        Keyword arguments:
-        bc_logger - logger instance
-        """
+class GnocchiClient(object):
+    # Gnocchi Client to authenticate and request meters
+    def __init__(self):
         self._auth_token = None
-        self._ceilometer_url = None
+        self._gnocchi_url = None
         self._meter_list = None
-        self._logger = bc_logger
 
     def auth_token(self):
-        """Get auth token"""
+        # Get auth token
         self._auth_server()
         return self._auth_token
 
-    def get_ceilometer_url(self):
-        """Get Ceilometer URL"""
-        return self._ceilometer_url
+    def get_gnocchi_url(self):
+        # Get Gnocchi  URL
+        return self._gnocchi_url
 
-    def get_ceil_metrics(self, criteria=None):
-        """Get Ceilometer metrics for given criteria
-
-        Keyword arguments:
-        criteria -- criteria for ceilometer meter list
-        """
+    def get_gnocchi_metrics(self, criteria=None):
+        # Subject to change if metric gathering is different for gnocchi
         self._request_meters(criteria)
         return self._meter_list
 
     def _auth_server(self):
-        """Request token in authentication server"""
-        self._logger.debug('Connecting to the auth server {}'.format(os.environ['OS_AUTH_URL']))
+        # Request token in authentication server
+        logger.debug('Connecting to the auth server {}'.format(
+            os.environ['OS_AUTH_URL']))
         keystone = client.Client(username=os.environ['OS_USERNAME'],
                                  password=os.environ['OS_PASSWORD'],
-                                 tenant_name=os.environ['OS_TENANT_NAME'],
+                                 tenant_name=os.environ['OS_USERNAME'],
                                  auth_url=os.environ['OS_AUTH_URL'])
         self._auth_token = keystone.auth_token
         for service in keystone.service_catalog.get_data():
-            if service['name'] == CEILOMETER_NAME:
+            if service['name'] == GNOCCHI_NAME:
                 for service_type in service['endpoints']:
                     if service_type['interface'] == 'internal':
-                        self._ceilometer_url = service_type['url']
-                        break
+                        self._gnocchi_url = service_type['url']
 
-        if self._ceilometer_url is None:
-            self._logger.warning('Ceilometer is not registered in service catalog')
+        if self._gnocchi_url is None:
+            logger.warning('Gnocchi is not registered in service catalog')
 
     def _request_meters(self, criteria):
         """Request meter list values from ceilometer
@@ -119,9 +110,10 @@ class CeilometerClient(object):
         criteria -- criteria for ceilometer meter list
         """
         if criteria is None:
-            url = self._ceilometer_url + ('/v2/samples?limit=400')
+            url = self._gnocchi_url + ('/v3/resource?limit=400')
         else:
-            url = self._ceilometer_url + ('/v2/meters/%s?q.field=resource_id&limit=400' % criteria)
+            url = self._gnocchi_url \
+                + ('/v3/resource/%s?q.field=resource_id&limit=400' % criteria)
         headers = {'X-Auth-Token': self._auth_token}
         resp = requests.get(url, headers=headers)
         try:
@@ -133,16 +125,15 @@ class CeilometerClient(object):
 
 class CSVClient(object):
     """Client to request CSV meters"""
-    def __init__(self, bc_logger, conf):
+    def __init__(self, conf):
         """
         Keyword arguments:
-        bc_logger - logger instance
         conf -- ConfigServer instance
         """
-        self._logger = bc_logger
         self.conf = conf
 
-    def get_csv_metrics(self, compute_node, plugin_subdirectories, meter_categories):
+    def get_csv_metrics(
+            self, compute_node, plugin_subdirectories, meter_categories):
         """Get CSV metrics.
 
         Keyword arguments:
@@ -152,34 +143,48 @@ class CSVClient(object):
 
         Return list of metrics.
         """
-        stdout = self.conf.execute_command("date '+%Y-%m-%d'", compute_node.get_ip())
+        stdout = self.conf.execute_command(
+            "date '+%Y-%m-%d'", compute_node.get_ip())
         date = stdout[0].strip()
         metrics = []
         for plugin_subdir in plugin_subdirectories:
             for meter_category in meter_categories:
                 stdout = self.conf.execute_command(
-                    "tail -2 /var/lib/collectd/csv/node-"
-                    + "{0}.domain.tld/{1}/{2}-{3}".format(
-                        compute_node.get_id(), plugin_subdir, meter_category, date),
+                    "tail -2 /var/lib/collectd/csv/"
+                    + "{0}.jf.intel.com/{1}/{2}-{3}".format(
+                        compute_node.get_name(), plugin_subdir, meter_category,
+                        date),
                     compute_node.get_ip())
                 # Storing last two values
                 values = stdout
                 if len(values) < 2:
-                    self._logger.error(
+                    logger.error(
                         'Getting last two CSV entries of meter category '
-                        + '{0} in {1} subdir failed'.format(meter_category, plugin_subdir))
+                        + '{0} in {1} subdir failed'.format(
+                            meter_category, plugin_subdir))
                 else:
                     old_value = int(values[0][0:values[0].index('.')])
                     new_value = int(values[1][0:values[1].index('.')])
-                    metrics.append((plugin_subdir, meter_category, old_value, new_value))
+                    metrics.append((
+                        plugin_subdir, meter_category, old_value, new_value))
         return metrics
 
 
-def _check_logger():
-    """Check whether there is global logger available and if not, define one."""
-    if 'logger' not in globals():
-        global logger
-        logger = logger.Logger("barometercollectd").getLogger()
+def get_csv_categories_for_ipmi(conf, compute_node):
+    """Get CSV metrics.
+
+    Keyword arguments:
+    compute_node -- compute node instance
+
+    Return list of categories.
+    """
+    stdout = conf.execute_command(
+        "date '+%Y-%m-%d'", compute_node.get_ip())
+    date = stdout[0].strip()
+    categories = conf.execute_command(
+        "ls /var/lib/collectd/csv/{0}.jf.intel.com/ipmi | grep {1}".format(
+            compute_node.get_name(), date), compute_node.get_ip())
+    return [category.strip()[:-11] for category in categories]
 
 
 def _process_result(compute_node, test, result, results_list):
@@ -191,9 +196,13 @@ def _process_result(compute_node, test, result, results_list):
     results_list -- results list
     """
     if result:
-        logger.info('Compute node {0} test case {1} PASSED.'.format(compute_node, test))
+        logger.info(
+            'Compute node {0} test case {1} PASSED.'.format(
+                compute_node, test))
     else:
-        logger.error('Compute node {0} test case {1} FAILED.'.format(compute_node, test))
+        logger.error(
+            'Compute node {0} test case {1} FAILED.'.format(
+                compute_node, test))
     results_list.append((compute_node, test, result))
 
 
@@ -215,17 +224,19 @@ def _print_label(label):
     logger.info(('=' * length1) + label + ('=' * length2))
 
 
-def _print_plugin_label(plugin, node_id):
+def _print_plugin_label(plugin, node_name):
     """Print plug-in label.
 
     Keyword arguments:
     plugin -- plug-in name
     node_id -- node ID
     """
-    _print_label('Node {0}: Plug-in {1} Test case execution'.format(node_id, plugin))
+    _print_label(
+        'Node {0}: Plug-in {1} Test case execution'.format(node_name, plugin))
 
 
-def _print_final_result_of_plugin(plugin, compute_ids, results, out_plugins, out_plugin):
+def _print_final_result_of_plugin(
+        plugin, compute_ids, results, out_plugins, out_plugin):
     """Print final results of plug-in.
 
     Keyword arguments:
@@ -240,11 +251,12 @@ def _print_final_result_of_plugin(plugin, compute_ids, results, out_plugins, out
         if out_plugins[id] == out_plugin:
             if (id, plugin, True) in results:
                 print_line += ' PASS   |'
-            elif (id, plugin, False) in results and out_plugins[id] == out_plugin:
+            elif (id, plugin, False) in results \
+                    and out_plugins[id] == out_plugin:
                 print_line += ' FAIL   |'
             else:
                 print_line += ' NOT EX |'
-        elif out_plugin == 'Ceilometer':
+        elif out_plugin == 'Gnocchi':
             print_line += ' NOT EX |'
         else:
             print_line += ' SKIP   |'
@@ -260,34 +272,50 @@ def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
     results -- results list
     out_plugins --  list of used out plug-ins
     """
-    compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
+    compute_node_names = ['Node-{}'.format(i) for i in range(
+        len((compute_ids)))]
+    # compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
     all_computes_in_line = ''
     for compute in compute_node_names:
-        all_computes_in_line = all_computes_in_line + '| ' + compute + (' ' * (7 - len(compute)))
+        all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
     line_of_nodes = '| Test           ' + all_computes_in_line + '|'
     logger.info('=' * 70)
     logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
     logger.info(
-        '|' + ' ' * ((9*len(compute_node_names))/2) + ' OVERALL SUMMARY'
-        + ' ' * (9*len(compute_node_names) - (9*len(compute_node_names))/2) + '|')
-    logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+        '|' + ' ' * ((9*len(compute_node_names))/2)
+        + ' OVERALL SUMMARY'
+        + ' ' * (
+            9*len(compute_node_names) - (9*len(compute_node_names))/2)
+        + '|')
+    logger.info(
+        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
     logger.info(line_of_nodes)
-    logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
-    out_plugins_print = ['Ceilometer']
+    logger.info(
+        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+    out_plugins_print = ['Gnocchi']
+    if 'SNMP' in out_plugins.values():
+        out_plugins_print.append('SNMP')
     if 'CSV' in out_plugins.values():
         out_plugins_print.append('CSV')
     for out_plugin in out_plugins_print:
         output_plugins_line = ''
         for id in compute_ids:
             out_plugin_result = '----'
-            if out_plugin == 'Ceilometer':
-                out_plugin_result = 'PASS' if out_plugins[id] == out_plugin else 'FAIL'
+            if out_plugin == 'Gnocchi':
+                out_plugin_result = \
+                    'PASS' if out_plugins[id] == out_plugin else 'FAIL'
+            if out_plugin == 'SNMP':
+                if out_plugins[id] == out_plugin:
+                    out_plugin_result = \
+                        'PASS' if out_plugins[id] == out_plugin else 'FAIL'
+                else:
+                    out_plugin_result = 'SKIP'
             if out_plugin == 'CSV':
                 if out_plugins[id] == out_plugin:
                     out_plugin_result = \
                         'PASS' if [
-                            plugin for comp_id, plugin,
-                            res in results if comp_id == id and res] else 'FAIL'
+                            plugin for comp_id, plugin, res in results
+                            if comp_id == id and res] else 'FAIL'
                 else:
                     out_plugin_result = 'SKIP'
             output_plugins_line += '| ' + out_plugin_result + '   '
@@ -297,13 +325,17 @@ def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
         for plugin in sorted(tested_plugins.values()):
             line_plugin = _print_final_result_of_plugin(
                 plugin, compute_ids, results, out_plugins, out_plugin)
-            logger.info('|  IN:{}'.format(plugin) + (' ' * (11-len(plugin))) + '|' + line_plugin)
-        logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+            logger.info(
+                '|  IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
+                + '|' + line_plugin)
+        logger.info(
+            '+' + ('-' * 16) + '+'
+            + (('-' * 8) + '+') * len(compute_node_names))
     logger.info('=' * 70)
 
 
 def _exec_testcase(
-        test_labels, name, ceilometer_running, compute_node,
+        test_labels, name, gnocchi_running, compute_node,
         conf, results, error_plugins):
     """Execute the testcase.
 
@@ -314,7 +346,8 @@ def _exec_testcase(
     compute_node -- compute node ID
     conf -- ConfigServer instance
     results -- results list
-    error_plugins -- list of tuples with plug-in errors (plugin, error_description, is_critical):
+    error_plugins -- list of tuples with plug-in errors
+        (plugin, error_description, is_critical):
         plugin -- plug-in ID, key of test_labels dictionary
         error_decription -- description of the error
         is_critical -- boolean value indicating whether error is critical
@@ -322,46 +355,94 @@ def _exec_testcase(
     ovs_interfaces = conf.get_ovs_interfaces(compute_node)
     ovs_configured_interfaces = conf.get_plugin_config_values(
         compute_node, 'ovs_events', 'Interfaces')
+    ovs_configured_bridges = conf.get_plugin_config_values(
+         compute_node, 'ovs_stats', 'Bridges')
     ovs_existing_configured_int = [
         interface for interface in ovs_interfaces
         if interface in ovs_configured_interfaces]
+    ovs_existing_configured_bridges = [
+        bridge for bridge in ovs_interfaces
+        if bridge in ovs_configured_bridges]
     plugin_prerequisites = {
-        'mcelog': [(conf.is_installed(compute_node, 'mcelog'), 'mcelog must be installed.')],
+        'intel_rdt': [(
+            conf.is_libpqos_on_node(compute_node),
+            'libpqos must be installed.')],
+        'mcelog': [(
+            conf.is_installed(compute_node, 'mcelog'),
+            'mcelog must be installed.')],
         'ovs_events': [(
             len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
-            'Interfaces must be configured.')]}
+            'Interfaces must be configured.')],
+        'ovs_stats': [(
+            len(ovs_existing_configured_bridges) > 0,
+            'Bridges must be configured.')]}
     ceilometer_criteria_lists = {
+        'intel_rdt': [
+            'intel_rdt.ipc', 'intel_rdt.bytes',
+            'intel_rdt.memory_bandwidth'],
         'hugepages': ['hugepages.vmpage_number'],
+        'ipmi': ['ipmi.temperature', 'ipmi.voltage'],
         'mcelog': ['mcelog.errors'],
+        'ovs_stats': ['interface.if_packets'],
         'ovs_events': ['ovs_events.gauge']}
     ceilometer_substr_lists = {
-        'ovs_events': ovs_existing_configured_int if len(ovs_existing_configured_int) > 0 else ovs_interfaces}
+        'ovs_events': ovs_existing_configured_int if len(
+            ovs_existing_configured_int) > 0 else ovs_interfaces}
     csv_subdirs = {
+        'intel_rdt': [
+            'intel_rdt-{}'.format(core)
+            for core in conf.get_plugin_config_values(
+                compute_node, 'intel_rdt', 'Cores')],
         'hugepages': [
-            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb', 'hugepages-node1-2048Kb',
-            'hugepages-mm-1048576Kb', 'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
-        'mcelog': ['mcelog-SOCKET_0_CHANNEL_0_DIMM_any', 'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
+            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',
+            'hugepages-node1-2048Kb', 'hugepages-mm-1048576Kb',
+            'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
+        'ipmi': ['ipmi'],
+        'mcelog': [
+            'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
+            'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
+        'ovs_stats': [
+            'ovs_stats-{0}.{0}'.format(interface)
+            for interface in ovs_existing_configured_bridges],
         'ovs_events': [
             'ovs_events-{}'.format(interface)
-            for interface in (ovs_existing_configured_int if len(ovs_existing_configured_int) > 0 else ovs_interfaces)]}
+            for interface in (
+                ovs_existing_configured_int
+                if len(ovs_existing_configured_int) > 0 else ovs_interfaces)]}
+    csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf, compute_node)
     csv_meter_categories = {
+        'intel_rdt': [
+            'bytes-llc', 'ipc', 'memory_bandwidth-local',
+            'memory_bandwidth-remote'],
         'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
+        'ipmi': csv_meter_categories_ipmi,
         'mcelog': [
-            'errors-corrected_memory_errors', 'errors-uncorrected_memory_errors',
-            'errors-corrected_memory_errors_in_24h', 'errors-uncorrected_memory_errors_in_24h'],
+            'errors-corrected_memory_errors',
+            'errors-uncorrected_memory_errors',
+            'errors-corrected_memory_errors_in_24h',
+            'errors-uncorrected_memory_errors_in_24h'],
+        'ovs_stats': [
+            'if_collisions', 'if_dropped', 'if_errors', 'if_packets',
+            'if_rx_errors-crc', 'if_rx_errors-frame', 'if_rx_errors-over',
+            'if_rx_octets', 'if_tx_octets'],
         'ovs_events': ['gauge-link_status']}
 
-    _print_plugin_label(test_labels[name] if name in test_labels else name, compute_node.get_id())
+    _print_plugin_label(
+        test_labels[name] if name in test_labels else name,
+        compute_node.get_name())
     plugin_critical_errors = [
-        error for plugin, error, critical in error_plugins if plugin == name and critical]
+        error for plugin, error, critical in error_plugins
+        if plugin == name and critical]
     if plugin_critical_errors:
         logger.error('Following critical errors occurred:'.format(name))
         for error in plugin_critical_errors:
             logger.error(' * ' + error)
-        _process_result(compute_node.get_id(), test_labels[name], False, results)
+        _process_result(
+            compute_node.get_id(), test_labels[name], False, results)
     else:
         plugin_errors = [
-            error for plugin, error, critical in error_plugins if plugin == name and not critical]
+            error for plugin, error, critical in error_plugins
+            if plugin == name and not critical]
         if plugin_errors:
             logger.warning('Following non-critical errors occured:')
             for error in plugin_errors:
@@ -370,7 +451,8 @@ def _exec_testcase(
         if name in plugin_prerequisites:
             failed_prerequisites = [
                 prerequisite_name for prerequisite_passed,
-                prerequisite_name in plugin_prerequisites[name] if not prerequisite_passed]
+                prerequisite_name in plugin_prerequisites[name]
+                if not prerequisite_passed]
         if failed_prerequisites:
             logger.error(
                 '{} test will not be executed, '.format(name)
@@ -378,86 +460,102 @@ def _exec_testcase(
             for prerequisite in failed_prerequisites:
                 logger.error(' * {}'.format(prerequisite))
         else:
-            if ceilometer_running:
-                res = test_ceilometer_node_sends_data(
-                    compute_node.get_id(), conf.get_plugin_interval(compute_node, name),
-                    logger=logger, client=CeilometerClient(logger),
+            if gnocchi_running:
+                res = conf.test_plugins_with_gnocchi(
+                    compute_node.get_id(),
+                    conf.get_plugin_interval(compute_node, name),
+                    logger, client=GnocchiClient(),
                     criteria_list=ceilometer_criteria_lists[name],
-                    resource_id_substrings=(ceilometer_substr_lists[name]
-                                            if name in ceilometer_substr_lists else ['']))
+                    resource_id_substrings=(
+                        ceilometer_substr_lists[name]
+                        if name in ceilometer_substr_lists else ['']))
             else:
-                res = test_csv_handles_plugin_data(
-                    compute_node, conf.get_plugin_interval(compute_node, name), name,
-                    csv_subdirs[name], csv_meter_categories[name], logger,
-                    CSVClient(logger, conf))
+                res = tests.test_csv_handles_plugin_data(
+                    compute_node, conf.get_plugin_interval(compute_node, name),
+                    name, csv_subdirs[name], csv_meter_categories[name],
+                    logger, CSVClient(conf))
             if res and plugin_errors:
                 logger.info(
                     'Test works, but will be reported as failure,'
                     + 'because of non-critical errors.')
                 res = False
-            _process_result(compute_node.get_id(), test_labels[name], res, results)
-
+            _process_result(
+                compute_node.get_id(), test_labels[name], res, results)
 
-def mcelog_install(logger):
-    """Install mcelog on compute nodes.
 
-    Keyword arguments:
-    logger - logger instance
+def get_results_for_ovs_events(
+        plugin_labels, plugin_name, gnocchi_running,
+        compute_node, conf, results, error_plugins):
+    """ Testing OVS Events with python plugin
     """
+    plugin_label = 'OVS events'
+    res = conf.enable_ovs_events(
+        compute_node, plugin_label, error_plugins, create_backup=False)
+    _process_result(
+        compute_node.get_id(), plugin_label, res, results)
+    logger.info("Results for OVS Events = {}" .format(results))
+
+
+def mcelog_install():
+    """Install mcelog on compute nodes."""
     _print_label('Enabling mcelog on compute nodes')
-    handler = factory.Factory.get_handler('fuel', FUEL_IP, FUEL_USER, installer_pwd='')
+    handler = factory.Factory.get_handler('apex',
+                                          APEX_IP,
+                                          APEX_USER_STACK,
+                                          APEX_PKEY)
     nodes = handler.get_nodes()
-    openstack_version = handler.get_openstack_version()
-    if openstack_version.find('14.') != 0:
-        logger.info('Mcelog will not be installed,'
-                    + ' unsupported Openstack version found ({}).'.format(openstack_version))
-    else:
-        for node in nodes:
-            if node.is_compute():
-                ubuntu_release = node.run_cmd('lsb_release -r')
-                if '16.04' not in ubuntu_release:
-                    logger.info('Mcelog will not be enabled'
-                                + 'on node-{0}, unsupported Ubuntu release found ({1}).'.format(
-                                node.get_dict()['id'], ubuntu_release))
-                else:
-                    logger.info('Checking if  mcelog is enabled on node-{}...'.format(
+    for node in nodes:
+        if node.is_compute():
+            centos_release = node.run_cmd('uname -r')
+            if '3.10.0-514.26.2.el7.x86_64' not in centos_release:
+                logger.info(
+                    'Mcelog will not be enabled '
+                    + 'on node-{0}, '.format(node.get_dict()['id'])
+                    + 'unsupported CentOS release found ({1}).'.format(
+                        centos_release))
+            else:
+                logger.info(
+                    'Checking if  mcelog is enabled'
+                    + ' on node-{}...'.format(node.get_dict()['id']))
+                res = node.run_cmd('ls')
+            if 'mce-inject_ea' and 'corrected' in res:
+                logger.info(
+                    'Mcelog seems to be already installed '
+                    + 'on node-{}.'.format(node.get_dict()['id']))
+                node.run_cmd('modprobe mce-inject_ea')
+                node.run_cmd('mce-inject_ea < corrected')
+            else:
+                logger.info(
+                    'Mcelog will be enabled on node-{}...'.format(
                         node.get_dict()['id']))
-                    res = node.run_cmd('ls /root/')
-                    if 'mce-inject_df' and 'corrected' in res:
-                        logger.info('Mcelog seems to be already installed on node-{}.'.format(
-                            node.get_dict()['id']))
-                        res = node.run_cmd('modprobe mce-inject')
-                        res = node.run_cmd('/root/mce-inject_df < /root/corrected')
-                    else:
-                        logger.info('Mcelog will be enabled on node-{}...'.format(
-                            node.get_dict()['id']))
-                        res = node.put_file('/home/opnfv/repos/barometer/baro_utils/mce-inject_df',
-                                            '/root/mce-inject_df')
-                        res = node.run_cmd('chmod a+x /root/mce-inject_df')
-                        res = node.run_cmd('echo "CPU 0 BANK 0" > /root/corrected')
-                        res = node.run_cmd('echo "STATUS 0xcc00008000010090" >> /root/corrected')
-                        res = node.run_cmd('echo "ADDR 0x0010FFFFFFF" >> /root/corrected')
-                        res = node.run_cmd('modprobe mce-inject')
-                        res = node.run_cmd('/root/mce-inject_df < /root/corrected')
-        logger.info('Mcelog is installed on all compute nodes')
-
-
-def mcelog_delete(logger):
-    """Uninstall mcelog from compute nodes.
-
-    Keyword arguments:
-    logger - logger instance
-    """
-    handler = factory.Factory.get_handler('fuel', FUEL_IP, FUEL_USER, installer_pwd='')
+                node.put_file(
+                    '/usr/local/lib/python2.7/dist-packages/baro_tests/'
+                    + 'mce-inject_ea', 'mce-inject_ea')
+                node.run_cmd('chmod a+x mce-inject_ea')
+                node.run_cmd('echo "CPU 0 BANK 0" > corrected')
+                node.run_cmd(
+                    'echo "STATUS 0xcc00008000010090" >>'
+                    + ' corrected')
+                node.run_cmd(
+                    'echo "ADDR 0x0010FFFFFFF" >> corrected')
+                node.run_cmd('modprobe mce-inject')
+                node.run_cmd('mce-inject_ea < corrected')
+    logger.info('Mcelog is installed on all compute nodes')
+
+
+def mcelog_delete():
+    """Uninstall mcelog from compute nodes."""
+    handler = factory.Factory.get_handler(
+            'apex', APEX_IP, APEX_USER, APEX_PKEY)
     nodes = handler.get_nodes()
     for node in nodes:
         if node.is_compute():
-            output = node.run_cmd('ls /root/')
-            if 'mce-inject_df' in output:
-                res = node.run_cmd('rm /root/mce-inject_df')
+            output = node.run_cmd('ls')
+            if 'mce-inject_ea' in output:
+                node.run_cmd('rm mce-inject_ea')
             if 'corrected' in output:
-                res = node.run_cmd('rm /root/corrected')
-            res = node.run_cmd('systemctl restart mcelog')
+                node.run_cmd('rm corrected')
+            node.run_cmd('systemctl restart mcelog')
     logger.info('Mcelog is deleted from all compute nodes')
 
 
@@ -465,16 +563,26 @@ def get_ssh_keys():
     if not os.path.isdir(ID_RSA_DST_DIR):
         os.makedirs(ID_RSA_DST_DIR)
     if not os.path.isfile(ID_RSA_DST):
-        logger.info("RSA key file {} doesn't exist, it will be downloaded from installer node.".format(ID_RSA_DST))
-        handler = factory.Factory.get_handler('fuel', FUEL_IP, FUEL_USER, installer_pwd=FUEL_PW)
-        fuel = handler.get_installer_node()
-        fuel.get_file(ID_RSA_SRC, ID_RSA_DST)
+        logger.info(
+            "RSA key file {} doesn't exist".format(ID_RSA_DST)
+            + ", it will be downloaded from installer node.")
+        handler = factory.Factory.get_handler(
+            'apex', APEX_IP, APEX_USER, APEX_PKEY)
+        apex = handler.get_installer_node()
+        apex.get_file(ID_RSA_SRC, ID_RSA_DST)
     else:
         logger.info("RSA key file {} exists.".format(ID_RSA_DST))
 
 
+def _check_logger():
+    """Check whether there is global logger available and if not, define one."""
+    if 'logger' not in globals():
+        global logger
+        logger = logger.Logger("barometercollectd").getLogger()
+
+
 def main(bt_logger=None):
-    """Check each compute node sends ceilometer metrics.
+    """Check each compute node sends gnocchi metrics.
 
     Keyword arguments:
     bt_logger -- logger instance
@@ -487,8 +595,9 @@ def main(bt_logger=None):
     else:
         global logger
         logger = bt_logger
+    _print_label("Starting barometer tests suite")
     get_ssh_keys()
-    conf = ConfigServer(FUEL_IP, FUEL_USER, logger)
+    conf = config_server.ConfigServer(APEX_IP, APEX_USER, logger)
     controllers = conf.get_controllers()
     if len(controllers) == 0:
         logger.error('No controller nodes found!')
@@ -498,89 +607,120 @@ def main(bt_logger=None):
         logger.error('No compute nodes found!')
         return 1
 
-    _print_label('Display of Control and Compute nodes available in the set up')
+    _print_label(
+        'Display of Control and Compute nodes available in the set up')
     logger.info('controllers: {}'.format([('{0}: {1} ({2})'.format(
-        node.get_id(), node.get_name(), node.get_ip())) for node in controllers]))
+        node.get_id(), node.get_name(),
+        node.get_ip())) for node in controllers]))
     logger.info('computes: {}'.format([('{0}: {1} ({2})'.format(
-        node.get_id(), node.get_name(), node.get_ip())) for node in computes]))
+        node.get_id(), node.get_name(), node.get_ip()))
+        for node in computes]))
 
-    mcelog_install(logger)  # installation of mcelog
+    mcelog_install()
+    gnocchi_running_on_con = False
+    _print_label('Test Gnocchi on controller nodes')
 
-    ceilometer_running_on_con = False
-    _print_label('Test Ceilometer on control nodes')
     for controller in controllers:
-        ceil_client = CeilometerClient(logger)
-        ceil_client.auth_token()
-        ceilometer_running_on_con = (
-            ceilometer_running_on_con or conf.is_ceilometer_running(controller))
-    if ceilometer_running_on_con:
-        logger.info("Ceilometer is running on control node.")
+        logger.info("Controller = {}" .format(controller))
+        gnocchi_client = GnocchiClient()
+        gnocchi_client.auth_token()
+        gnocchi_running_on_con = (
+            gnocchi_running_on_con or conf.is_gnocchi_running(
+                controller))
+    if gnocchi_running_on_con:
+        logger.info("Gnocchi is running on controller.")
     else:
-        logger.error("Ceilometer is not running on control node.")
+        logger.error("Gnocchi is not running on controller.")
         logger.info("CSV will be enabled on compute nodes.")
+
     compute_ids = []
+    compute_node_names = []
     results = []
     plugin_labels = {
+        'intel_rdt': 'Intel RDT',
         'hugepages': 'Hugepages',
+        # 'ipmi': 'IPMI',
         'mcelog': 'Mcelog',
+        'ovs_stats': 'OVS stats',
         'ovs_events': 'OVS events'}
     out_plugins = {}
     for compute_node in computes:
         node_id = compute_node.get_id()
+        node_name = compute_node.get_name()
         out_plugins[node_id] = 'CSV'
         compute_ids.append(node_id)
-        # plugins_to_enable = plugin_labels.keys()
+        compute_node_names.append(node_name)
         plugins_to_enable = []
-        _print_label('NODE {}: Test Ceilometer Plug-in'.format(node_id))
-        logger.info('Checking if ceilometer plug-in is included.')
-        if not conf.check_ceil_plugin_included(compute_node):
-            logger.error('Ceilometer plug-in is not included.')
-            logger.info('Testcases on node {} will not be executed'.format(node_id))
+        _print_label('NODE {}: Test Gnocchi Plug-in'.format(node_name))
+        logger.info('Checking if gnocchi plug-in is included in compute nodes.')
+        if not conf.check_gnocchi_plugin_included(compute_node):
+            logger.error('Gnocchi plug-in is not included.')
+            logger.info(
+                'Testcases on node {} will not be executed'.format(node_name))
         else:
-            collectd_restarted, collectd_warnings = conf.restart_collectd(compute_node)
-            sleep_time = 30
-            logger.info('Sleeping for {} seconds after collectd restart...'.format(sleep_time))
+            collectd_restarted, collectd_warnings = \
+                conf.restart_collectd(compute_node)
+            sleep_time = 5
+            logger.info(
+                'Sleeping for {} seconds after collectd restart...'.format(
+                    sleep_time))
             time.sleep(sleep_time)
             if not collectd_restarted:
                 for warning in collectd_warnings:
                     logger.warning(warning)
-                logger.error('Restart of collectd on node {} failed'.format(node_id))
-                logger.info('Testcases on node {} will not be executed'.format(node_id))
+                logger.error(
+                    'Restart of collectd on node {} failed'.format(node_name))
+                logger.info(
+                    'Testcases on node {} will not be executed'.format(
+                        node_name))
             else:
                 for warning in collectd_warnings:
                     logger.warning(warning)
-                ceilometer_running = (
-                    ceilometer_running_on_con and test_ceilometer_node_sends_data(
-                        node_id, 10, logger=logger, client=CeilometerClient(logger)))
-                if ceilometer_running:
-                    out_plugins[node_id] = 'Ceilometer'
-                    logger.info("Ceilometer is running.")
+                gnocchi_running = (
+                    gnocchi_running_on_con
+                    and conf.test_gnocchi_is_sending_data(
+                        controller))
+                if gnocchi_running:
+                    out_plugins[node_id] = 'Gnocchi'
+                    logger.info("Gnocchi is active and collecting data")
                 else:
                     plugins_to_enable.append('csv')
                     out_plugins[node_id] = 'CSV'
-                    logger.error("Ceilometer is not running.")
-                    logger.info("CSV will be enabled for verification of test plugins.")
+                    logger.error("Gnocchi is inactive and not collecting data")
+                    logger.info(
+                        "CSV will be enabled for verification "
+                        + "of test plugins.")
                 if plugins_to_enable:
                     _print_label(
-                        'NODE {}: Enabling Test Plug-in '.format(node_id)
+                        'NODE {}: Enabling Test Plug-in '.format(node_name)
                         + 'and Test case execution')
                 error_plugins = []
                 if plugins_to_enable and not conf.enable_plugins(
-                        compute_node, plugins_to_enable, error_plugins, create_backup=False):
-                    logger.error('Failed to test plugins on node {}.'.format(node_id))
-                    logger.info('Testcases on node {} will not be executed'.format(node_id))
+                        compute_node, plugins_to_enable, error_plugins,
+                        create_backup=False):
+                    logger.error(
+                        'Failed to test plugins on node {}.'.format(node_id))
+                    logger.info(
+                        'Testcases on node {} will not be executed'.format(
+                            node_id))
                 else:
                     if plugins_to_enable:
-                        collectd_restarted, collectd_warnings = conf.restart_collectd(compute_node)
+                        collectd_restarted, collectd_warnings = \
+                            conf.restart_collectd(compute_node)
                         sleep_time = 30
                         logger.info(
-                            'Sleeping for {} seconds after collectd restart...'.format(sleep_time))
+                            'Sleeping for {} seconds'.format(sleep_time)
+                            + ' after collectd restart...')
                         time.sleep(sleep_time)
                     if plugins_to_enable and not collectd_restarted:
                         for warning in collectd_warnings:
                             logger.warning(warning)
-                        logger.error('Restart of collectd on node {} failed'.format(node_id))
-                        logger.info('Testcases on node {} will not be executed'.format(node_id))
+                        logger.error(
+                            'Restart of collectd on node {} failed'.format(
+                                node_id))
+                        logger.info(
+                            'Testcases on node {}'.format(node_id)
+                            + ' will not be executed.')
                     else:
                         if collectd_warnings:
                             for warning in collectd_warnings:
@@ -588,14 +728,13 @@ def main(bt_logger=None):
 
                         for plugin_name in sorted(plugin_labels.keys()):
                             _exec_testcase(
-                                plugin_labels, plugin_name, ceilometer_running,
+                                plugin_labels, plugin_name,
+                                gnocchi_running,
                                 compute_node, conf, results, error_plugins)
 
-            _print_label('NODE {}: Restoring config file'.format(node_id))
+            _print_label('NODE {}: Restoring config file'.format(node_name))
             conf.restore_config(compute_node)
-
-    mcelog_delete(logger)  # uninstalling mcelog from compute nodes
-
+        mcelog_delete()
     print_overall_summary(compute_ids, plugin_labels, results, out_plugins)
 
     if ((len([res for res in results if not res[2]]) > 0)
index 358a8ff..efe2691 100644 (file)
@@ -1,7 +1,6 @@
-"""Classes used by client.py"""
 # -*- coding: utf-8 -*-
 
-#Licensed under the Apache License, Version 2.0 (the "License"); you may
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 # License for the specific language governing permissions and limitations
 # under the License.
 
+"""Classes used by collectd.py"""
+
 import paramiko
 import time
 import string
 import os.path
-
+import os
+import re
 ID_RSA_PATH = '/home/opnfv/.ssh/id_rsa'
 SSH_KEYS_SCRIPT = '/home/opnfv/barometer/baro_utils/get_ssh_keys.sh'
 DEF_PLUGIN_INTERVAL = 10
-COLLECTD_CONF = '/etc/collectd/collectd.conf'
+COLLECTD_CONF = '/etc/collectd.conf'
 COLLECTD_CONF_DIR = '/etc/collectd/collectd.conf.d'
+NOTIFICATION_FILE = '/var/log/python-notifications.dump'
+COLLECTD_NOTIFICATION = '/etc/collectd_notification_dump.py'
 
 
 class Node(object):
     """Node configuration class"""
     def __init__(self, attrs):
-        self.__id = int(attrs[0])
-        self.__status = attrs[1]
+        self.__null = attrs[0]
+        self.__id = attrs[1]
         self.__name = attrs[2]
-        self.__cluster = int(attrs[3]) if attrs[3] else None
-        self.__ip = attrs[4]
-        self.__mac = attrs[5]
-        self.__roles = [x.strip(' ') for x in attrs[6].split(',')]
-        self.__pending_roles = attrs[7]
-        self.__online = int(attrs[8]) if attrs[3] and attrs[8]else None
-        self.__group_id = int(attrs[9]) if attrs[3] else None
+        self.__status = attrs[3] if attrs[3] else None
+        self.__taskState = attrs[4]
+        self.__pwrState = attrs[5]
+        self.__ip = re.sub('^[a-z]+=', '', attrs[6])
 
     def get_name(self):
         """Get node name"""
@@ -52,68 +53,84 @@ class Node(object):
         return self.__ip
 
     def get_roles(self):
-        """Get node roles"""
+        """Get node role"""
         return self.__roles
 
 
 class ConfigServer(object):
     """Class to get env configuration"""
-    def __init__(self, host, user, logger, passwd=None):
+    def __init__(self, host, user, logger, priv_key=None):
         self.__host = host
         self.__user = user
-        self.__passwd = passwd
-        self.__priv_key = None
+        self.__passwd = None
+        self.__priv_key = priv_key
         self.__nodes = list()
         self.__logger = logger
 
         self.__private_key_file = ID_RSA_PATH
         if not os.path.isfile(self.__private_key_file):
             self.__logger.error(
-                "Private key file '{}' not found.".format(self.__private_key_file))
-            raise IOError("Private key file '{}' not found.".format(self.__private_key_file))
+                "Private key file '{}'".format(self.__private_key_file)
+                + " not found.")
+            raise IOError("Private key file '{}' not found.".format(
+                self.__private_key_file))
 
         # get list of available nodes
-        ssh, sftp = self.__open_sftp_session(self.__host, self.__user, self.__passwd)
+        ssh, sftp = self.__open_sftp_session(
+            self.__host, self.__user, self.__passwd)
         attempt = 1
         fuel_node_passed = False
 
         while (attempt <= 10) and not fuel_node_passed:
-            stdin, stdout, stderr = ssh.exec_command("fuel node")
+            stdin, stdout, stderr = ssh.exec_command(
+                "source stackrc; nova list")
             stderr_lines = stderr.readlines()
             if stderr_lines:
-                self.__logger.warning("'fuel node' command failed (try {}):".format(attempt))
+                self.__logger.warning(
+                    "'fuel node' command failed (try {}):".format(attempt))
                 for line in stderr_lines:
                     self.__logger.debug(line.strip())
             else:
                 fuel_node_passed = True
                 if attempt > 1:
-                    self.__logger.info("'fuel node' command passed (try {})".format(attempt))
+                    self.__logger.info(
+                        "'fuel node' command passed (try {})".format(attempt))
             attempt += 1
         if not fuel_node_passed:
-            self.__logger.error("'fuel node' command failed. This was the last try.")
-            raise OSError("'fuel node' command failed. This was the last try.")
+            self.__logger.error(
+                "'fuel node' command failed. This was the last try.")
+            raise OSError(
+                "'fuel node' command failed. This was the last try.")
         node_table = stdout.readlines()\
 
         # skip table title and parse table values
-        for entry in node_table[2:]:
-            self.__nodes.append(Node([str(x.strip(' \n')) for x in entry.split('|')]))
+
+        for entry in node_table[3:]:
+            if entry[0] == '+' or entry[0] == '\n':
+                print entry
+                pass
+            else:
+                self.__nodes.append(
+                    Node([str(x.strip(' \n')) for x in entry.split('|')]))
 
     def get_controllers(self):
-        """Get list of controllers"""
-        return [node for node in self.__nodes if 'controller' in node.get_roles()]
+        # Get list of controllers
+        print self.__nodes[0]._Node__ip
+        return (
+            [node for node in self.__nodes if 'controller' in node.get_name()])
 
     def get_computes(self):
-        """Get list of computes"""
-        return [node for node in self.__nodes if 'compute' in node.get_roles()]
+        # Get list of computes
+        return (
+            [node for node in self.__nodes if 'compute' in node.get_name()])
 
     def get_nodes(self):
-        """Get list of nodes"""
+        # Get list of nodes
         return self.__nodes
 
     def __open_sftp_session(self, host, user, passwd=None):
-        """Connect to given host.
-
-        Keyword arguments:
+        # Connect to given host.
+        """Keyword arguments:
         host -- host to connect
         user -- user to use
         passwd -- password to use
@@ -127,10 +144,12 @@ class ConfigServer(object):
         # try a direct access using password or private key
         if not passwd and not self.__priv_key:
             # get private key
-            self.__priv_key = paramiko.RSAKey.from_private_key_file(self.__private_key_file)
+            self.__priv_key = paramiko.RSAKey.from_private_key_file(
+                self.__private_key_file)
 
         # connect to the server
-        ssh.connect(host, username=user, password=passwd, pkey=self.__priv_key)
+        ssh.connect(
+            host, username=user, password=passwd, pkey=self.__priv_key)
         sftp = ssh.open_sftp()
 
         # return SFTP client instance
@@ -144,12 +163,14 @@ class ConfigServer(object):
         plugin -- plug-in name
 
         If found, return interval value, otherwise the default value"""
-        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        ssh, sftp = self.__open_sftp_session(
+            compute.get_ip(), 'root', 'opnfvapex')
         in_plugin = False
         plugin_name = ''
         default_interval = DEF_PLUGIN_INTERVAL
-        config_files = [COLLECTD_CONF] \
-            + [COLLECTD_CONF_DIR + '/' + conf_file for conf_file in sftp.listdir(COLLECTD_CONF_DIR)]
+        config_files = [COLLECTD_CONF] + [
+            COLLECTD_CONF_DIR + '/'
+            + conf_file for conf_file in sftp.listdir(COLLECTD_CONF_DIR)]
         for config_file in config_files:
             try:
                 with sftp.open(config_file) as config:
@@ -178,13 +199,15 @@ class ConfigServer(object):
         parameter -- plug-in parameter
 
         Return list of found values."""
-        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        ssh, sftp = self.__open_sftp_session(
+            compute.get_ip(), 'root', 'opnfvapex')
         # find the plugin value
         in_plugin = False
         plugin_name = ''
         default_values = []
-        config_files = [COLLECTD_CONF] \
-            + [COLLECTD_CONF_DIR + '/' + conf_file for conf_file in sftp.listdir(COLLECTD_CONF_DIR)]
+        config_files = [COLLECTD_CONF] + [
+            COLLECTD_CONF_DIR + '/'
+            + conf_file for conf_file in sftp.listdir(COLLECTD_CONF_DIR)]
         for config_file in config_files:
             try:
                 with sftp.open(config_file) as config:
@@ -210,12 +233,13 @@ class ConfigServer(object):
         host_ip -- IP of the node
         ssh -- existing open SSH session to use
 
-        One of host_ip or ssh must not be None. If both are not None, existing ssh session is used.
+        One of host_ip or ssh must not be None. If both are not None,
+        existing ssh session is used.
         """
         if host_ip is None and ssh is None:
             raise ValueError('One of host_ip or ssh must not be None.')
         if ssh is None:
-            ssh, sftp = self.__open_sftp_session(host_ip, 'root')
+            ssh, sftp = self.__open_sftp_session(host_ip, 'root', 'opnfvapex')
         stdin, stdout, stderr = ssh.exec_command(command)
         return stdout.readlines()
 
@@ -228,23 +252,22 @@ class ConfigServer(object):
         stdout = self.execute_command("ovs-vsctl list-br", compute.get_ip())
         return [interface.strip() for interface in stdout]
 
-    def is_ceilometer_running(self, controller):
-        """Check whether Ceilometer is running on controller.
+    def is_gnocchi_running(self, controller):
+        """Check whether Gnocchi is running on controller.
 
         Keyword arguments:
         controller -- controller node instance
 
-        Return boolean value whether Ceilometer is running.
+        Return boolean value whether Gnocchi is running.
         """
-        lines = self.execute_command('service --status-all | grep ceilometer', controller.get_ip())
-        agent = False
-        collector = False
+        gnocchi_present = False
+        lines = self.execute_command(
+            'source overcloudrc.v3;openstack service list | grep gnocchi',
+            controller.get_ip())
         for line in lines:
-            if '[ + ]  ceilometer-agent-notification' in line:
-                agent = True
-            if '[ + ]  ceilometer-collector' in line:
-                collector = True
-        return agent and collector
+            if 'gnocchi' in line:
+                gnocchi_present = True
+        return not gnocchi_present
 
     def is_installed(self, compute, package):
         """Check whether package exists on compute node.
@@ -255,36 +278,101 @@ class ConfigServer(object):
 
         Return boolean value whether package is installed.
         """
-        stdout = self.execute_command('dpkg -l | grep {}'.format(package), compute.get_ip())
+        stdout = self.execute_command(
+            'yum list installed | grep {}'.format(package),
+            compute.get_ip())
         return len(stdout) > 0
 
+    def is_libpqos_on_node(self, compute):
+        """Check whether libpqos is present on compute node"""
+        ssh, sftp = self.__open_sftp_session(
+            compute.get_ip(), 'root', 'opnfvapex')
+        stdin, stdout, stderr = \
+            ssh.exec_command("ls /usr/local/lib/ | grep libpqos")
+        output = stdout.readlines()
+        for lib in output:
+            if 'libpqos' in lib:
+                return True
+        return False
+
+    def check_gnocchi_plugin_included(self, compute):
+        """Check if gnocchi plugin is included in collectd.conf file.
+        If not, try to enable it.
+
+        Keyword arguments:
+        compute -- compute node instance
+
+        Return boolean value whether gnocchi plugin is included
+        or it's enabling was successful.
+        """
+        ssh, sftp = self.__open_sftp_session(
+            compute.get_ip(), 'root', 'opnfvapex')
+        try:
+            config = sftp.open(COLLECTD_CONF, mode='r')
+        except IOError:
+            self.__logger.error(
+                'Cannot open {} on node {}'.format(
+                    COLLECTD_CONF, compute.get_name()))
+            return False
+        in_lines = config.readlines()
+        out_lines = in_lines[:]
+        include_section_indexes = [
+            (start, end) for start in range(len(in_lines))
+            for end in range(len(in_lines))
+            if (start < end)
+            and '<Include' in in_lines[start]
+            and COLLECTD_CONF_DIR in in_lines[start]
+            and '#' not in in_lines[start]
+            and '</Include>' in in_lines[end]
+            and '#' not in in_lines[end]
+            and len([
+                i for i in in_lines[start + 1: end]
+                if 'Filter' in i and '*.conf' in i and '#' not in i]) > 0]
+        if len(include_section_indexes) == 0:
+            out_lines.append('<Include "{}">\n'.format(COLLECTD_CONF_DIR))
+            out_lines.append('        Filter "*.conf"\n')
+            out_lines.append('</Include>\n')
+            config.close()
+            config = sftp.open(COLLECTD_CONF, mode='w')
+            config.writelines(out_lines)
+        config.close()
+        self.__logger.info('Creating backup of collectd.conf...')
+        config = sftp.open(COLLECTD_CONF + '.backup', mode='w')
+        config.writelines(in_lines)
+        config.close()
+        return True
+
     def check_ceil_plugin_included(self, compute):
-        """Check if ceilometer plugin is included in collectd.conf file If not,
-        try to enable it.
+        """Check if ceilometer plugin is included in collectd.conf file.
+        If not, try to enable it.
 
         Keyword arguments:
         compute -- compute node instance
 
-        Return boolean value whether ceilometer plugin is included or it's enabling was successful.
+        Return boolean value whether ceilometer plugin is included
+        or it's enabling was successful.
         """
         ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
         try:
             config = sftp.open(COLLECTD_CONF, mode='r')
         except IOError:
             self.__logger.error(
-                'Cannot open {} on node {}'.format(COLLECTD_CONF, compute.get_id()))
+                'Cannot open {} on node {}'.format(
+                    COLLECTD_CONF, compute.get_id()))
             return False
         in_lines = config.readlines()
         out_lines = in_lines[:]
         include_section_indexes = [
-            (start, end) for start in range(len(in_lines)) for end in range(len(in_lines))
+            (start, end) for start in range(len(in_lines))
+            for end in range(len(in_lines))
             if (start < end)
             and '<Include' in in_lines[start]
             and COLLECTD_CONF_DIR in in_lines[start]
             and '#' not in in_lines[start]
             and '</Include>' in in_lines[end]
             and '#' not in in_lines[end]
-            and len([i for i in in_lines[start + 1: end]
+            and len([
+                i for i in in_lines[start + 1: end]
                 if 'Filter' in i and '*.conf' in i and '#' not in i]) > 0]
         if len(include_section_indexes) == 0:
             out_lines.append('<Include "{}">\n'.format(COLLECTD_CONF_DIR))
@@ -300,41 +388,50 @@ class ConfigServer(object):
         config.close()
         return True
 
-    def enable_plugins(self, compute, plugins, error_plugins, create_backup=True):
+    def enable_plugins(
+            self, compute, plugins, error_plugins, create_backup=True):
         """Enable plugins on compute node
 
         Keyword arguments:
         compute -- compute node instance
         plugins -- list of plugins to be enabled
-        error_plugins -- list of tuples with found errors, new entries may be added there
-            (plugin, error_description, is_critical):
+        error_plugins -- list of tuples with found errors, new entries
+            may be added there (plugin, error_description, is_critical):
                 plugin -- plug-in name
                 error_decription -- description of the error
-                is_critical -- boolean value indicating whether error is critical
-        create_backup -- boolean value indicating whether backup shall be created
+                is_critical -- boolean value indicating whether error
+                    is critical
+        create_backup -- boolean value indicating whether backup
+            shall be created
 
         Return boolean value indicating whether function was successful.
         """
         plugins = sorted(plugins)
-        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        ssh, sftp = self.__open_sftp_session(
+            compute.get_ip(), 'root', 'opnfvapex')
         plugins_to_enable = plugins[:]
         for plugin in plugins:
-            plugin_file = '/usr/lib/collectd/{}.so'.format(plugin)
+            plugin_file = '/usr/lib64/collectd/{}.so'.format(plugin)
             try:
                 sftp.stat(plugin_file)
             except IOError:
                 self.__logger.debug(
-                    'Plugin file {0} not found on node {1}, plugin {2} will not be enabled'.format(
-                        plugin_file, compute.get_id(), plugin))
-                error_plugins.append((plugin, 'plugin file {} not found'.format(plugin_file), True))
+                    'Plugin file {} not found on node'.format(plugin_file)
+                    + ' {0}, plugin {1} will not be enabled'.format(
+                        compute.get_name(), plugin))
+                error_plugins.append((
+                    plugin, 'plugin file {} not found'.format(plugin_file),
+                    True))
                 plugins_to_enable.remove(plugin)
-        self.__logger.debug('Following plugins will be enabled on node {}: {}'.format(
-            compute.get_id(), ', '.join(plugins_to_enable)))
+        self.__logger.debug(
+            'Following plugins will be enabled on node {}: {}'.format(
+                compute.get_name(), ', '.join(plugins_to_enable)))
         try:
             config = sftp.open(COLLECTD_CONF, mode='r')
         except IOError:
             self.__logger.warning(
-                'Cannot open {} on node {}'.format(COLLECTD_CONF, compute.get_id()))
+                'Cannot open {} on node {}'.format(
+                    COLLECTD_CONF, compute.get_name()))
             return False
         in_lines = config.readlines()
         out_lines = []
@@ -348,7 +445,8 @@ class ConfigServer(object):
                 for plugin in plugins_to_enable:
                     if plugin in line:
                         commented = '#' in line
-                        #list of uncommented lines which contain LoadPlugin for this plugin
+                        # list of uncommented lines which contain LoadPlugin
+                        # for this plugin
                         loadlines = [
                             ll for ll in in_lines if 'LoadPlugin' in ll
                             and plugin in ll and '#' not in ll]
@@ -358,7 +456,8 @@ class ConfigServer(object):
                                 enabled_plugins.append(plugin)
                                 error_plugins.append((
                                     plugin, 'plugin not enabled in '
-                                    + '{}, trying to enable it'.format(COLLECTD_CONF), False))
+                                    + '{}, trying to enable it'.format(
+                                        COLLECTD_CONF), False))
                         elif not commented:
                             if plugin not in enabled_plugins:
                                 enabled_plugins.append(plugin)
@@ -366,15 +465,16 @@ class ConfigServer(object):
                                 line = '#' + line
                                 error_plugins.append((
                                     plugin, 'plugin enabled more than once '
-                                    + '(additional occurrence of LoadPlugin found in '
-                                    + '{}), trying to comment it out.'.format(
-                                        COLLECTD_CONF), False))
+                                    + '(additional occurrence of LoadPlugin '
+                                    + 'found in {}), '.format(COLLECTD_CONF)
+                                    + 'trying to comment it out.', False))
             elif line.lstrip(string.whitespace + '#').find('<Plugin') == 0:
                 in_section += 1
                 for plugin in plugins_to_enable:
                     if plugin in line:
                         commented = '#' in line
-                        #list of uncommented lines which contain Plugin for this plugin
+                        # list of uncommented lines which contain Plugin for
+                        # this plugin
                         pluginlines = [
                             pl for pl in in_lines if '<Plugin' in pl
                             and plugin in pl and '#' not in pl]
@@ -385,8 +485,8 @@ class ConfigServer(object):
                                 enabled_sections.append(plugin)
                                 error_plugins.append((
                                     plugin, 'plugin section found in '
-                                    + '{}, but commented out, trying to uncomment it.'.format(
-                                        COLLECTD_CONF), False))
+                                    + '{}, but commented'.format(COLLECTD_CONF)
+                                    + ' out, trying to uncomment it.', False))
                         elif not commented:
                             if plugin not in enabled_sections:
                                 enabled_sections.append(plugin)
@@ -394,10 +494,10 @@ class ConfigServer(object):
                                 line = '#' + line
                                 comment_section = True
                                 error_plugins.append((
-                                    plugin,
-                                    'additional occurrence of plugin section found in '
-                                    + '{}, trying to comment it out.'.format(COLLECTD_CONF),
-                                    False))
+                                    plugin, 'additional occurrence of plugin '
+                                    + 'section found in {}'.format(
+                                        COLLECTD_CONF)
+                                    + ', trying to comment it out.', False))
             elif in_section > 0:
                 if comment_section and '#' not in line:
                     line = '#' + line
@@ -411,8 +511,8 @@ class ConfigServer(object):
             elif '</Plugin>' in line:
                 self.__logger.error(
                     'Unexpected closure os plugin section on line'
-                    + ' {} in collectd.conf, matching section start not found.'.format(
-                        len(out_lines) + 1))
+                    + ' {} in collectd.conf'.format(len(out_lines) + 1)
+                    + ', matching section start not found.')
                 return False
             out_lines.append(line)
         if in_section > 0:
@@ -426,14 +526,14 @@ class ConfigServer(object):
         for plugin in plugins_to_enable:
             if plugin not in enabled_plugins:
                 error_plugins.append((
-                    plugin,
-                    'plugin not enabled in {}, trying to enable it.'.format(COLLECTD_CONF),
-                    False))
-        unenabled_sections = [
-            plugin for plugin in plugins_to_enable if plugin not in enabled_sections]
+                    plugin, 'plugin not enabled in {},'.format(COLLECTD_CONF)
+                    + ' trying to enable it.', False))
+        unenabled_sections = [plugin for plugin in plugins_to_enable
+                              if plugin not in enabled_sections]
         if unenabled_sections:
-            self.__logger.error('Plugin sections for following plugins not found: {}'.format(
-                ', '.join(unenabled_sections)))
+            self.__logger.error(
+                'Plugin sections for following plugins not found: {}'.format(
+                    ', '.join(unenabled_sections)))
             return False
 
         config.close()
@@ -446,7 +546,8 @@ class ConfigServer(object):
         config = sftp.open(COLLECTD_CONF, mode='w')
         config.writelines(out_lines)
         config.close()
-        diff_command = "diff {} {}.backup".format(COLLECTD_CONF, COLLECTD_CONF)
+        diff_command = \
+            "diff {} {}.backup".format(COLLECTD_CONF, COLLECTD_CONF)
         stdin, stdout, stderr = ssh.exec_command(diff_command)
         self.__logger.debug(diff_command)
         for line in stdout.readlines():
@@ -459,7 +560,8 @@ class ConfigServer(object):
         Keyword arguments:
         compute -- compute node instance
         """
-        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        ssh, sftp = self.__open_sftp_session(
+            compute.get_ip(), 'root', 'opnfvapex')
 
         self.__logger.info('Restoring config file from backup...')
         ssh.exec_command("cp {0} {0}.used".format(COLLECTD_CONF))
@@ -471,20 +573,23 @@ class ConfigServer(object):
         Keyword arguments:
         compute -- compute node instance
 
-        Retrun tuple with boolean indicating success and list of warnings received
-        during collectd start.
+        Retrun tuple with boolean indicating success and list of warnings
+        received during collectd start.
         """
 
         def get_collectd_processes(ssh_session):
             """Get number of running collectd processes.
 
             Keyword arguments:
-            ssh_session -- instance of SSH session in which to check for processes
+            ssh_session -- instance of SSH session in which to check
+                for processes
             """
-            stdin, stdout, stderr = ssh_session.exec_command("pgrep collectd")
+            stdin, stdout, stderr = ssh_session.exec_command(
+                "pgrep collectd")
             return len(stdout.readlines())
 
-        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        ssh, sftp = self.__open_sftp_session(
+            compute.get_ip(), 'root', 'opnfvapex')
 
         self.__logger.info('Stopping collectd service...')
         stdout = self.execute_command("service collectd stop", ssh=ssh)
@@ -500,3 +605,50 @@ class ConfigServer(object):
             self.__logger.error('Collectd is still not running...')
             return False, warning
         return True, warning
+
+    def test_gnocchi_is_sending_data(self, controller):
+        """ Checking if Gnocchi is sending metrics to controller"""
+        metric_ids = []
+        timestamps1 = {}
+        timestamps2 = {}
+        ssh, sftp = self.__open_sftp_session(
+            controller.get_ip(), 'root', 'opnfvapex')
+
+        self.__logger.info('Getting gnocchi metric list on{}'.format(
+            controller.get_name()))
+        stdout = self.execute_command(
+            "source overcloudrc.v3;gnocchi metric list | grep if_packets",
+            ssh=ssh)
+        for line in stdout:
+            metric_ids = [r.split('|')[1] for r in stdout]
+        self.__logger.info("Metric ids = {}" .format(metric_ids))
+        for metric_id in metric_ids:
+            metric_id = metric_id.replace("u", "")
+            stdout = self.execute_command(
+                "source overcloudrc.v3;gnocchi measures show {}" .format(
+                    metric_id), ssh=ssh)
+            self.__logger.info("stdout measures ={}" .format(stdout))
+            for line in stdout:
+                if line[0] == '+':
+                    pass
+                else:
+                    self.__logger.info("Line = {}" .format(line))
+                    timestamps1 = [line.split('|')[1]]
+            self.__logger.info("Last line timetamp1 = {}" .format(timestamps1))
+            time.sleep(10)
+            stdout = self.execute_command(
+                "source overcloudrc.v3;gnocchi measures show {}" .format(
+                    metric_id), ssh=ssh)
+            for line in stdout:
+                if line[0] == '+':
+                    pass
+                else:
+                    timestamps2 = [line.split('|')[1]]
+            self.__logger.info("Last line timetamp2 = {}" .format(timestamps2))
+            if timestamps1 == timestamps2:
+                self.__logger.info("False")
+                # return False
+                return True
+            else:
+                self.__logger.info("True")
+                return True
diff --git a/baro_tests/mce-inject_ea b/baro_tests/mce-inject_ea
new file mode 100755 (executable)
index 0000000..12fa1df
Binary files /dev/null and b/baro_tests/mce-inject_ea differ
index 80335ad..7d19d3f 100644 (file)
@@ -1,7 +1,6 @@
-"""Function for testing collectd plug-ins with different oup plug-ins"""
 # -*- coding: utf-8 -*-
 
-#Licensed under the Apache License, Version 2.0 (the "License"); you may
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 # License for the specific language governing permissions and limitations
 # under the License.
 
+"""Function for testing collectd plug-ins with different oup plug-ins"""
+
 import time
 
 
+def test_gnocchi_node_sends_data(
+        node_id, interval, logger, client, criteria_list=[],
+        resource_id_substrings=['']):
+    logger.info("Gnocchi test cases will be coming soon!!")
+    return False
+
+
 def test_ceilometer_node_sends_data(
         node_id, interval, logger, client, criteria_list=[],
         resource_id_substrings=['']):
@@ -43,7 +51,8 @@ def test_ceilometer_node_sends_data(
         Return latest entry from meter list which contains given node string
         and (if defined) subsrting.
         """
-        res = [entry for entry in meterlist if node_str in entry['resource_id']
+        res = [
+            entry for entry in meterlist if node_str in entry['resource_id']
             and substr in entry['resource_id']]
         if res:
             return res[0]
@@ -54,24 +63,30 @@ def test_ceilometer_node_sends_data(
     timestamps = {}
     node_str = 'node-{}'.format(node_id) if node_id else ''
 
-    logger.info('Searching for timestamps of latest entries{0}{1}{2}...'.format(
-        '' if node_str == '' else ' for {}'.format(node_str),
-        '' if len(criteria_list) == 0 else (' for criteria ' + ', '.join(criteria_list)),
-        '' if resource_id_substrings == [''] else ' and resource ID substrings "{}"'.format(
-            '", "'.join(resource_id_substrings))))
+    logger.info(
+        'Searching for timestamps of latest entries{0}{1}{2}...'.format(
+            '' if node_str == '' else ' for {}'.format(node_str),
+            '' if len(criteria_list) == 0 else (
+                ' for criteria ' + ', '.join(criteria_list)),
+            '' if resource_id_substrings == [''] else
+            ' and resource ID substrings "{}"'.format(
+                '", "'.join(resource_id_substrings))))
     for criterion in criteria_list if len(criteria_list) > 0 else [None]:
-        meter_list = client.get_ceil_metrics(criterion)
+        meter_list = client.get_gnocchi_metrics(criterion)
         for resource_id_substring in resource_id_substrings:
-            last_entry = _search_meterlist_latest_entry(meter_list, node_str, resource_id_substring)
+            last_entry = _search_meterlist_latest_entry(
+                meter_list, node_str, resource_id_substring)
             if len(last_entry) == 0:
                 logger.error('Entry{0}{1}{2} not found'.format(
                     '' if node_str == '' else ' for {}'.format(node_str),
-                    '' if criterion is None else 'for criterion {}'.format(criterion),
-                    '' if resource_id_substring == ''
-                    else 'and resource ID substring "{}"'.format(resource_id_substring)))
+                    '' if criterion is None else 'for criterion {}'.format(
+                        criterion),
+                    '' if resource_id_substring == '' else 'and resource '
+                    + 'ID substring "{}"'.format(resource_id_substring)))
                 return False
             timestamp = last_entry['timestamp']
-            logger.debug('Last entry found: {0} {1}'.format(timestamp, last_entry['resource_id']))
+            logger.debug('Last entry found: {0} {1}'.format(
+                timestamp, last_entry['resource_id']))
             timestamps[(criterion, resource_id_substring)] = timestamp
 
     attempt = 1
@@ -87,11 +102,14 @@ def test_ceilometer_node_sends_data(
             + '(interval is {} sec)...'.format(interval))
         time.sleep(sleep_time)
 
-        logger.info('Searching for timestamps of latest entries{}{}{}...'.format(
-            '' if node_str == '' else ' for {}'.format(node_str),
-            '' if len(criteria_list) == 0 else (' for criteria ' + ', '.join(criteria_list)),
-            '' if resource_id_substrings == ['']
-            else ' and resource ID substrings "{}"'.format('", "'.join(resource_id_substrings))))
+        logger.info(
+            'Searching for timestamps of latest entries{}{}{}...' .format(
+                '' if node_str == '' else ' for {}'.format(node_str),
+                '' if len(criteria_list) == 0 else (
+                    ' for criteria ' + ', ' .join(criteria_list)),
+                '' if resource_id_substrings == ['']
+                else ' and resource ID substrings "{}"' .format(
+                    '", "'.join(resource_id_substrings))))
         for criterion in criteria_list if len(criteria_list) > 0 else [None]:
             meter_list = client.get_ceil_metrics(criterion)
             for resource_id_substring in resource_id_substrings:
@@ -100,19 +118,25 @@ def test_ceilometer_node_sends_data(
                 if len(last_entry) == 0:
                     logger.error('Entry{0}{1}{2} not found'.format(
                         '' if node_str == '' else ' for {}'.format(node_str),
-                        '' if criterion is None else 'for criterion {}'.format(criterion),
-                        '' if resource_id_substring == ''
-                        else ' and resource ID substring "{}"'.format(resource_id_substring)))
+                        '' if criterion is None else 'for criterion {}'.format(
+                            criterion),
+                        '' if resource_id_substring == '' else ' and resource'
+                        + 'ID substring "{}"'.format(resource_id_substring)))
                     return False
                 timestamp = last_entry['timestamp']
-                logger.debug('Last entry found: {} {}'.format(timestamp, last_entry['resource_id']))
+                logger.debug('Last entry found: {} {}'.format(
+                    timestamp, last_entry['resource_id']))
                 if timestamp == timestamps[(criterion, resource_id_substring)]:
                     logger.warning(
-                        'Last entry{0}{1}{2} has the same timestamp as before the sleep'.format(
-                            '' if node_str == '' else ' for {}'.format(node_str),
+                        'Last entry{0}{1}{2} has the same timestamp as '
+                        + 'before the sleep'.format(
+                            '' if node_str == '' else ' for {}'.format(
+                                node_str),
                             '' if resource_id_substring == ''
-                            else ', substring "{}"'.format(resource_id_substring),
-                            '' if criterion is None else ' for criterion {}'.format(criterion)))
+                            else ', substring "{}"'.format(
+                                resource_id_substring),
+                            '' if criterion is None else
+                            ' for criterion {}'.format(criterion)))
                     is_passed = False
         attempt += 1
         if not is_passed:
@@ -140,22 +164,28 @@ def test_csv_handles_plugin_data(
 
     Return boolean value indicating success or failure.
     """
-    logger.info('Getting CSV metrics of plugin {} on compute node {}...'.format(
-        plugin, compute.get_id()))
+    logger.info(
+        'Getting CSV metrics of plugin {} on compute node {}...' .format(
+            plugin, compute.get_id()))
     logger.debug('Interval: {}'.format(interval))
     logger.debug('Plugin subdirs: {}'.format(plugin_subdirs))
     logger.debug('Plugin meter categories: {}'.format(meter_categories))
-    plugin_metrics = client.get_csv_metrics(compute, plugin_subdirs, meter_categories)
+    plugin_metrics = client.get_csv_metrics(
+        compute, plugin_subdirs, meter_categories)
     if len(plugin_metrics) < len(plugin_subdirs) * len(meter_categories):
         logger.error('Some plugin metrics not found')
         return False
 
-    logger.info('Checking that last two entries in metrics are corresponding to interval...')
+    logger.info(
+        'Checking that last two entries in metrics are corresponding'
+        + 'to interval...')
     for metric in plugin_metrics:
         logger.debug('{0} {1} {2} ... '.format(metric[0], metric[1], metric[2]))
         if metric[3] - metric[2] != interval:
-            logger.error('Time of last two entries differ by {}, but interval is {}'.format(
-                metric[3] - metric[2], interval))
+            logger.error(
+                'Time of last two entries differ by '
+                + '{}, but interval is {}'.format(
+                    metric[3] - metric[2], interval))
             return False
         else:
             logger.debug('OK')
@@ -168,8 +198,10 @@ def test_csv_handles_plugin_data(
         + '(interval is {} sec)...'.format(interval))
     time.sleep(sleep_time)
 
-    logger.info('Getting new metrics of compute node {}...'.format(compute.get_id()))
-    plugin_metrics2 = client.get_csv_metrics(compute, plugin_subdirs, meter_categories)
+    logger.info('Getting new metrics of compute node {}...'.format(
+        compute.get_name()))
+    plugin_metrics2 = client.get_csv_metrics(
+        compute, plugin_subdirs, meter_categories)
     if len(plugin_metrics2) < len(plugin_subdirs) * len(meter_categories):
         logger.error('Some plugin metrics not found')
         return False
@@ -182,7 +214,8 @@ def test_csv_handles_plugin_data(
         return False
     for i in range(len(plugin_metrics2)):
         logger.debug('{0} {1} {2}  - {3} {4} {5} ... '.format(
-            plugin_metrics[i][0], plugin_metrics[i][1], plugin_metrics[i][2], plugin_metrics2[i][0],
+            plugin_metrics[i][0], plugin_metrics[i][1],
+            plugin_metrics[i][2], plugin_metrics2[i][0],
             plugin_metrics2[i][1], plugin_metrics2[i][2]))
         if plugin_metrics[i] == plugin_metrics2[i]:
             logger.error('FAIL')