[ansible][fedora] Update package name
[barometer.git] / baro_tests / config_server.py
index 2a4bc16..a6849f0 100644 (file)
@@ -1,16 +1,19 @@
 # -*- coding: utf-8 -*-
 #
+# Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
-#      http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+#
 
 """Classes used by collectd.py"""
 
@@ -18,6 +21,7 @@ import time
 import os.path
 import os
 import re
+import yaml
 
 from opnfv.deployment import factory
 import paramiko
@@ -34,6 +38,8 @@ APEX_IP = os.getenv("INSTALLER_IP").rstrip('\n')
 APEX_USER = 'root'
 APEX_USER_STACK = 'stack'
 APEX_PKEY = '/root/.ssh/id_rsa'
+TEST_VM_IMAGE = 'cirros-0.4.0-x86_64-disk.img'
+TEST_VM_IMAGE_PATH = '/home/opnfv/functest/images/' + TEST_VM_IMAGE
 
 
 class Node(object):
@@ -300,6 +306,97 @@ class ConfigServer(object):
                     return False
         return aodh_present
 
+    def is_redis_running(self, compute):
+        """Return True if the barometer-redis container runs on compute.
+
+        Keyword arguments:
+        compute -- compute node instance
+        """
+        target = compute.get_name()
+        for apex_node in get_apex_nodes():
+            if apex_node.get_dict()['name'] != target:
+                continue
+            # Only list containers when docker itself is up, then look
+            # for the redis container by name.
+            output = apex_node.run_cmd('sudo systemctl status docker'
+                                       '&& sudo docker ps'
+                                       '| grep barometer-redis')
+            if output and 'barometer-redis' in output:
+                self.__logger.info(
+                    'Redis is running in node {}'.format(
+                     target))
+                return True
+        self.__logger.info(
+            'Redis is *not* running in node {}'.format(
+             target))
+        return False
+
+    def is_dma_server_running(self, compute):
+        """Check whether DMA server is running on compute.
+
+        Keyword arguments:
+        compute -- compute node instance
+
+        Returns True when the matching apex node runs a barometer-dma
+        container whose listing shows a '/server' process.
+        """
+        compute_name = compute.get_name()
+        nodes = get_apex_nodes()
+        for node in nodes:
+            if compute_name == node.get_dict()['name']:
+                # Only list containers when docker itself is up; the grep
+                # narrows output to the barometer-dma container line.
+                stdout = node.run_cmd('sudo systemctl status docker'
+                                      '&& sudo docker ps'
+                                      '| grep opnfv/barometer-dma')
+                # '/server' distinguishes the server container from the
+                # infofetch one (both use the same image).
+                if stdout and '/server' in stdout:
+                    self.__logger.info(
+                        'DMA Server is running in node {}'.format(
+                         compute_name))
+                    return True
+        self.__logger.info(
+            'DMA Server is *not* running in node {}'.format(
+             compute_name))
+        return False
+
+    def is_dma_infofetch_running(self, compute):
+        """Check whether DMA infofetch is running on compute.
+
+        Keyword arguments:
+        compute -- compute node instance
+
+        Returns True when the matching apex node runs a barometer-dma
+        container whose listing shows an '/infofetch' process.
+        """
+        compute_name = compute.get_name()
+        nodes = get_apex_nodes()
+        for node in nodes:
+            if compute_name == node.get_dict()['name']:
+                # Only list containers when docker itself is up; the grep
+                # narrows output to the barometer-dma container line.
+                stdout = node.run_cmd('sudo systemctl status docker'
+                                      '&& sudo docker ps'
+                                      '| grep opnfv/barometer-dma')
+                # '/infofetch' distinguishes the infofetch container from
+                # the server one (both use the same image).
+                if stdout and '/infofetch' in stdout:
+                    self.__logger.info(
+                        'DMA InfoFetch is running in node {}'.format(
+                         compute_name))
+                    return True
+        self.__logger.info(
+            'DMA InfoFetch is *not* running in node {}'.format(
+             compute_name))
+        return False
+
+    def get_dma_config(self, compute):
+        """Get config values of DMA.
+
+        Keyword arguments:
+        compute -- compute node instance
+
+        Returns {"server": {...}} holding the listen_port/amqp_* settings
+        from /etc/barometer-dma/config.toml, with amqp_host rewritten to
+        the IP address it pings back from, or None if the compute node is
+        not among the apex nodes.
+        """
+        compute_name = compute.get_name()
+        nodes = get_apex_nodes()
+        for node in nodes:
+            if compute_name == node.get_dict()['name']:
+                # We use following after functest accept python-toml
+                #     stdout = node.run_cmd(
+                #         'cat /etc/barometer-dma/config.toml')
+                #     try:
+                #         agent_conf = toml.loads(stdout)
+                #     except (TypeError, TomlDecodeError) as e:
+                #         self.__logger.error(
+                #             'DMA config error: {}'.format(e))
+                #         agent_conf = None
+                #     finally:
+                #         return agent_conf
+                # Until toml is available: egrep picks the relevant keys
+                # and sed strips comments and rewrites "key = value" into
+                # "key: value" so the snippet parses as YAML instead.
+                readcmd = (
+                    'egrep "listen_port|amqp_"'
+                    ' /etc/barometer-dma/config.toml'
+                    '| sed -e "s/#.*$//" | sed -e "s/=/:/"'
+                    )
+                stdout = node.run_cmd(readcmd)
+                # NOTE(review): assumes run_cmd returned parseable text and
+                # that amqp_host is present -- KeyError below otherwise.
+                agent_conf = {"server": yaml.safe_load(stdout)}
+
+                # Resolve amqp_host to an IP address by scraping the reply
+                # line of a single ping (-n keeps it numeric).
+                pingcmd = (
+                    'ping -n -c1 ' + agent_conf["server"]["amqp_host"] +
+                    '| sed -ne "s/^.*bytes from //p" | sed -e "s/:.*//"'
+                    )
+                agent_conf["server"]["amqp_host"] = node.run_cmd(pingcmd)
+
+                return agent_conf
+        return None
+
     def is_mcelog_installed(self, compute, package):
         """Check whether package exists on compute node.
 
@@ -479,6 +576,16 @@ class ConfigServer(object):
                     return False, warning
         return True, warning
 
+    def trigger_alarm_update(self, alarm, compute_node):
+        """Trigger an event on compute_node so the matching AODH alarm
+        gets a fresh state_timestamp.
+
+        Keyword arguments:
+        alarm -- a single criterion name or a list of names; callers in
+                 this file pass criteria_list (a list)
+        compute_node -- compute node instance the commands run on
+        """
+        # TODO: move these actions to main, with criteria lists so that we
+        # can reference that, i.e. test_plugin_with_aodh(self, compute,
+        # plugin.., logger, criteria_list, alarm_action)
+        # Accept both a bare string and a list: the callers pass
+        # criteria_list, so a plain string comparison never matched.
+        alarms = [alarm] if isinstance(alarm, str) else alarm
+        if 'mcelog' in alarms:
+            # Inject a corrected machine-check error to tick the alarm.
+            compute_node.run_cmd('sudo modprobe mce-inject')
+            compute_node.run_cmd('sudo ./mce-inject_ea < corrected')
+        if 'ovs_events' in alarms:
+            # Bounce br0 to generate an OVS link-state event.
+            compute_node.run_cmd('sudo ifconfig -a | grep br0')
+            compute_node.run_cmd('sudo ifconfig br0 down; sudo ifconfig br0 up')
+
     def test_plugins_with_aodh(
             self, compute, plugin_interval, logger,
             criteria_list=[]):
@@ -487,11 +594,13 @@ class ConfigServer(object):
         timestamps1 = {}
         timestamps2 = {}
         nodes = get_apex_nodes()
+        compute_node = [node for node in nodes if node.get_dict()['name'] == compute][0]
         for node in nodes:
             if node.is_controller():
                 self.__logger.info('Getting AODH Alarm list on {}' .format(
                     (node.get_dict()['name'])))
                 node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
+                self.trigger_alarm_update(criteria_list, compute_node)
                 stdout = node.run_cmd(
                     "source overcloudrc.v3;"
                     + "aodh alarm list | grep {0} | grep {1}"
@@ -510,10 +619,9 @@ class ConfigServer(object):
                         return False
                     for line in stdout.splitlines()[3: -1]:
                         line = line.replace('|', "")
-                        if line.split()[0] == 'timestamp':
+                        if line.split()[0] == 'state_timestamp':
                             timestamps1 = line.split()[1]
-                        else:
-                            pass
+                    self.trigger_alarm_update(criteria_list, compute_node)
                     time.sleep(12)
                     stdout = node.run_cmd(
                         "source overcloudrc.v3; aodh alarm show {}" .format(
@@ -523,10 +631,8 @@ class ConfigServer(object):
                         return False
                     for line in stdout.splitlines()[3:-1]:
                         line = line.replace('|', "")
-                        if line.split()[0] == 'timestamp':
+                        if line.split()[0] == 'state_timestamp':
                             timestamps2 = line.split()[1]
-                        else:
-                            pass
                     if timestamps1 == timestamps2:
                         self.__logger.info(
                             "Data not updated after interval of 12 seconds")
@@ -604,7 +710,7 @@ class ConfigServer(object):
             self, compute, plugin_interval, logger, plugin, snmp_mib_files=[],
             snmp_mib_strings=[], snmp_in_commands=[]):
 
-        if plugin == 'hugepages' or 'intel_rdt' or 'mcelog':
+        if plugin in ('hugepages', 'intel_rdt', 'mcelog'):
             nodes = get_apex_nodes()
             for node in nodes:
                 if compute == node.get_dict()['name']:
@@ -651,3 +757,124 @@ class ConfigServer(object):
                         return True
         else:
             return False
+
+    def check_dma_dummy_included(self, compute, name):
+        """Check if dummy collectd config by DMA
+           is included in collectd.conf file.
+
+        Keyword arguments:
+        compute -- compute node instance
+        name -- config file name
+
+        Returns True (and deletes the file) when <name>.conf exists in
+        /etc/collectd/collectd.conf.d, False otherwise.
+        """
+        compute_name = compute.get_name()
+        nodes = get_apex_nodes()
+        for node in nodes:
+            if compute_name == node.get_dict()['name']:
+                dummy_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
+                # run_cmd may yield no output; treat that as "not found"
+                # instead of raising TypeError on the membership test.
+                if not dummy_conf or name + '.conf' not in dummy_conf:
+                    self.__logger.error('check conf FAIL')
+                    return False
+                else:
+                    self.__logger.info('check conf PASS')
+                    fullpath = '/etc/collectd/collectd.conf.d/{}'.format(
+                               name + '.conf')
+                    # Clean up so a re-run starts from a known state.
+                    self.__logger.info('Delete file {}'.format(fullpath))
+                    node.run_cmd('sudo rm -f ' + fullpath)
+                    return True
+        self.__logger.error('Some panic, compute not found')
+        return False
+
+    def create_testvm(self, compute_node, test_name):
+        """Boot a test VM named test_name pinned to the given compute.
+
+        Creates an image, a flavor and a server from the first controller
+        node and returns their ids as
+        {"image": ..., "flavor": ..., "server": ...}.
+
+        Keyword arguments:
+        compute_node -- compute node instance the VM must land on
+        test_name -- name used for the image, flavor and server
+        """
+        nodes = get_apex_nodes()
+        compute_name = compute_node.get_name()
+
+        controller_node = None
+        for node in nodes:
+            if node.is_controller():
+                controller_node = node
+                break
+
+        self.__logger.debug('Creating Test VM on {}' .format(compute_name))
+        self.__logger.debug('Create command is executed in {}' .format(
+            (controller_node.get_dict()['name'])))
+
+        # Fix: the credentials and image must be uploaded to the
+        # controller that runs the openstack commands below -- the old
+        # code used the stale loop variable (last node iterated).
+        controller_node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
+        controller_node.put_file(TEST_VM_IMAGE_PATH, TEST_VM_IMAGE)
+        image = controller_node.run_cmd(
+            'source overcloudrc.v3;'
+            'openstack image create -f value -c id'
+            ' --disk-format qcow2 --file {0} {1}'
+            .format(TEST_VM_IMAGE, test_name))
+        flavor = controller_node.run_cmd(
+            'source overcloudrc.v3;'
+            'openstack flavor create -f value -c id {}'
+            .format(test_name))
+        # Resolve the compute's fully-qualified hypervisor hostname so the
+        # server can be pinned with an availability-zone hint.
+        host = controller_node.run_cmd(
+            'source overcloudrc.v3;'
+            'openstack hypervisor list -f value -c "Hypervisor Hostname"'
+            ' | grep "^{}\\."'
+            .format(compute_name))
+        server = controller_node.run_cmd(
+            'source overcloudrc.v3;'
+            'openstack server create -f value -c id'
+            ' --image {0} --flavor {1} --availability-zone {2} {3}'
+            .format(image, flavor, 'nova:' + host, test_name))
+
+        resources = {"image": image, "flavor": flavor, "server": server}
+
+        if server:
+            self.__logger.debug('VM created')
+        self.__logger.debug('VM info: {}'.format(resources))
+
+        return resources
+
+    def delete_testvm(self, resources):
+        """Delete the OpenStack resources created by create_testvm.
+
+        Keyword arguments:
+        resources -- dict with optional "server", "flavor" and "image"
+                     ids; missing/empty entries are skipped
+        """
+        nodes = get_apex_nodes()
+
+        controller_node = None
+        for node in nodes:
+            if node.is_controller():
+                controller_node = node
+                break
+
+        # Fix: typo in log message ('Deleteing').
+        self.__logger.debug('Deleting Test VM')
+        self.__logger.debug('VM to be deleted info: {}'.format(resources))
+        self.__logger.debug('Delete command is executed in {}' .format(
+            (controller_node.get_dict()['name'])))
+
+        server = resources.get('server', None)
+        flavor = resources.get('flavor', None)
+        image = resources.get('image', None)
+        # Delete in dependency order: server first, then flavor and image.
+        if server:
+            controller_node.run_cmd(
+                'source overcloudrc.v3;'
+                'openstack server delete {}'.format(server))
+        if flavor:
+            controller_node.run_cmd(
+                'source overcloudrc.v3;'
+                'openstack flavor delete {}'.format(flavor))
+        if image:
+            controller_node.run_cmd(
+                'source overcloudrc.v3;'
+                'openstack image delete {}'.format(image))
+
+        self.__logger.debug('VM and other OpenStack resources deleted')
+
+    def test_dma_infofetch_get_data(self, compute, test_name):
+        """Return True if DMA InfoFetch stored a vminfo record for
+        test_name in redis on the given compute node.
+        """
+        target = compute.get_name()
+        for apex_node in get_apex_nodes():
+            if apex_node.get_dict()['name'] != target:
+                continue
+            # Dump every vminfo record from redis and filter for our VM.
+            output = apex_node.run_cmd(
+                'redis-cli keys "barometer-dma/vm/*/vminfo"'
+                ' | while read k; do redis-cli get $k; done'
+                ' | grep {}'.format(test_name))
+            self.__logger.debug('InfoFetch data: {}'.format(output))
+            if output and test_name in output:
+                self.__logger.info('PASS')
+                return True
+            self.__logger.info('No test vm info')
+
+        self.__logger.info('FAIL')
+        return False