Initial Barometer Functest Scripts 77/28877/3
author Calin Gherghe <calin.gherghe@intel.com>
Thu, 16 Feb 2017 13:07:20 +0000 (08:07 -0500)
committer Calin Gherghe <calin.gherghe@intel.com>
Fri, 17 Feb 2017 18:33:18 +0000 (18:33 +0000)
This patch adds scripts to be used with Functest for the
Barometer project. They will be expanded in future patchsets.

Change-Id: I627f5e9c2b0d693f34bdc4f205989b0913f338db
Signed-off-by: Calin Gherghe <calin.gherghe@intel.com>
baro_tests/collectd.py
baro_tests/config_server.py [new file with mode: 0644]
baro_tests/tests.py [new file with mode: 0644]
baro_utils/get_ssh_keys.sh [new file with mode: 0755]

diff --git a/baro_tests/collectd.py b/baro_tests/collectd.py
index 5631cf5..3f2067a 100644
-#!/usr/bin/python
+"""Executing test of plugins"""
+# -*- coding: utf-8 -*-
 
-import sys
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
 
+import os
+import sys
+import time
+import logging
+
+import requests
+from keystoneclient.v3 import client
+
+from config_server import ConfigServer
+from tests import test_ceilometer_node_sends_data, test_csv_handles_plugin_data
 
-def main(logger):
-    logger.info("Running Baromtercollectd test suite...")
-    #
-    # TODO: implementation
-    #
-    logger.info("Test suite successfully completed.")
+CEILOMETER_NAME = 'ceilometer'
+
+
+class KeystoneException(Exception):
+    """Keystone exception class"""
+    def __init__(self, message, exc=None, response=None):
+        """
+        Keyword arguments:
+        message -- error message
+        exc -- exception
+        response -- response
+        """
+        if exc:
+            message += "\nReason: %s" % exc
+        super(KeystoneException, self).__init__(message)
+
+        self.response = response
+        self.exception = exc
+
+
+class InvalidResponse(KeystoneException):
+    """Invalid Keystone exception class"""
+    def __init__(self, exc, response):
+        """
+        Keyword arguments:
+        exc -- exception
+        response -- response
+        """
+        super(InvalidResponse, self).__init__(
+            "Invalid response", exc, response)
+
+
+class CeilometerClient(object):
+    """Ceilometer Client to authenticate and request meters"""
+    def __init__(self, bc_logger):
+        """
+        Keyword arguments:
+        bc_logger - logger instance
+        """
+        self._auth_token = None
+        self._ceilometer_url = None
+        self._meter_list = None
+        self._logger = bc_logger
+
+    def auth_token(self):
+        """Get auth token"""
+        self._auth_server()
+        return self._auth_token
+
+    def get_ceilometer_url(self):
+        """Get Ceilometer URL"""
+        return self._ceilometer_url
+
+    def get_ceil_metrics(self, criteria=None):
+        """Get Ceilometer metrics for given criteria
+
+        Keyword arguments:
+        criteria -- criteria for ceilometer meter list
+        """
+        self._request_meters(criteria)
+        return self._meter_list
+
+    def _auth_server(self):
+        """Request token in authentication server"""
+        self._logger.debug('Connecting to the auth server {}'.format(os.environ['OS_AUTH_URL']))
+        keystone = client.Client(username=os.environ['OS_USERNAME'],
+                                 password=os.environ['OS_PASSWORD'],
+                                 tenant_name=os.environ['OS_TENANT_NAME'],
+                                 auth_url=os.environ['OS_AUTH_URL'])
+        self._auth_token = keystone.auth_token
+        for service in keystone.service_catalog.get_data():
+            if service['name'] == CEILOMETER_NAME:
+                for service_type in service['endpoints']:
+                    if service_type['interface'] == 'internal':
+                        self._ceilometer_url = service_type['url']
+                        break
+
+        if self._ceilometer_url is None:
+            self._logger.warning('Ceilometer is not registered in service catalog')
+
+    def _request_meters(self, criteria):
+        """Request meter list values from ceilometer
+
+        Keyword arguments:
+        criteria -- criteria for ceilometer meter list
+        """
+        if criteria is None:
+            url = self._ceilometer_url + '/v2/samples?limit=400'
+        else:
+            url = self._ceilometer_url + (
+                '/v2/meters/%s?q.field=resource_id&limit=400' % criteria)
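+        # For example, for criteria 'mcelog.errors' the request goes to
+        # <ceilometer_url>/v2/meters/mcelog.errors?q.field=resource_id&limit=400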
+        headers = {'X-Auth-Token': self._auth_token}
+        resp = requests.get(url, headers=headers)
+        try:
+            resp.raise_for_status()
+            self._meter_list = resp.json()
+        except (KeyError, ValueError, requests.exceptions.HTTPError) as err:
+            raise InvalidResponse(err, resp)
+
+
+class CSVClient(object):
+    """Client to request CSV meters"""
+    def __init__(self, bc_logger, conf):
+        """
+        Keyword arguments:
+        bc_logger - logger instance
+        conf -- ConfigServer instance
+        """
+        self._logger = bc_logger
+        self.conf = conf
+
+    def get_csv_metrics(self, compute_node, plugin_subdirectories, meter_categories):
+        """Get CSV metrics.
+
+        Keyword arguments:
+        compute_node -- compute node instance
+        plugin_subdirectories -- list of subdirectories of plug-in
+        meter_categories -- categories which will be tested
+
+        Return list of metrics.
+        """
+        stdout = self.conf.execute_command("date '+%Y-%m-%d'", compute_node.get_ip())
+        date = stdout[0].strip()
+        metrics = []
+        for plugin_subdir in plugin_subdirectories:
+            for meter_category in meter_categories:
+                stdout = self.conf.execute_command(
+                    "tail -2 /var/lib/collectd/csv/node-"
+                    + "{0}.domain.tld/{1}/{2}-{3}".format(
+                        compute_node.get_id(), plugin_subdir, meter_category, date),
+                    compute_node.get_ip())
+                # Storing last two values
+                values = stdout
+                if len(values) < 2:
+                    self._logger.error(
+                        'Getting last two CSV entries of meter category '
+                        + '{0} in {1} subdir failed'.format(meter_category, plugin_subdir))
+                else:
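+                    # Each CSV row is assumed to start with an epoch
+                    # timestamp, e.g. "1487253600.123,4096"; keep only the
+                    # integer part of the timestamp for interval checks.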
+                    old_value = int(values[0][0:values[0].index('.')])
+                    new_value = int(values[1][0:values[1].index('.')])
+                    metrics.append((plugin_subdir, meter_category, old_value, new_value))
+        return metrics
+
+
+def _check_logger():
+    """Check whether a global logger is available and, if not, define one."""
+    if 'logger' not in globals():
+        global logger
+        # Fall back to a plain stdlib logger; when run under Functest, the
+        # caller passes its own logger to main() instead.
+        logger = logging.getLogger("barometercollectd")
+
+
+def _process_result(compute_node, test, result, results_list):
+    """Print test result and append it to results list.
+
+    Keyword arguments:
+    test -- testcase name
+    result -- boolean test result
+    results_list -- results list
+    """
+    if result:
+        logger.info('Compute node {0} test case {1} PASSED.'.format(compute_node, test))
+    else:
+        logger.error('Compute node {0} test case {1} FAILED.'.format(compute_node, test))
+    results_list.append((compute_node, test, result))
+
+
+def _print_label(label):
+    """Print label on the screen
+
+    Keyword arguments:
+    label -- label string
+    """
+    label = label.strip()
+    length = 70
+    if label != '':
+        label = ' ' + label + ' '
+    length_label = len(label)
+    length1 = (length - length_label) / 2
+    length2 = length - length_label - length1
+    length1 = max(3, length1)
+    length2 = max(3, length2)
+    logger.info(('=' * length1) + label + ('=' * length2))
+
+
+def _print_plugin_label(plugin, node_id):
+    """Print plug-in label.
+
+    Keyword arguments:
+    plugin -- plug-in name
+    node_id -- node ID
+    """
+    _print_label('Node {0}: Plug-in {1} Test case execution'.format(node_id, plugin))
+
+
+def _print_final_result_of_plugin(plugin, compute_ids, results, out_plugins, out_plugin):
+    """Print final results of plug-in.
+
+    Keyword arguments:
+    plugin -- plug-in name
+    compute_ids -- list of compute node IDs
+    results -- results list
+    out_plugins -- list of out plug-ins
+    out_plugin -- used out plug-in
+    """
+    print_line = ''
+    for id in compute_ids:
+        if out_plugins[id] == out_plugin:
+            if (id, plugin, True) in results:
+                print_line += ' PASS   |'
+            elif (id, plugin, False) in results and out_plugins[id] == out_plugin:
+                print_line += ' FAIL   |'
+            else:
+                print_line += ' NOT EX |'
+        elif out_plugin == 'Ceilometer':
+            print_line += ' NOT EX |'
+        else:
+            print_line += ' SKIP   |'
+    return print_line
+
+
+def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
+    """Print overall summary table.
+
+    Keyword arguments:
+    compute_ids -- list of compute IDs
+    tested_plugins -- list of plug-ins
+    results -- results list
+    out_plugins --  list of used out plug-ins
+    """
+    compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
+    all_computes_in_line = ''
+    for compute in compute_node_names:
+        all_computes_in_line = all_computes_in_line + '| ' + compute + (' ' * (7 - len(compute)))
+    line_of_nodes = '| Test           ' + all_computes_in_line + '|'
+    logger.info('=' * 70)
+    logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
+    logger.info(
+        '|' + ' ' * ((9*len(compute_node_names))/2) + ' OVERALL SUMMARY'
+        + ' ' * (9*len(compute_node_names) - (9*len(compute_node_names))/2) + '|')
+    logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+    logger.info(line_of_nodes)
+    logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+    out_plugins_print = ['Ceilometer']
+    if 'CSV' in out_plugins.values():
+        out_plugins_print.append('CSV')
+    for out_plugin in out_plugins_print:
+        output_plugins_line = ''
+        for id in compute_ids:
+            out_plugin_result = '----'
+            if out_plugin == 'Ceilometer':
+                out_plugin_result = 'PASS' if out_plugins[id] == out_plugin else 'FAIL'
+            if out_plugin == 'CSV':
+                if out_plugins[id] == out_plugin:
+                    out_plugin_result = \
+                        'PASS' if [
+                            plugin for comp_id, plugin,
+                            res in results if comp_id == id and res] else 'FAIL'
+                else:
+                    out_plugin_result = 'SKIP'
+            output_plugins_line += '| ' + out_plugin_result + '   '
+        logger.info(
+            '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
+            + output_plugins_line + '|')
+        for plugin in sorted(tested_plugins.values()):
+            line_plugin = _print_final_result_of_plugin(
+                plugin, compute_ids, results, out_plugins, out_plugin)
+            logger.info('|  IN:{}'.format(plugin) + (' ' * (11-len(plugin))) + '|' + line_plugin)
+        logger.info('+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+    logger.info('=' * 70)
+
+
+def _exec_testcase(
+        test_labels, name, ceilometer_running, compute_node,
+        conf, results, error_plugins):
+    """Execute the testcase.
+
+    Keyword arguments:
+    test_labels -- dictionary of plug-in IDs and their display names
+    name -- plug-in ID, key of test_labels dictionary
+    ceilometer_running -- boolean indicating whether Ceilometer is running
+    compute_node -- compute node ID
+    conf -- ConfigServer instance
+    results -- results list
+    error_plugins -- list of tuples with plug-in errors (plugin, error_description, is_critical):
+        plugin -- plug-in ID, key of test_labels dictionary
+        error_description -- description of the error
+        is_critical -- boolean value indicating whether error is critical
+    """
+    ovs_interfaces = conf.get_ovs_interfaces(compute_node)
+    ovs_configured_interfaces = conf.get_plugin_config_values(
+        compute_node, 'ovs_events', 'Interfaces')
+    ovs_existing_configured_int = [
+        interface for interface in ovs_interfaces
+        if interface in ovs_configured_interfaces]
+    plugin_prerequisites = {
+        'mcelog': [(conf.is_installed(compute_node, 'mcelog'), 'mcelog must be installed.')],
+        'ovs_events': [(
+            len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
+            'Interfaces must be configured.')]}
+    ceilometer_criteria_lists = {
+        'hugepages': ['hugepages.vmpage_number'],
+        'mcelog': ['mcelog.errors'],
+        'ovs_events': ['ovs_events.gauge']}
+    ceilometer_substr_lists = {
+        'ovs_events': ovs_existing_configured_int
+        if len(ovs_existing_configured_int) > 0 else ovs_interfaces}
+    csv_subdirs = {
+        'hugepages': [
+            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb', 'hugepages-node1-2048Kb',
+            'hugepages-mm-1048576Kb', 'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
+        'mcelog': ['mcelog-SOCKET_0_CHANNEL_0_DIMM_any', 'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
+        'ovs_events': [
+            'ovs_events-{}'.format(interface)
+            for interface in (
+                ovs_existing_configured_int
+                if len(ovs_existing_configured_int) > 0 else ovs_interfaces)]}
+    csv_meter_categories = {
+        'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
+        'mcelog': [
+            'errors-corrected_memory_errors', 'errors-uncorrected_memory_errors',
+            'errors-corrected_memory_errors_in_24h', 'errors-uncorrected_memory_errors_in_24h'],
+        'ovs_events': ['gauge-link_status']}
+
+    _print_plugin_label(test_labels[name] if name in test_labels else name, compute_node.get_id())
+    plugin_critical_errors = [
+        error for plugin, error, critical in error_plugins if plugin == name and critical]
+    if plugin_critical_errors:
+        logger.error('The following critical errors occurred:')
+        for error in plugin_critical_errors:
+            logger.error(' * ' + error)
+        _process_result(compute_node.get_id(), test_labels[name], False, results)
+    else:
+        plugin_errors = [
+            error for plugin, error, critical in error_plugins if plugin == name and not critical]
+        if plugin_errors:
+            logger.warning('The following non-critical errors occurred:')
+            for error in plugin_errors:
+                logger.warning(' * ' + error)
+        failed_prerequisites = []
+        if name in plugin_prerequisites:
+            failed_prerequisites = [
+                prerequisite_name for prerequisite_passed,
+                prerequisite_name in plugin_prerequisites[name] if not prerequisite_passed]
+        if failed_prerequisites:
+            logger.error(
+                '{} test will not be executed, '.format(name)
+                + 'following prerequisites failed:')
+            for prerequisite in failed_prerequisites:
+                logger.error(' * {}'.format(prerequisite))
+        else:
+            if ceilometer_running:
+                res = test_ceilometer_node_sends_data(
+                    compute_node.get_id(), conf.get_plugin_interval(compute_node, name),
+                    logger=logger, client=CeilometerClient(logger),
+                    criteria_list=ceilometer_criteria_lists[name],
+                    resource_id_substrings=(ceilometer_substr_lists[name]
+                        if name in ceilometer_substr_lists else ['']))
+            else:
+                res = test_csv_handles_plugin_data(
+                    compute_node, conf.get_plugin_interval(compute_node, name), name,
+                    csv_subdirs[name], csv_meter_categories[name], logger,
+                    CSVClient(logger, conf))
+            if res and plugin_errors:
+                logger.info(
+                    'Test works, but will be reported as a failure '
+                    + 'because of non-critical errors.')
+                res = False
+            _process_result(compute_node.get_id(), test_labels[name], res, results)
+
+
+def main(bt_logger=None):
+    """Check each compute node sends ceilometer metrics.
+
+    Keyword arguments:
+    bt_logger -- logger instance
+    """
+    logging.getLogger("paramiko").setLevel(logging.WARNING)
+    logging.getLogger("stevedore").setLevel(logging.WARNING)
+    if bt_logger is None:
+        _check_logger()
+    else:
+        global logger
+        logger = bt_logger
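+    # 10.20.0.2 is the default Fuel master (installer) address in OPNFV
+    # deployments; adjust if the installer admin network differs.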
+    conf = ConfigServer('10.20.0.2', 'root', logger)
+    controllers = conf.get_controllers()
+    if len(controllers) == 0:
+        logger.error('No controller nodes found!')
+        return 1
+    computes = conf.get_computes()
+    if len(computes) == 0:
+        logger.error('No compute nodes found!')
+        return 1
+
+    _print_label('Display of Control and Compute nodes available in the set up')
+    logger.info('controllers: {}'.format([('{0}: {1} ({2})'.format(
+        node.get_id(), node.get_name(), node.get_ip())) for node in controllers]))
+    logger.info('computes: {}'.format([('{0}: {1} ({2})'.format(
+        node.get_id(), node.get_name(), node.get_ip())) for node in computes]))
+
+    ceilometer_running_on_con = False
+    _print_label('Test Ceilometer on control nodes')
+    for controller in controllers:
+        ceil_client = CeilometerClient(logger)
+        ceil_client.auth_token()
+        ceilometer_running_on_con = (
+            ceilometer_running_on_con or conf.is_ceilometer_running(controller))
+    if ceilometer_running_on_con:
+        logger.info("Ceilometer is running on control node.")
+    else:
+        logger.error("Ceilometer is not running on control node.")
+        logger.info("CSV will be enabled on compute nodes.")
+    compute_ids = []
+    results = []
+    plugin_labels = {
+        'hugepages': 'Hugepages',
+        'mcelog': 'Mcelog',
+        'ovs_events': 'OVS events'}
+    out_plugins = {}
+    for compute_node in computes:
+        node_id = compute_node.get_id()
+        out_plugins[node_id] = 'CSV'
+        compute_ids.append(node_id)
+        # plugins_to_enable = plugin_labels.keys()
+        plugins_to_enable = []
+        _print_label('NODE {}: Test Ceilometer Plug-in'.format(node_id))
+        logger.info('Checking if ceilometer plug-in is included.')
+        if not conf.check_ceil_plugin_included(compute_node):
+            logger.error('Ceilometer plug-in is not included.')
+            logger.info('Testcases on node {} will not be executed'.format(node_id))
+        else:
+            collectd_restarted, collectd_warnings = conf.restart_collectd(compute_node)
+            sleep_time = 30
+            logger.info('Sleeping for {} seconds after collectd restart...'.format(sleep_time))
+            time.sleep(sleep_time)
+            if not collectd_restarted:
+                for warning in collectd_warnings:
+                    logger.warning(warning)
+                logger.error('Restart of collectd on node {} failed'.format(node_id))
+                logger.info('Testcases on node {} will not be executed'.format(node_id))
+            else:
+                for warning in collectd_warnings:
+                    logger.warning(warning)
+                ceilometer_running = (
+                    ceilometer_running_on_con and test_ceilometer_node_sends_data(
+                        node_id, 10, logger=logger, client=CeilometerClient(logger)))
+                if ceilometer_running:
+                    out_plugins[node_id] = 'Ceilometer'
+                    logger.info("Ceilometer is running.")
+                else:
+                    plugins_to_enable.append('csv')
+                    out_plugins[node_id] = 'CSV'
+                    logger.error("Ceilometer is not running.")
+                    logger.info("CSV will be enabled for verification of test plugins.")
+                if plugins_to_enable:
+                    _print_label(
+                        'NODE {}: Enabling Test Plug-in '.format(node_id)
+                        + 'and Test case execution')
+                error_plugins = []
+                if plugins_to_enable and not conf.enable_plugins(
+                        compute_node, plugins_to_enable, error_plugins, create_backup=False):
+                    logger.error('Failed to test plugins on node {}.'.format(node_id))
+                    logger.info('Testcases on node {} will not be executed'.format(node_id))
+                else:
+                    if plugins_to_enable:
+                        collectd_restarted, collectd_warnings = conf.restart_collectd(compute_node)
+                        sleep_time = 30
+                        logger.info(
+                            'Sleeping for {} seconds after collectd restart...'.format(sleep_time))
+                        time.sleep(sleep_time)
+                    if plugins_to_enable and not collectd_restarted:
+                        for warning in collectd_warnings:
+                            logger.warning(warning)
+                        logger.error('Restart of collectd on node {} failed'.format(node_id))
+                        logger.info('Testcases on node {} will not be executed'.format(node_id))
+                    else:
+                        if collectd_warnings:
+                            for warning in collectd_warnings:
+                                logger.warning(warning)
+
+                        for plugin_name in sorted(plugin_labels.keys()):
+                            _exec_testcase(
+                                plugin_labels, plugin_name, ceilometer_running,
+                                compute_node, conf, results, error_plugins)
+
+            _print_label('NODE {}: Restoring config file'.format(node_id))
+            conf.restore_config(compute_node)
+
+    print_overall_summary(compute_ids, plugin_labels, results, out_plugins)
+
+    if ((len([res for res in results if not res[2]]) > 0)
+            or (len(results) < len(computes) * len(plugin_labels))):
+        logger.error('Some tests have failed or have not been executed')
+        return 1
     return 0
 
 if __name__ == '__main__':
-    sys.exit(main())
+    sys.exit(main())
diff --git a/baro_tests/config_server.py b/baro_tests/config_server.py
new file mode 100644
index 0000000..4d64926
--- /dev/null
@@ -0,0 +1,503 @@
+"""Classes used by client.py"""
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os.path
+import string
+import time
+
+import paramiko
+
+ID_RSA_PATH = '/home/opnfv/.ssh/id_rsa'
+SSH_KEYS_SCRIPT = '/home/opnfv/barometer/baro_utils/get_ssh_keys.sh'
+DEF_PLUGIN_INTERVAL = 10
+COLLECTD_CONF = '/etc/collectd/collectd.conf'
+COLLECTD_CONF_DIR = '/etc/collectd/collectd.conf.d'
+
+
+class Node(object):
+    """Node configuration class"""
+    def __init__(self, attrs):
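+        # attrs holds one row of `fuel node` output split on '|':
+        # id, status, name, cluster, ip, mac, roles, pending_roles,
+        # online, group_id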
+        self.__id = int(attrs[0])
+        self.__status = attrs[1]
+        self.__name = attrs[2]
+        self.__cluster = int(attrs[3]) if attrs[3] else None
+        self.__ip = attrs[4]
+        self.__mac = attrs[5]
+        self.__roles = [x.strip(' ') for x in attrs[6].split(',')]
+        self.__pending_roles = attrs[7]
+        self.__online = int(attrs[8]) if attrs[3] and attrs[8] else None
+        self.__group_id = int(attrs[9]) if attrs[3] else None
+
+    def get_name(self):
+        """Get node name"""
+        return self.__name
+
+    def get_id(self):
+        """Get node ID"""
+        return self.__id
+
+    def get_ip(self):
+        """Get node IP address"""
+        return self.__ip
+
+    def get_roles(self):
+        """Get node roles"""
+        return self.__roles
+
+
+class ConfigServer(object):
+    """Class to get env configuration"""
+    def __init__(self, host, user, logger, passwd=None):
+        self.__host = host
+        self.__user = user
+        self.__passwd = passwd
+        self.__priv_key = None
+        self.__nodes = list()
+        self.__logger = logger
+
+        self.__private_key_file = ID_RSA_PATH
+        if not os.path.isfile(self.__private_key_file):
+            self.__logger.error(
+                "Private key file '{}'".format(self.__private_key_file)
+                + " not found. Please try to run {} script.".format(SSH_KEYS_SCRIPT))
+            raise IOError("Private key file '{}' not found.".format(self.__private_key_file))
+
+        # get list of available nodes
+        ssh, sftp = self.__open_sftp_session(self.__host, self.__user, self.__passwd)
+        attempt = 1
+        fuel_node_passed = False
+
+        while (attempt <= 10) and not fuel_node_passed:
+            stdin, stdout, stderr = ssh.exec_command("fuel node")
+            stderr_lines = stderr.readlines()
+            if stderr_lines:
+                self.__logger.warning("'fuel node' command failed (try {}):".format(attempt))
+                for line in stderr_lines:
+                    self.__logger.debug(line.strip())
+            else:
+                fuel_node_passed = True
+                if attempt > 1:
+                    self.__logger.info("'fuel node' command passed (try {})".format(attempt))
+            attempt += 1
+        if not fuel_node_passed:
+            self.__logger.error("'fuel node' command failed. This was the last try.")
+            raise OSError("'fuel node' command failed. This was the last try.")
+        node_table = stdout.readlines()
+
+        # skip table title and parse table values
+        for entry in node_table[2:]:
+            self.__nodes.append(Node([str(x.strip(' \n')) for x in entry.split('|')]))
+
+    def get_controllers(self):
+        """Get list of controllers"""
+        return [node for node in self.__nodes if 'controller' in node.get_roles()]
+
+    def get_computes(self):
+        """Get list of computes"""
+        return [node for node in self.__nodes if 'compute' in node.get_roles()]
+
+    def get_nodes(self):
+        """Get list of nodes"""
+        return self.__nodes
+
+    def __open_sftp_session(self, host, user, passwd=None):
+        """Connect to given host.
+
+        Keyword arguments:
+        host -- host to connect
+        user -- user to use
+        passwd -- password to use
+
+        Return tuple of SSH and SFTP client instances.
+        """
+        # create SSH client
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+        # try a direct access using password or private key
+        if not passwd and not self.__priv_key:
+            # get private key
+            self.__priv_key = paramiko.RSAKey.from_private_key_file(self.__private_key_file)
+
+        # connect to the server
+        ssh.connect(host, username=user, password=passwd, pkey=self.__priv_key)
+        sftp = ssh.open_sftp()
+
+        # return SFTP client instance
+        return ssh, sftp
+
+    def get_plugin_interval(self, compute, plugin):
+        """Find the plugin interval in collectd configuration.
+
+        Keyword arguments:
+        compute -- compute node instance
+        plugin -- plug-in name
+
+        If found, return interval value, otherwise the default value"""
+        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        in_plugin = False
+        plugin_name = ''
+        default_interval = DEF_PLUGIN_INTERVAL
+        config_files = [COLLECTD_CONF] + [
+            COLLECTD_CONF_DIR + '/' + conf_file
+            for conf_file in sftp.listdir(COLLECTD_CONF_DIR)]
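+        # A matching collectd.conf fragment would look like, e.g.:
+        #   <LoadPlugin hugepages>
+        #       Interval 10
+        #   </LoadPlugin>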
+        for config_file in config_files:
+            try:
+                with sftp.open(config_file) as config:
+                    for line in config.readlines():
+                        words = line.split()
+                        if len(words) > 1 and words[0] == '<LoadPlugin':
+                            in_plugin = True
+                            plugin_name = words[1].strip('">')
+                        if words and words[0] == '</LoadPlugin>':
+                            in_plugin = False
+                        if words and words[0] == 'Interval':
+                            if in_plugin and plugin_name == plugin:
+                                return int(words[1])
+                            if not in_plugin:
+                                default_interval = int(words[1])
+            except IOError:
+                self.__logger.error("Could not open collectd.conf file.")
+        return default_interval
+
+    def get_plugin_config_values(self, compute, plugin, parameter):
+        """Get parameter values from collectd config file.
+
+        Keyword arguments:
+        compute -- compute node instance
+        plugin -- plug-in name
+        parameter -- plug-in parameter
+
+        Return list of found values."""
+        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        # find the plugin value
+        in_plugin = False
+        plugin_name = ''
+        default_values = []
+        config_files = [COLLECTD_CONF] + [
+            COLLECTD_CONF_DIR + '/' + conf_file
+            for conf_file in sftp.listdir(COLLECTD_CONF_DIR)]
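+        # For get_plugin_config_values(node, 'ovs_events', 'Interfaces'),
+        # e.g., a matching section would be:
+        #   <Plugin ovs_events>
+        #       Interfaces "br0" "br-int"
+        #   </Plugin>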
+        for config_file in config_files:
+            try:
+                with sftp.open(config_file) as config:
+                    for line in config.readlines():
+                        words = line.split()
+                        if len(words) > 1 and words[0] == '<Plugin':
+                            in_plugin = True
+                            plugin_name = words[1].strip('">')
+                        if len(words) > 0 and words[0] == '</Plugin>':
+                            in_plugin = False
+                        if len(words) > 0 and words[0] == parameter:
+                            if in_plugin and plugin_name == plugin:
+                                return [word.strip('"') for word in words[1:]]
+            except IOError:
+                self.__logger.error("Could not open collectd.conf file.")
+        return default_values
+
+    def execute_command(self, command, host_ip=None, ssh=None):
+        """Execute command on node and return list of lines of standard output.
+
+        Keyword arguments:
+        command -- command
+        host_ip -- IP of the node
+        ssh -- existing open SSH session to use
+
+        One of host_ip or ssh must not be None. If both are not None, existing ssh session is used.
+        """
+        if host_ip is None and ssh is None:
+            raise ValueError('One of host_ip or ssh must not be None.')
+        if ssh is None:
+            ssh, sftp = self.__open_sftp_session(host_ip, 'root')
+        stdin, stdout, stderr = ssh.exec_command(command)
+        return stdout.readlines()
+
+    def get_ovs_interfaces(self, compute):
+        """Get list of configured OVS interfaces
+
+        Keyword arguments:
+        compute -- compute node instance
+        """
+        stdout = self.execute_command("ovs-vsctl list-br", compute.get_ip())
+        return [interface.strip() for interface in stdout]
+
+    def is_ceilometer_running(self, controller):
+        """Check whether Ceilometer is running on controller.
+
+        Keyword arguments:
+        controller -- controller node instance
+
+        Return boolean value whether Ceilometer is running.
+        """
+        lines = self.execute_command('service --status-all | grep ceilometer', controller.get_ip())
+        agent = False
+        collector = False
+        for line in lines:
+            if '[ + ]  ceilometer-agent-notification' in line:
+                agent = True
+            if '[ + ]  ceilometer-collector' in line:
+                collector = True
+        return agent and collector
+
+    def is_installed(self, compute, package):
+        """Check whether package exists on compute node.
+
+        Keyword arguments:
+        compute -- compute node instance
+        package -- Linux package to search for
+
+        Return boolean value whether package is installed.
+        """
+        stdout = self.execute_command('dpkg -l | grep {}'.format(package), compute.get_ip())
+        return len(stdout) > 0
+
+    def check_ceil_plugin_included(self, compute):
+        """Check if ceilometer plugin is included in collectd.conf file If not,
+        try to enable it.
+
+        Keyword arguments:
+        compute -- compute node instance
+
+        Return boolean value whether ceilometer plugin is included or it's enabling was successful.
+        """
+        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        try:
+            config = sftp.open(COLLECTD_CONF, mode='r')
+        except IOError:
+            self.__logger.error(
+                'Cannot open {} on node {}'.format(COLLECTD_CONF, compute.get_id()))
+            return False
+        in_lines = config.readlines()
+        out_lines = in_lines[:]
+        include_section_indexes = [
+            (start, end) for start in range(len(in_lines)) for end in range(len(in_lines))
+            if (start < end)
+            and '<Include' in in_lines[start]
+            and COLLECTD_CONF_DIR in in_lines[start]
+            and '#' not in in_lines[start]
+            and '</Include>' in in_lines[end]
+            and '#' not in in_lines[end]
+            and len([i for i in in_lines[start + 1: end]
+                if 'Filter' in i and '*.conf' in i and '#' not in i]) > 0]
+        if len(include_section_indexes) == 0:
+            out_lines.append('<Include "{}">\n'.format(COLLECTD_CONF_DIR))
+            out_lines.append('        Filter "*.conf"\n')
+            out_lines.append('</Include>\n')
+            config.close()
+            config = sftp.open(COLLECTD_CONF, mode='w')
+            config.writelines(out_lines)
+        config.close()
+        self.__logger.info('Creating backup of collectd.conf...')
+        config = sftp.open(COLLECTD_CONF + '.backup', mode='w')
+        config.writelines(in_lines)
+        config.close()
+        return True
+
+    def enable_plugins(self, compute, plugins, error_plugins, create_backup=True):
+        """Enable plugins on compute node
+
+        Keyword arguments:
+        compute -- compute node instance
+        plugins -- list of plugins to be enabled
+        error_plugins -- list of tuples with found errors, new entries may be added there
+            (plugin, error_description, is_critical):
+                plugin -- plug-in name
+                error_decription -- description of the error
+                is_critical -- boolean value indicating whether error is critical
+        create_backup -- boolean value indicating whether backup shall be created
+
+        Return boolean value indicating whether function was successful.
+        """
+        plugins = sorted(plugins)
+        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+        plugins_to_enable = plugins[:]
+        for plugin in plugins:
+            plugin_file = '/usr/lib/collectd/{}.so'.format(plugin)
+            try:
+                sftp.stat(plugin_file)
+            except IOError:
+                self.__logger.debug(
+                    'Plugin file {0} not found on node {1}, plugin {2} will not be enabled'.format(
+                        plugin_file, compute.get_id(), plugin))
+                error_plugins.append((plugin, 'plugin file {} not found'.format(plugin_file), True))
+                plugins_to_enable.remove(plugin)
+        self.__logger.debug('Following plugins will be enabled on node {}: {}'.format(
+            compute.get_id(), ', '.join(plugins_to_enable)))
+        try:
+            config = sftp.open(COLLECTD_CONF, mode='r')
+        except IOError:
+            self.__logger.warning(
+                'Cannot open {} on node {}'.format(COLLECTD_CONF, compute.get_id()))
+            return False
+        in_lines = config.readlines()
+        out_lines = []
+        enabled_plugins = []
+        enabled_sections = []
+        in_section = 0
+        comment_section = False
+        uncomment_section = False
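+        # Walk collectd.conf line by line: keep exactly one uncommented
+        # LoadPlugin line and one uncommented <Plugin> section per requested
+        # plugin, commenting out duplicates and uncommenting disabled ones.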
+        for line in in_lines:
+            if 'LoadPlugin' in line:
+                for plugin in plugins_to_enable:
+                    if plugin in line:
+                        commented = '#' in line
+                        # list of uncommented lines which contain LoadPlugin for this plugin
+                        loadlines = [
+                            ll for ll in in_lines if 'LoadPlugin' in ll
+                            and plugin in ll and '#' not in ll]
+                        if len(loadlines) == 0:
+                            if plugin not in enabled_plugins:
+                                line = line.lstrip(string.whitespace + '#')
+                                enabled_plugins.append(plugin)
+                                error_plugins.append((
+                                    plugin, 'plugin not enabled in '
+                                    + '{}, trying to enable it'.format(COLLECTD_CONF), False))
+                        elif not commented:
+                            if plugin not in enabled_plugins:
+                                enabled_plugins.append(plugin)
+                            else:
+                                line = '#' + line
+                                error_plugins.append((
+                                    plugin, 'plugin enabled more than once '
+                                    + '(additional occurrence of LoadPlugin found in '
+                                    + '{}), trying to comment it out.'.format(
+                                        COLLECTD_CONF), False))
+            elif line.lstrip(string.whitespace + '#').find('<Plugin') == 0:
+                in_section += 1
+                for plugin in plugins_to_enable:
+                    if plugin in line:
+                        commented = '#' in line
+                        # list of uncommented lines which contain Plugin for this plugin
+                        pluginlines = [
+                            pl for pl in in_lines if '<Plugin' in pl
+                            and plugin in pl and '#' not in pl]
+                        if len(pluginlines) == 0:
+                            if plugin not in enabled_sections:
+                                line = line[line.rfind('#') + 1:]
+                                uncomment_section = True
+                                enabled_sections.append(plugin)
+                                error_plugins.append((
+                                    plugin, 'plugin section found in '
+                                    + '{}, but commented out, trying to uncomment it.'.format(
+                                        COLLECTD_CONF), False))
+                        elif not commented:
+                            if plugin not in enabled_sections:
+                                enabled_sections.append(plugin)
+                            else:
+                                line = '#' + line
+                                comment_section = True
+                                error_plugins.append((
+                                    plugin,
+                                    'additional occurrence of plugin section found in '
+                                    + '{}, trying to comment it out.'.format(COLLECTD_CONF),
+                                    False))
+            elif in_section > 0:
+                if comment_section and '#' not in line:
+                    line = '#' + line
+                if uncomment_section and '#' in line:
+                    line = line[line.rfind('#') + 1:]
+                if '</Plugin>' in line:
+                    in_section -= 1
+                    if in_section == 0:
+                        comment_section = False
+                        uncomment_section = False
+            elif '</Plugin>' in line:
+                self.__logger.error(
+                    'Unexpected closure of plugin section on line'
+                    + ' {} in collectd.conf, matching section start not found.'.format(
+                        len(out_lines) + 1))
+                return False
+            out_lines.append(line)
+        if in_section > 0:
+            self.__logger.error(
+                'Unexpected end of file collectd.conf, '
+                + 'closure of last plugin section not found.')
+            return False
+        out_lines = [
+            'LoadPlugin {}\n'.format(plugin) for plugin in plugins_to_enable
+            if plugin not in enabled_plugins] + out_lines
+        for plugin in plugins_to_enable:
+            if plugin not in enabled_plugins:
+                error_plugins.append((
+                    plugin,
+                    'plugin not enabled in {}, trying to enable it.'.format(COLLECTD_CONF),
+                    False))
+        unenabled_sections = [
+            plugin for plugin in plugins_to_enable if plugin not in enabled_sections]
+        if unenabled_sections:
+            self.__logger.error('Plugin sections for following plugins not found: {}'.format(
+                ', '.join(unenabled_sections)))
+            return False
+
+        config.close()
+        if create_backup:
+            self.__logger.info('Creating backup of collectd.conf...')
+            config = sftp.open(COLLECTD_CONF + '.backup', mode='w')
+            config.writelines(in_lines)
+            config.close()
+        self.__logger.info('Updating collectd.conf...')
+        config = sftp.open(COLLECTD_CONF, mode='w')
+        config.writelines(out_lines)
+        config.close()
+        diff_command = "diff {} {}.backup".format(COLLECTD_CONF, COLLECTD_CONF)
+        stdin, stdout, stderr = ssh.exec_command(diff_command)
+        self.__logger.debug(diff_command)
+        for line in stdout.readlines():
+            self.__logger.debug(line.strip())
+        return True
+
+    def restore_config(self, compute):
+        """Restore collectd config file from backup on compute node.
+
+        Keyword arguments:
+        compute -- compute node instance
+        """
+        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+
+        self.__logger.info('Restoring config file from backup...')
+        ssh.exec_command("cp {0} {0}.used".format(COLLECTD_CONF))
+        ssh.exec_command("cp {0}.backup {0}".format(COLLECTD_CONF))
+
+    def restart_collectd(self, compute):
+        """Restart collectd on compute node.
+
+        Keyword arguments:
+        compute -- compute node instance
+
+        Return tuple with boolean indicating success and list of warnings received
+        during collectd start.
+        """
+
+        def get_collectd_processes(ssh_session):
+            """Get number of running collectd processes.
+
+            Keyword arguments:
+            ssh_session -- instance of SSH session in which to check for processes
+            """
+            stdin, stdout, stderr = ssh_session.exec_command("pgrep collectd")
+            return len(stdout.readlines())
+
+        ssh, sftp = self.__open_sftp_session(compute.get_ip(), 'root')
+
+        self.__logger.info('Stopping collectd service...')
+        stdout = self.execute_command("service collectd stop", ssh=ssh)
+        time.sleep(10)
+        if get_collectd_processes(ssh):
+            self.__logger.error('Collectd is still running...')
+            return False, []
+        self.__logger.info('Starting collectd service...')
+        stdout = self.execute_command("service collectd start", ssh=ssh)
+        time.sleep(10)
+        warning = [output.strip() for output in stdout if 'WARN: ' in output]
+        if get_collectd_processes(ssh) == 0:
+            self.__logger.error('Collectd is still not running...')
+            return False, warning
+        return True, warning
diff --git a/baro_tests/tests.py b/baro_tests/tests.py
new file mode 100644 (file)
index 0000000..80335ad
--- /dev/null
@@ -0,0 +1,194 @@
+"""Function for testing collectd plug-ins with different oup plug-ins"""
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+
+def test_ceilometer_node_sends_data(
+        node_id, interval, logger, client, criteria_list=[],
+        resource_id_substrings=['']):
+    """ Test that data reported by Ceilometer are updated in the given interval.
+
+    Keyword arguments:
+    node_id -- node ID
+    interval -- interval to check
+    logger -- logger instance
+    client -- CeilometerClient instance
+    criteria_list -- list of criteria used in ceilometer calls
+    resource_id_substrings -- list of substrings to search for in resource ID
+
+    Return boolean value indicating success or failure.
+    """
+
+    def _search_meterlist_latest_entry(meterlist, node_str, substr=''):
+        """Search for latest entry in meter list
+
+        Keyword arguments:
+        meterlist -- list of metrics
+        node_str -- name of node, which will be found in meter list
+        substr -- substring which will be found in meter list
+
+        Return latest entry from meter list which contains given node string
+        and (if defined) substring.
+        """
+        res = [entry for entry in meterlist if node_str in entry['resource_id']
+            and substr in entry['resource_id']]
+        if res:
+            return res[0]
+        else:
+            return []
+
+    client.auth_token()
+    timestamps = {}
+    node_str = 'node-{}'.format(node_id) if node_id else ''
+
+    logger.info('Searching for timestamps of latest entries{0}{1}{2}...'.format(
+        '' if node_str == '' else ' for {}'.format(node_str),
+        '' if len(criteria_list) == 0 else (' for criteria ' + ', '.join(criteria_list)),
+        '' if resource_id_substrings == [''] else ' and resource ID substrings "{}"'.format(
+            '", "'.join(resource_id_substrings))))
+    for criterion in criteria_list if len(criteria_list) > 0 else [None]:
+        meter_list = client.get_ceil_metrics(criterion)
+        for resource_id_substring in resource_id_substrings:
+            last_entry = _search_meterlist_latest_entry(meter_list, node_str, resource_id_substring)
+            if len(last_entry) == 0:
+                logger.error('Entry{0}{1}{2} not found'.format(
+                    '' if node_str == '' else ' for {}'.format(node_str),
+                    '' if criterion is None else ' for criterion {}'.format(criterion),
+                    '' if resource_id_substring == ''
+                    else ' and resource ID substring "{}"'.format(resource_id_substring)))
+                return False
+            timestamp = last_entry['timestamp']
+            logger.debug('Last entry found: {0} {1}'.format(timestamp, last_entry['resource_id']))
+            timestamps[(criterion, resource_id_substring)] = timestamp
+
+    attempt = 1
+    is_passed = False
+    while (attempt <= 10) and not is_passed:
+        is_passed = True
+        # wait Interval time + 2 sec for db update
+        sleep_time = interval + 2
+        if attempt > 1:
+            logger.info('Starting attempt {}'.format(attempt))
+        logger.info(
+            'Sleeping for {} seconds to get updated entries '.format(sleep_time)
+            + '(interval is {} sec)...'.format(interval))
+        time.sleep(sleep_time)
+
+        logger.info('Searching for timestamps of latest entries{}{}{}...'.format(
+            '' if node_str == '' else ' for {}'.format(node_str),
+            '' if len(criteria_list) == 0 else (' for criteria ' + ', '.join(criteria_list)),
+            '' if resource_id_substrings == ['']
+            else ' and resource ID substrings "{}"'.format('", "'.join(resource_id_substrings))))
+        for criterion in criteria_list if len(criteria_list) > 0 else [None]:
+            meter_list = client.get_ceil_metrics(criterion)
+            for resource_id_substring in resource_id_substrings:
+                last_entry = _search_meterlist_latest_entry(
+                    meter_list, node_str, resource_id_substring)
+                if len(last_entry) == 0:
+                    logger.error('Entry{0}{1}{2} not found'.format(
+                        '' if node_str == '' else ' for {}'.format(node_str),
+                        '' if criterion is None else ' for criterion {}'.format(criterion),
+                        '' if resource_id_substring == ''
+                        else ' and resource ID substring "{}"'.format(resource_id_substring)))
+                    return False
+                timestamp = last_entry['timestamp']
+                logger.debug('Last entry found: {} {}'.format(timestamp, last_entry['resource_id']))
+                if timestamp == timestamps[(criterion, resource_id_substring)]:
+                    logger.warning(
+                        'Last entry{0}{1}{2} has the same timestamp as before the sleep'.format(
+                            '' if node_str == '' else ' for {}'.format(node_str),
+                            '' if resource_id_substring == ''
+                            else ', substring "{}"'.format(resource_id_substring),
+                            '' if criterion is None else ' for criterion {}'.format(criterion)))
+                    is_passed = False
+        attempt += 1
+        if not is_passed:
+            logger.warning('After sleep new entries were not found.')
+    if not is_passed:
+        logger.error('This was the last attempt.')
+        return False
+    logger.info('All latest entries found.')
+    return True
+
+
+def test_csv_handles_plugin_data(
+        compute, interval, plugin, plugin_subdirs, meter_categories,
+        logger, client):
+    """Check that CSV data are updated by the plugin.
+
+    Keyword arguments:
+    compute -- object compute node
+    interval -- interval to check
+    plugin -- plugin which will be tested
+    plugin_subdirs -- list subdirectories in csv folder
+    meter_categories -- list of meter categories which will be tested
+    logger -- logger instance
+    client -- CSVClient instance
+
+    Return boolean value indicating success or failure.
+    """
+    logger.info('Getting CSV metrics of plugin {} on compute node {}...'.format(
+        plugin, compute.get_id()))
+    logger.debug('Interval: {}'.format(interval))
+    logger.debug('Plugin subdirs: {}'.format(plugin_subdirs))
+    logger.debug('Plugin meter categories: {}'.format(meter_categories))
+    plugin_metrics = client.get_csv_metrics(compute, plugin_subdirs, meter_categories)
+    if len(plugin_metrics) < len(plugin_subdirs) * len(meter_categories):
+        logger.error('Some plugin metrics not found')
+        return False
+
+    logger.info('Checking that last two entries in metrics are corresponding to interval...')
+    for metric in plugin_metrics:
+        logger.debug('{0} {1} {2} ... '.format(metric[0], metric[1], metric[2]))
+        if metric[3] - metric[2] != interval:
+            logger.error('Timestamps of last two entries differ by {}, but interval is {}'.format(
+                metric[3] - metric[2], interval))
+            return False
+        else:
+            logger.debug('OK')
+    logger.info('OK')
+
+    # wait Interval time + 2 sec
+    sleep_time = interval + 2
+    logger.info(
+        'Sleeping for {} seconds to get updated entries '.format(sleep_time)
+        + '(interval is {} sec)...'.format(interval))
+    time.sleep(sleep_time)
+
+    logger.info('Getting new metrics of compute node {}...'.format(compute.get_id()))
+    plugin_metrics2 = client.get_csv_metrics(compute, plugin_subdirs, meter_categories)
+    if len(plugin_metrics2) < len(plugin_subdirs) * len(meter_categories):
+        logger.error('Some plugin metrics not found')
+        return False
+
+    logger.info('Comparing old and new metrics...')
+    logger.debug(plugin_metrics)
+    logger.debug(plugin_metrics2)
+    if len(plugin_metrics) != len(plugin_metrics2):
+        logger.error('Plugin metrics length before and after sleep differ')
+        return False
+    for i in range(len(plugin_metrics2)):
+        logger.debug('{0} {1} {2}  - {3} {4} {5} ... '.format(
+            plugin_metrics[i][0], plugin_metrics[i][1], plugin_metrics[i][2], plugin_metrics2[i][0],
+            plugin_metrics2[i][1], plugin_metrics2[i][2]))
+        if plugin_metrics[i] == plugin_metrics2[i]:
+            logger.error('FAIL')
+            return False
+        else:
+            logger.debug('OK')
+    logger.info('OK')
+
+    return True
diff --git a/baro_utils/get_ssh_keys.sh b/baro_utils/get_ssh_keys.sh
new file mode 100755
index 0000000..f90c32c
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/bash
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
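+
+# Fetch the installer node's SSH keys into /home/opnfv/.ssh/ (the tests use
+# /home/opnfv/.ssh/id_rsa to reach the nodes); INSTALLER_IP is assumed to be
+# exported, e.g. the Fuel master address.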
+mkdir -p /home/opnfv/.ssh/
+scp root@"$INSTALLER_IP":/root/.ssh/* /home/opnfv/.ssh/