modules.opnfv: fuel adapter: Switch to MCP 43/62943/5
author    Alexandru Avadanii <Alexandru.Avadanii@enea.com>
          Mon, 24 Sep 2018 22:18:36 +0000 (00:18 +0200)
committer Alexandru Avadanii <Alexandru.Avadanii@enea.com>
          Wed, 26 Sep 2018 00:26:34 +0000 (02:26 +0200)
The current codebase was intended for Fuel@OPNFV up to and including
the Danube release (based on Fuel@OpenStack).
Starting with the Euphrates release, Fuel@OPNFV is based on Mirantis
Cloud Platform (MCP), so the Fuel adapter in modules.opnfv needs a
rework to accommodate the new platform:
- remove unused, obsolete code in opnfv.utils;
- switch to key-based SSH auth instead of password-based auth;
- stop proxying SSH to cluster nodes through the installer node;
  instead, SSH directly over the PXE/admin network (see the sketch
  below);
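
As a rough sketch (illustrative only, not part of this change), the
new access pattern is a direct, key-based paramiko connection to a
node's PXE/admin address:

  import paramiko

  # Key file and user match the defaults used elsewhere in this change;
  # the address is taken from the sample output further down.
  client = paramiko.SSHClient()
  client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
  client.connect('192.168.11.34', username='ubuntu',
                 key_filename='/var/lib/opnfv/mcp.rsa')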

Notes:
- the MaaS node (when present) is marked with the 'installer' role to
  prevent ovs-vsctl queries on it;
- the gtw01 node (when present) is not assigned any role yet;

JIRA: FUEL-394

Change-Id: If2b7744b950cd502df8eaca0137c9f845a9d077d
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
modules/opnfv/deployment/example.py
modules/opnfv/deployment/factory.py
modules/opnfv/deployment/fuel/adapter.py
modules/opnfv/deployment/manager.py
modules/opnfv/utils/Credentials.py
modules/opnfv/utils/ssh_utils.py
modules/requirements.txt

index 52d9b56..1e54321 100644 (file)
@@ -23,15 +23,17 @@ print(handler.get_deployment_info())
 
 
 print("########## FUEL ##########")
+# NOTE: If you get traces containing <paramiko.ecdsakey.ECDSAKey object [...]>
+# make sure 10.20.0.2 is not already in ~/.ssh/known_hosts with a stale key
 handler = factory.Factory.get_handler('fuel',
                                       '10.20.0.2',
-                                      'root',
-                                      installer_pwd='r00tme')
+                                      'ubuntu',
+                                      pkey_file='/var/lib/opnfv/mcp.rsa')
 
 print(handler.get_deployment_info())
 
-print("List of nodes in cluster 4:")
-nodes = handler.get_nodes({'cluster': '4'})
+print("List of nodes in cluster")
+nodes = handler.get_nodes()
 for node in nodes:
     print(node)
 
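One hedged way to clear a stale Salt master host key before running the
example above (assumes a standard OpenSSH setup on the jumpserver):

  import subprocess

  # 'ssh-keygen -R' removes all known_hosts entries for the given host
  subprocess.call(['ssh-keygen', '-R', '10.20.0.2'])
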
index 1fd8d44..cd2fc36 100644 (file)
@@ -42,7 +42,7 @@ class Factory(object):
         elif installer.lower() == "fuel":
             return fuel_adapter.FuelAdapter(installer_ip=installer_ip,
                                             installer_user=installer_user,
-                                            installer_pwd=installer_pwd)
+                                            pkey_file=pkey_file)
         elif installer.lower() == "compass":
             return compass_adapter.ContainerizedCompassAdapter(
                 installer_ip=installer_ip,
index a217767..a57168d 100644 (file)
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright (c) 2018 Ericsson AB and others.
 # Author: Jose Lausuch (jose.lausuch@ericsson.com)
 #         George Paraskevopoulos (geopar@intracom-telecom.com)
 # All rights reserved. This program and the accompanying materials
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+'''
+    This module implements the Fuel@OPNFV adapter
 
+    - the host executing this module needs network connectivity to the
+      cluster via:
+      * the mcpcontrol network (usually 10.20.0.0/24, created by installer);
+      * the PXE/admin network;
+      Both are available by default on an OPNFV Pharos jumpserver.
+    - key-based SSH auth is used throughout the cluster, without proxying
+      cluster node access through the Salt master (as old Fuel@OPNFV did);
+'''
+
+from yaml import safe_load, YAMLError
 
 from opnfv.deployment import manager
 from opnfv.utils import opnfv_logger as logger
 from opnfv.utils import ssh_utils
 
-logger = logger.Logger(__name__).getLogger()
+LOGGER = logger.Logger(__name__).getLogger()
 
 
 class FuelAdapter(manager.DeploymentHandler):
+    '''
+        This class extends the generic handler with Fuel@OPNFV specifics
+    '''
 
-    def __init__(self, installer_ip, installer_user, installer_pwd):
+    def __init__(self, installer_ip, installer_user, pkey_file):
         super(FuelAdapter, self).__init__(installer='fuel',
                                           installer_ip=installer_ip,
                                           installer_user=installer_user,
-                                          installer_pwd=installer_pwd,
-                                          pkey_file=None)
-
-    def _get_clusters(self):
-        environments = []
-        output = self.runcmd_fuel_env()
-        lines = output.rsplit('\n')
-        if len(lines) < 2:
-            logger.info("No environments found in the deployment.")
-            return None
-        else:
-            fields = lines[0].rsplit(' | ')
-
-            index_id = -1
-            index_status = -1
-            index_name = -1
-            index_release_id = -1
-
-            for i in range(len(fields)):
-                if "id" in fields[i]:
-                    index_id = i
-                elif "status" in fields[i]:
-                    index_status = i
-                elif "name" in fields[i]:
-                    index_name = i
-                elif "release_id" in fields[i]:
-                    index_release_id = i
-
-            # order env info
-            for i in range(2, len(lines)):
-                fields = lines[i].rsplit(' | ')
-                dict = {"id": fields[index_id].strip(),
-                        "status": fields[index_status].strip(),
-                        "name": fields[index_name].strip(),
-                        "release_id": fields[index_release_id].strip()}
-                environments.append(dict)
-
-        return environments
+                                          installer_pwd=None,
+                                          pkey_file=pkey_file)
 
     def get_nodes(self, options=None):
-
-        if options and options['cluster'] and len(self.nodes) > 0:
-            n = []
-            for node in self.nodes:
-                if str(node.info['cluster']) == str(options['cluster']):
-                    n.append(node)
-            return n
-
+        '''
+            Generates a list of all the nodes in the deployment
+        '''
+        # Unlike old Fuel@OpenStack, we don't keep track of different clusters
+        # explicitly, but through domain names.
+        # For simplicity, we will assume a single cluster per Salt master node.
         try:
             # if we have retrieved previously all the nodes, don't do it again
             # This fails the first time when the constructor calls this method
             # therefore the try/except
             if len(self.nodes) > 0:
                 return self.nodes
+        # pylint: disable=bare-except
         except:
             pass
 
+        # Map reclass pillar properties to manager roles
+        _map = {
+            'salt:master:enabled': manager.Role.INSTALLER,
+            'maas:region:enabled': manager.Role.INSTALLER,
+            'nova:controller:enabled': manager.Role.CONTROLLER,
+            'nova:compute:enabled': manager.Role.COMPUTE,
+            'opendaylight:server:enabled': manager.Role.ODL,
+        }
         nodes = []
-        cmd = 'fuel node'
+        cmd = ("sudo salt '*' pillar.item {} --out yaml --static 2>/dev/null"
+               .format(' '.join(list(_map.keys()) +
+                                ['_param:pxe_admin_address'])))
+        # Sample output (for one node):
+        #   cmp001.mcp-ovs-noha.local:
+        #     _param:pxe_admin_address: 192.168.11.34
+        #     maas:region:enabled: ''
+        #     nova:compute:enabled: true
+        #     nova:controller:enabled: ''
+        #     opendaylight:server:enabled: ''
+        #     retcode: 0
+        #     salt:master:enabled: ''
         output = self.installer_node.run_cmd(cmd)
-        lines = output.rsplit('\n')
-        if len(lines) < 2:
-            logger.info("No nodes found in the deployment.")
+        if output.startswith('No minions matched the target'):
+            LOGGER.info('No nodes found in the deployment.')
             return nodes
 
-        # get fields indexes
-        fields = lines[0].rsplit(' | ')
-
-        index_id = -1
-        index_status = -1
-        index_name = -1
-        index_cluster = -1
-        index_ip = -1
-        index_mac = -1
-        index_roles = -1
-        index_online = -1
-
-        for i in range(len(fields)):
-            if "group_id" in fields[i]:
-                break
-            elif "id" in fields[i]:
-                index_id = i
-            elif "status" in fields[i]:
-                index_status = i
-            elif "name" in fields[i]:
-                index_name = i
-            elif "cluster" in fields[i]:
-                index_cluster = i
-            elif "ip" in fields[i]:
-                index_ip = i
-            elif "mac" in fields[i]:
-                index_mac = i
-            elif "roles " in fields[i] and "pending_roles" not in fields[i]:
-                index_roles = i
-            elif "online" in fields[i]:
-                index_online = i
-
-        # order nodes info
-        for i in range(2, len(lines)):
-            fields = lines[i].rsplit(' | ')
-            id = fields[index_id].strip().encode()
-            ip = fields[index_ip].strip().encode()
-            status_node = fields[index_status].strip().encode().lower()
-            name = fields[index_name].strip().encode()
-            roles_all = fields[index_roles].strip().encode().lower()
-
-            roles = [x for x in [manager.Role.CONTROLLER,
-                                 manager.Role.COMPUTE,
-                                 manager.Role.ODL] if x in roles_all]
-
-            dict = {"cluster": fields[index_cluster].strip().encode(),
-                    "mac": fields[index_mac].strip().encode(),
-                    "status_node": status_node,
-                    "online": fields[index_online].strip().encode()}
-
-            ssh_client = None
-            if status_node == 'ready':
-                status = manager.NodeStatus.STATUS_OK
-                proxy = {'ip': self.installer_ip,
-                         'username': self.installer_user,
-                         'password': self.installer_pwd}
-                ssh_client = ssh_utils.get_ssh_client(hostname=ip,
-                                                      username='root',
-                                                      proxy=proxy)
-            elif 'error' in status_node:
-                status = manager.NodeStatus.STATUS_ERROR
-            elif 'off' in status_node:
-                status = manager.NodeStatus.STATUS_OFFLINE
-            elif 'discover' in status_node:
-                status = manager.NodeStatus.STATUS_UNUSED
-            else:
-                status = manager.NodeStatus.STATUS_INACTIVE
-
+        try:
+            yaml_output = safe_load(output)
+        except YAMLError as exc:
+            LOGGER.error(exc)
+            return nodes
+        for node_name in yaml_output.keys():
+            ip_addr = yaml_output[node_name]['_param:pxe_admin_address']
+            ssh_client = ssh_utils.get_ssh_client(hostname=ip_addr,
+                                                  username='ubuntu',
+                                                  pkey_file=self.pkey_file)
             node = manager.Node(
-                id, ip, name, status, roles, ssh_client, dict)
-            if options and options['cluster']:
-                if fields[index_cluster].strip() == options['cluster']:
-                    nodes.append(node)
-            else:
-                nodes.append(node)
+                id=node_name,
+                ip=ip_addr,
+                name=node_name,
+                status=manager.NodeStatus.STATUS_OK,
+                roles=[_map[x] for x in _map if yaml_output[node_name][x]],
+                ssh_client=ssh_client)
+            nodes.append(node)
 
-        self.get_nodes_called = True
         return nodes
 
     def get_openstack_version(self):
-        cmd = 'source openrc;nova-manage version 2>/dev/null'
-        version = None
-        for node in self.nodes:
-            if node.is_controller() and node.is_active():
-                version = node.run_cmd(cmd)
-                break
-        return version
+        '''
+        Returns a string with the OpenStack version (via nova-manage)
+        '''
+        cmd = ("sudo salt -C 'I@nova:controller and *01*' "
+               "cmd.run 'nova-manage version 2>/dev/null' --out yaml --static")
+        nova_version = self.installer_node.run_cmd(cmd)
+        if nova_version:
+            return nova_version.split(' ')[-1]
+        return None
 
     def get_sdn_version(self):
-        cmd = "apt-cache policy opendaylight|grep Installed"
+        '''
+        Returns a string with the SDN controller name and version, if present
+        '''
+        cmd = ("sudo salt -C 'I@opendaylight:server and *01*'"
+               "pkg.version opendaylight --out yaml --static")
         version = None
         for node in self.nodes:
             if manager.Role.ODL in node.roles and node.is_active():
-                odl_version = node.run_cmd(cmd)
+                odl_version = self.installer_node.run_cmd(cmd)
                 if odl_version:
                     version = 'OpenDaylight ' + odl_version.split(' ')[-1]
                     break
         return version
 
     def get_deployment_status(self):
-        cmd = "fuel env|tail -1|awk '{print $3}'"
-        result = self.installer_node.run_cmd(cmd)
-        if result is None or len(result) == 0:
-            return 'unknown'
-        elif 'operational' in result:
-            return 'active'
-        elif 'deploy' in result:
-            return 'deploying'
-        else:
-            return 'active'
+        '''
+        Returns a string describing the deployment status
+        '''
+        # NOTE: requires Fuel-side signaling of deployment status; stub for now
+        return 'active'
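
To make the new get_nodes() flow concrete, here is a standalone sketch
that feeds the sample 'pillar.item' output quoted in the hunk above
through the same parsing steps (roles simplified to plain strings
instead of manager.Role constants):

  from yaml import safe_load

  OUTPUT = '''
  cmp001.mcp-ovs-noha.local:
    _param:pxe_admin_address: 192.168.11.34
    nova:compute:enabled: true
    nova:controller:enabled: ''
  '''
  _MAP = {'nova:compute:enabled': 'compute',
          'nova:controller:enabled': 'controller'}
  for name, pillar in safe_load(OUTPUT).items():
      roles = [_MAP[key] for key in _MAP if pillar.get(key)]
      print('{} {} {}'.format(name, pillar['_param:pxe_admin_address'], roles))
  # -> cmp001.mcp-ovs-noha.local 192.168.11.34 ['compute']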
index 694df77..2b5aedb 100644 (file)
@@ -241,13 +241,13 @@ class Node(object):
         Returns the ovs version installed
         '''
         if self.is_active():
-            cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'"
-            return self.run_cmd(cmd)
+            cmd = "ovs-vsctl --version 2>/dev/null|head -1| sed 's/^.*) //'"
+            return self.run_cmd(cmd) or None
         return None
 
     def get_system_info(self):
         '''
-        Returns the ovs version installed
+        Returns system information
         '''
         cmd = 'grep MemTotal /proc/meminfo'
         memory = self.run_cmd(cmd).partition('MemTotal:')[-1].strip().encode()
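
For reference, the str.partition() call above behaves like this on a
made-up /proc/meminfo sample line:

  LINE = 'MemTotal:       16336132 kB'
  # partition() splits at the first 'MemTotal:'; [-1] is the remainder
  print(LINE.partition('MemTotal:')[-1].strip())  # 16336132 kB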
index 141ecbd..193a10a 100644 (file)
@@ -7,9 +7,9 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 #
-# Usage example:
+# Usage example (note: Fuel actually uses key-based auth, not user/pass):
 #   from opnfv.utils.Credentials import Credentials as credentials
-#   credentials("fuel", "10.20.0.2", "root", "r00tme").fetch('./openrc')
+#   credentials("fuel", "10.20.0.2", "user", "password").fetch('./openrc')
 #
 
 import os
@@ -74,12 +74,7 @@ class Credentials(object):
         pass
 
     def __fetch_creds_fuel(self, target_path):
-        creds_file = '/root/openrc'
-        try:
-            self.handler.get_file_from_controller(creds_file, target_path)
-        except Exception as e:
-            self.logger.error(
-                "Cannot get %s from controller. %e" % (creds_file, e))
+        # TODO
         pass
 
     def __fetch_creds_joid(self, target_path):
index 175a380..2272717 100644 (file)
@@ -49,10 +49,11 @@ def get_ssh_client(hostname,
             client = paramiko.SSHClient()
         else:
             client = ProxyHopClient()
+            proxy_password = proxy.get('password', None)
             proxy_pkey_file = proxy.get('pkey_file', '/root/.ssh/id_rsa')
             client.configure_jump_host(proxy['ip'],
                                        proxy['username'],
-                                       proxy['password'],
+                                       proxy_password,
                                        proxy_pkey_file)
         if client is None:
             raise Exception('Could not connect to client')
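
A hedged usage sketch of the updated helper (addresses and paths are
illustrative, matching the defaults used elsewhere in this change);
the proxy dict may now omit 'password' and carry 'pkey_file' instead:

  from opnfv.utils import ssh_utils

  # Direct, key-based connection (the path the Fuel adapter now takes)
  client = ssh_utils.get_ssh_client(hostname='192.168.11.34',
                                    username='ubuntu',
                                    pkey_file='/var/lib/opnfv/mcp.rsa')

  # Key-based jump through another host; no 'password' key required
  proxied = ssh_utils.get_ssh_client(
      hostname='192.168.11.34',
      username='ubuntu',
      proxy={'ip': '10.20.0.2',
             'username': 'ubuntu',
             'pkey_file': '/var/lib/opnfv/mcp.rsa'})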
index 0718fa3..2c51daa 100644 (file)
@@ -1,3 +1,4 @@
 paramiko>=2.0 # LGPLv2.1+
 mock>=2.0 # BSD
 requests>=2.14.2 # Apache-2.0
+pyyaml>=3.11 # MIT