##############################################################################
# Copyright (c) 2018 Ericsson AB and others.
# Author: Jose Lausuch (jose.lausuch@ericsson.com)
#         George Paraskevopoulos (geopar@intracom-telecom.com)
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
"""
This module implements the Fuel@OPNFV adapter.

- The host executing this module needs network connectivity to the cluster
  via the mcpcontrol network (usually 10.20.0.0/24, created by the
  installer) and the PXE/admin network; both are always available on an
  OPNFV Pharos jumpserver.
- Key-based SSH auth is used throughout the cluster, without proxying
  cluster node access via the Salt master (as old Fuel@OPNFV used to do).

See the usage sketch under the __main__ guard at the end of this module.
"""

from yaml import safe_load, YAMLError

from opnfv.deployment import manager
from opnfv.utils import opnfv_logger as logger
from opnfv.utils import ssh_utils

LOGGER = logger.Logger(__name__).getLogger()


class FuelAdapter(manager.DeploymentHandler):
    """
    This class extends the generic handler with Fuel@OPNFV specifics.
    """

    def __init__(self, installer_ip, installer_user, pkey_file):
        super(FuelAdapter, self).__init__(installer='fuel',
                                          installer_ip=installer_ip,
                                          installer_user=installer_user,
                                          installer_pwd=None,
                                          pkey_file=pkey_file)

    def get_nodes(self, options=None):
        """
        Generates a list of all the nodes in the deployment.
        """
        # Unlike old Fuel@OPNFV, we don't keep track of different clusters
        # explicitly, but through domain names.
        # For simplicity, we will assume a single cluster per Salt master
        # node.

        # If we have previously retrieved all the nodes, don't do it again.
        # This fails the first time, when the constructor calls this method
        # before self.nodes exists, hence the try/except.
        try:
            if len(self.nodes) > 0:
                return self.nodes
        # pylint: disable=bare-except
        except:
            pass

        nodes = []

        # Manager roles to reclass properties mapping
        _map = {
            'salt:master:enabled': manager.Role.INSTALLER,
            'maas:region:enabled': manager.Role.INSTALLER,
            'nova:controller:enabled': manager.Role.CONTROLLER,
            'nova:compute:enabled': manager.Role.COMPUTE,
            'opendaylight:server:enabled': manager.Role.ODL,
        }

        # list() keeps the key join Python 3 compatible (dict views do not
        # support concatenation with +)
        cmd = ("sudo salt '*' pillar.item {} --out yaml --static 2>/dev/null"
               .format(' '.join(list(_map.keys()) +
                                ['_param:pxe_admin_address'])))
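        # For illustration, the rendered command looks roughly like (pillar
        # key order follows _map above):
        #   sudo salt '*' pillar.item salt:master:enabled ... \
        #       _param:pxe_admin_address --out yaml --static 2>/dev/null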
        # Sample output (for one node):
        #   cmp001.mcp-ovs-noha.local:
        #     _param:pxe_admin_address: 192.168.11.34
        #     maas:region:enabled: ''
        #     nova:compute:enabled: true
        #     nova:controller:enabled: ''
        #     opendaylight:server:enabled: ''
        #     salt:master:enabled: ''
        output = self.installer_node.run_cmd(cmd)
        if output.startswith('No minions matched the target'):
            LOGGER.info('No nodes found in the deployment.')
            return nodes

        try:
            yaml_output = safe_load(output)
        except YAMLError as exc:
            LOGGER.error(exc)
            return nodes
        for node_name in yaml_output.keys():
            ip_addr = yaml_output[node_name]['_param:pxe_admin_address']
            # 'ubuntu' is the default node user on Fuel@OPNFV (MCP) clusters
            ssh_client = ssh_utils.get_ssh_client(hostname=ip_addr,
                                                  username='ubuntu',
                                                  pkey_file=self.pkey_file)
            node = manager.Node(
                node_name, ip_addr, node_name,
                status=manager.NodeStatus.STATUS_OK,
                roles=[_map[x] for x in _map if yaml_output[node_name][x]],
                ssh_client=ssh_client)
            nodes.append(node)

        return nodes

    def get_openstack_version(self):
        """
        Returns a string of the OpenStack version (nova-compute).
        """
        cmd = ("sudo salt -C 'I@nova:controller and *01*' "
               "cmd.run 'nova-manage version 2>/dev/null' --out yaml --static")
        nova_version = self.installer_node.run_cmd(cmd)
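        # Illustrative reply as rendered by salt (hostname and version are
        # sample values, not guaranteed output):
        #   ctl01.mcp-ovs-noha.local: 17.0.13
        # The last whitespace-separated token is kept as the version below.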
        if nova_version:
            return nova_version.split(' ')[-1]
        return None

    def get_sdn_version(self):
        """
        Returns a string of the SDN controller and its version, if it exists.
        """
        cmd = ("sudo salt -C 'I@opendaylight:server and *01*' "
               "pkg.version opendaylight --out yaml --static")
        version = None
        for node in self.nodes:
            if manager.Role.ODL in node.roles and node.is_active():
                odl_version = self.installer_node.run_cmd(cmd)
                if odl_version:
                    version = 'OpenDaylight ' + odl_version.split(' ')[-1]
                    break
        return version

    def get_deployment_status(self):
        """
        Returns a string of the status of the deployment.
        """
        # NOTE: Requires Fuel-side signaling of deployment status, stub it
        return 'active'
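

if __name__ == '__main__':
    # Minimal usage sketch. The Salt master IP, user and key path below are
    # assumptions for a typical Pharos jumpserver, not values shipped with
    # this module; node attribute names are assumed from the manager.Node
    # constructor arguments. Adjust all of them to the actual deployment.
    ADAPTER = FuelAdapter(installer_ip='10.20.0.2',
                          installer_user='ubuntu',
                          pkey_file='/var/lib/opnfv/mcp.rsa')
    for NODE in ADAPTER.get_nodes():
        LOGGER.info('%s: %s', NODE.name, NODE.roles)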