From: Alex Yang
Date: Thu, 21 Dec 2017 08:06:13 +0000 (+0800)
Subject: Prepare integration of daisy and dovetail
X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?p=releng.git;a=commitdiff_plain;h=652e4dbb90252153e85cee498ccf64c05490814c

Prepare integration of daisy and dovetail

JIRA: DAISY-76

1. add daisy adapter in releng utils and modules
2. add daisy adapter in dovetail jjb script
3. support id_dsa as ssh key

Change-Id: I15a1c0cfcc17ae1fe6b7020f674b8bbbb72f4a7a
Signed-off-by: Alex Yang
---
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index e084e4bd0..b214d8530 100755
--- a/jjb/dovetail/dovetail-run.sh
+++ b/jjb/dovetail/dovetail-run.sh
@@ -109,6 +109,8 @@ if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
         options="-u root -p r00tme"
     elif [[ ${INSTALLER_TYPE} == apex ]]; then
         options="-u stack -k /root/.ssh/id_rsa"
+    elif [[ ${INSTALLER_TYPE} == daisy ]]; then
+        options="-u root -p r00tme"
     else
         echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
         echo "HA test cases may not run properly."
@@ -147,6 +149,12 @@ if [ "$INSTALLER_TYPE" == "apex" ]; then
     sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
 fi
 
+if [ "$INSTALLER_TYPE" == "daisy" ]; then
+    echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
+    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
+
 image_path=${HOME}/opnfv/dovetail/images
 if [[ ! -d ${image_path} ]]; then
     mkdir -p ${image_path}
diff --git a/modules/opnfv/deployment/daisy/__init__.py b/modules/opnfv/deployment/daisy/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/modules/opnfv/deployment/daisy/adapter.py b/modules/opnfv/deployment/daisy/adapter.py
new file mode 100644
index 000000000..5634e242b
--- /dev/null
+++ b/modules/opnfv/deployment/daisy/adapter.py
@@ -0,0 +1,202 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+from opnfv.deployment import manager
+from opnfv.utils import opnfv_logger as logger
+from opnfv.utils import ssh_utils
+
+logger = logger.Logger(__name__).getLogger()
+
+
+class DaisyAdapter(manager.DeploymentHandler):
+
+    def __init__(self, installer_ip, installer_user, installer_pwd):
+        super(DaisyAdapter, self).__init__(installer='daisy',
+                                           installer_ip=installer_ip,
+                                           installer_user=installer_user,
+                                           installer_pwd=installer_pwd,
+                                           pkey_file=None)
+
+    def _get_clusters(self):
+        clusters = []
+        cmd = 'source /root/daisyrc_admin; daisy cluster-list | grep -v "+--"'
+        output = self.installer_node.run_cmd(cmd)
+        lines = output.rsplit('\n')
+        if len(lines) < 2:
+            logger.info("No environments found in the deployment.")
+            return None
+        else:
+            fields = lines[0].rsplit('|')
+
+            index_id = -1
+            index_status = -1
+            index_name = -1
+            index_nodes = -1
+
+            for i in range(len(fields)):
+                if "ID" in fields[i]:
+                    index_id = i
+                elif "Status" in fields[i]:
+                    index_status = i
+                elif "Name" in fields[i]:
+                    index_name = i
+                elif "Nodes" in fields[i]:
+                    index_nodes = i
+
+            # order env info
+            for i in range(1, len(lines)):
+                fields = lines[i].rsplit('|')
+                dict = {"id": fields[index_id].strip(),
+                        "status": fields[index_status].strip(),
+                        "name": fields[index_name].strip(),
+                        "nodes": fields[index_nodes].strip()}
+                clusters.append(dict)
+
+        return clusters
+
+    def get_nodes(self, options=None):
+        if hasattr(self, 'nodes') and len(self.nodes) > 0:
+            if options and 'cluster' in options and options['cluster']:
+                nodes = []
+                for node in self.nodes:
+                    if str(node.info['cluster']) == str(options['cluster']):
+                        nodes.append(node)
+                return nodes
+            else:
+                return self.nodes
+
+        clusters = self._get_clusters()
+        nodes = []
+        for cluster in clusters:
+            if options and 'cluster' in options and options['cluster']:
+                if cluster["id"] != options['cluster']:
+                    continue
+            cmd = 'source /root/daisyrc_admin; daisy host-list ' \
+                  '--cluster-id {} | grep -v "+--"'.format(cluster["id"])
+            output = self.installer_node.run_cmd(cmd)
+            lines = output.rsplit('\n')
+            if len(lines) < 2:
+                logger.info("No nodes found in the cluster {}".format(
+                    cluster["id"]))
+                continue
+
+            fields = lines[0].rsplit('|')
+            index_id = -1
+            index_status = -1
+            index_name = -1
+
+            for i in range(len(fields)):
+                if "ID" in fields[i]:
+                    index_id = i
+                elif "Role_status" in fields[i]:
+                    index_status = i
+                elif "Name" in fields[i]:
+                    index_name = i
+
+            for i in range(1, len(lines)):
+                fields = lines[i].rsplit('|')
+                id = fields[index_id].strip().encode()
+                status_node = fields[index_status].strip().encode().lower()
+                name = fields[index_name].strip().encode()
+                ip = ".".join(name.split("-")[1:])
+
+                cmd_role = 'source /root/daisyrc_admin; ' \
+                           'daisy host-detail {} | grep "^| role"'.format(id)
+                output_role = self.installer_node.run_cmd(cmd_role)
+                role_all = output_role.rsplit('|')[2].strip().encode()
+                roles = []
+                if 'COMPUTER' in role_all:
+                    roles.append(manager.Role.COMPUTE)
+                if 'CONTROLLER_LB' in role_all or 'CONTROLLER_HA' in role_all:
+                    roles.append(manager.Role.CONTROLLER)
+
+                ssh_client = None
+                if status_node == 'active':
+                    status = manager.NodeStatus.STATUS_OK
+                    proxy = {'ip': self.installer_ip,
+                             'username': self.installer_user,
+                             'password': self.installer_pwd,
+                             'pkey_file': '/root/.ssh/id_dsa'}
+                    ssh_client = ssh_utils.get_ssh_client(hostname=ip,
+                                                          username='root',
+                                                          proxy=proxy)
+                else:
+                    status = manager.NodeStatus.STATUS_INACTIVE
+
+                node = DaisyNode(id, ip, name, status, roles, ssh_client)
+                nodes.append(node)
+        return nodes
+
+    def get_openstack_version(self):
+        cmd = 'docker exec nova_api nova-manage version 2>/dev/null'
+        version = None
+        for node in self.nodes:
+            if node.is_controller() and node.is_active():
+                version = node.run_cmd(cmd)
+                break
+        return version
+
+    def get_sdn_version(self):
+        version = None
+        for node in self.nodes:
+            if manager.Role.CONTROLLER in node.roles and node.is_active():
+                cmd = 'docker inspect --format=\'{{.Name}}\' `docker ps -q`'
+                output = node.run_cmd(cmd)
+                if '/opendaylight' in output.rsplit('\n'):
+                    cmd2 = 'docker exec opendaylight ' \
+                           'sudo yum info opendaylight 2>/dev/null ' \
+                           '| grep Version | tail -1'
+                    odl_ver = node.run_cmd(cmd2)
+                    if odl_ver:
+                        version = 'OpenDaylight: ' + odl_ver.split(' ')[-1]
+                    break
+        return version
+
+    def get_deployment_status(self):
+        clusters = self._get_clusters()
+        if clusters is None or len(clusters) == 0:
+            return 'unknown'
+        else:
+            return clusters[0]['status']
+
+
+class DaisyNode(manager.Node):
+
+    def __init__(self,
+                 id,
+                 ip,
+                 name,
+                 status,
+                 roles=None,
+                 ssh_client=None,
+                 info=None):
+        super(DaisyNode, self).__init__(id, ip, name, status,
+                                        roles, ssh_client, info)
+
+    def is_odl(self):
+        '''
+        Returns if the node is an opendaylight
+        '''
+        if manager.Role.CONTROLLER in self.roles and self.is_active():
+            cmd = 'docker inspect --format=\'{{.Name}}\' `docker ps -q`'
+            output = self.run_cmd(cmd)
+            if '/opendaylight' in output.rsplit('\n'):
+                return True
+        return False
+
+    def get_ovs_info(self):
+        '''
+        Returns the ovs version installed
+        '''
+        if self.is_active():
+            cmd = 'docker exec openvswitch_vswitchd ' \
+                  'ovs-vsctl --version | head -1 | awk \'{print $NF}\''
+            return self.run_cmd(cmd)
+        return None
diff --git a/modules/opnfv/deployment/factory.py b/modules/opnfv/deployment/factory.py
index e14783fe2..2788e5eaa 100644
--- a/modules/opnfv/deployment/factory.py
+++ b/modules/opnfv/deployment/factory.py
@@ -12,6 +12,7 @@ from opnfv.deployment.apex import adapter as apex_adapter
 from opnfv.deployment.compass import adapter as compass_adapter
 from opnfv.deployment.fuel import adapter as fuel_adapter
 from opnfv.deployment.osa import adapter as osa_adapter
+from opnfv.deployment.daisy import adapter as daisy_adapter
 from opnfv.utils import opnfv_logger as logger
 
 logger = logger.Logger(__name__).getLogger()
@@ -51,6 +52,10 @@ class Factory(object):
             return osa_adapter.OSAAdapter(installer_ip=installer_ip,
                                           installer_user=installer_user,
                                           pkey_file=pkey_file)
+        elif installer.lower() == "daisy":
+            return daisy_adapter.DaisyAdapter(installer_ip=installer_ip,
+                                              installer_user=installer_user,
+                                              installer_pwd=installer_pwd)
         else:
             raise Exception("Installer adapter is not implemented for "
                             "the given installer.")
diff --git a/modules/opnfv/utils/ssh_utils.py b/modules/opnfv/utils/ssh_utils.py
index 4c5ff5c1b..175a38078 100644
--- a/modules/opnfv/utils/ssh_utils.py
+++ b/modules/opnfv/utils/ssh_utils.py
@@ -49,9 +49,11 @@ def get_ssh_client(hostname,
             client = paramiko.SSHClient()
         else:
             client = ProxyHopClient()
+            proxy_pkey_file = proxy.get('pkey_file', '/root/.ssh/id_rsa')
             client.configure_jump_host(proxy['ip'],
                                        proxy['username'],
-                                       proxy['password'])
+                                       proxy['password'],
+                                       proxy_pkey_file)
 
         if client is None:
             raise Exception('Could not connect to client')
@@ -115,6 +117,8 @@ class ProxyHopClient(paramiko.SSHClient):
                             jh_ssh_key='/root/.ssh/id_rsa'):
         self.proxy_ip = jh_ip
         self.proxy_ssh_key = jh_ssh_key
+        self.local_ssh_key = os.path.join(os.getcwd(),
+                                          jh_ssh_key.split('/')[-1])
         self.proxy_ssh = paramiko.SSHClient()
         self.proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.proxy_ssh.connect(jh_ip,
@@ -138,8 +142,12 @@ class ProxyHopClient(paramiko.SSHClient):
                                               self.local_ssh_key)
         if get_file_res is None:
             raise Exception('Could\'t fetch SSH key from jump host')
-        proxy_key = (paramiko.RSAKey
-                     .from_private_key_file(self.local_ssh_key))
+        if self.proxy_ssh_key.split('/')[-1] == 'id_dsa':
+            proxy_key = (paramiko.DSSKey
+                         .from_private_key_file(self.local_ssh_key))
+        else:
+            proxy_key = (paramiko.RSAKey
+                         .from_private_key_file(self.local_ssh_key))
 
         self.proxy_channel = self.proxy_transport.open_channel(
             "direct-tcpip",
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
index def5ecca8..a60ece475 100644
--- a/utils/create_pod_file.py
+++ b/utils/create_pod_file.py
@@ -92,6 +92,9 @@ def create_file(handler, INSTALLER_TYPE):
     if args.INSTALLER_TYPE == 'compass':
         for item in node_list:
             item['password'] = 'root'
+    elif args.INSTALLER_TYPE == 'daisy':
+        for item in node_list:
+            item['key_filename'] = '/root/.ssh/id_dsa'
     else:
         for item in node_list:
             item['key_filename'] = args.sshkey
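
Illustrative usage (not part of the patch): a minimal sketch of how the new
Daisy handler is meant to be driven, assuming the deployment Factory keeps
exposing get_handler() as it does for the other installers; the jump-host IP
10.20.11.2 is a placeholder, and the root/r00tme credentials simply mirror
the ones used for pod.yaml generation above.

    from opnfv.deployment.factory import Factory

    # Route installer='daisy' to the new DaisyAdapter; authentication against
    # the Daisy jump host is password-based, so no pkey_file is passed.
    handler = Factory.get_handler(installer='daisy',
                                  installer_ip='10.20.11.2',
                                  installer_user='root',
                                  installer_pwd='r00tme')

    # Nodes are discovered via "daisy cluster-list" / "daisy host-list" on the
    # jump host; active nodes get an SSH client proxied through the id_dsa key.
    for node in handler.get_nodes():
        print("%s %s %s %s" % (node.name, node.ip, node.roles, node.status))

    print(handler.get_deployment_status())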