3 # Copyright (c) 2018 Orange and others.
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 """Cloudify testcase implementation."""
12 from __future__ import division
19 from cloudify_rest_client import CloudifyClient
20 from cloudify_rest_client.executions import Execution
23 from functest.core import singlevm
class Cloudify(singlevm.SingleVm2):
    """Cloudify Orchestrator Case."""
    # Class-level logger (name-mangled to _Cloudify__logger).
    __logger = logging.getLogger(__name__)
    # Cloud image used to boot the VM hosting the Cloudify Manager.
    filename = ('/home/opnfv/functest/images/'
                'ubuntu-18.04-server-cloudimg-amd64.img')
    # Tuning consumed by the singlevm.SingleVm2 base class —
    # presumably SSH retry count and server creation timeout (seconds);
    # TODO confirm against singlevm.
    ssh_connect_loops = 12
    create_server_timeout = 600
    # TCP ports opened as ingress security-group rules for the manager
    # (80/443 HTTP(S); 5671 and 53333 presumably AMQPS and the Cloudify
    # internal REST port — confirm against Cloudify Manager docs).
    ports = [80, 443, 5671, 53333]
    # Docker image tarball of the Cloudify Manager community edition,
    # loaded on the VM and run as cfy_manager_local.
    cloudify_archive = ('/home/opnfv/functest/images/'
                        'cloudify-docker-manager-community-19.01.24.tar')
    # Name:tag of the container image loaded from cloudify_archive.
    cloudify_container = "docker-cfy-manager:latest"
45 def __init__(self, **kwargs):
46 """Initialize Cloudify testcase object."""
47 if "case_name" not in kwargs:
48 kwargs["case_name"] = "cloudify"
49 super().__init__(**kwargs)
50 self.cfy_client = None
54 for port in self.ports:
55 self.cloud.create_security_group_rule(
56 self.sec.id, port_range_min=port, port_range_max=port,
57 protocol='tcp', direction='ingress')
61 Deploy Cloudify Manager.
63 scpc = scp.SCPClient(self.ssh.get_transport())
64 scpc.put(self.cloudify_archive,
65 remote_path=os.path.basename(self.cloudify_archive))
66 (_, stdout, stderr) = self.ssh.exec_command(
67 "sudo apt-get update && "
68 "sudo apt-get install -y docker.io && "
69 "sudo docker load -i "
70 f"~/{os.path.basename(self.cloudify_archive)} && "
71 "sudo docker run --name cfy_manager_local -d "
72 "--restart unless-stopped -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
73 "--tmpfs /run --tmpfs /run/lock --security-opt seccomp:unconfined "
74 f"--cap-add SYS_ADMIN --network=host {self.cloudify_container}")
75 self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
76 self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
77 self.cfy_client = CloudifyClient(
78 host=self.fip.floating_ip_address if self.fip else (
79 self.sshvm.public_v4),
80 username='admin', password='admin', tenant='default_tenant')
81 self.__logger.info("Attemps running status of the Manager")
84 for loop in range(20):
87 "status %s", self.cfy_client.manager.get_status())
88 cfy_status = self.cfy_client.manager.get_status()['status']
90 "The current manager status is %s", cfy_status)
91 if str(cfy_status) != 'running':
92 raise Exception("Cloudify Manager isn't up and running")
93 for secret in iter(self.cfy_client.secrets.list()):
94 if secret_key == secret["key"]:
95 self.__logger.debug("Updating secrets: %s", secret_key)
96 self.cfy_client.secrets.update(
97 secret_key, secret_value)
100 self.__logger.debug("Creating secrets: %s", secret_key)
101 self.cfy_client.secrets.create(secret_key, secret_value)
102 self.cfy_client.secrets.delete(secret_key)
103 self.__logger.info("Secrets API successfully reached")
105 except Exception: # pylint: disable=broad-except
107 "try %s: Cloudify Manager isn't up and running \n%s",
108 loop + 1, traceback.format_exc())
111 self.__logger.error("Cloudify Manager isn't up and running")
113 self.__logger.info("Cloudify Manager is up and running")
116 def put_private_key(self):
117 """Put private keypair in manager"""
118 self.__logger.info("Put private keypair in manager")
119 scpc = scp.SCPClient(self.ssh.get_transport())
120 scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')
121 (_, stdout, stderr) = self.ssh.exec_command(
122 "sudo docker cp ~/cloudify_ims.pem "
123 "cfy_manager_local:/etc/cloudify/ && "
124 "sudo docker exec cfy_manager_local "
125 "chmod 444 /etc/cloudify/cloudify_ims.pem")
126 self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
127 self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
129 def upload_cfy_plugins(self, yaml, wgn):
130 """Upload Cloudify plugins"""
131 (_, stdout, stderr) = self.ssh.exec_command(
132 "sudo docker exec cfy_manager_local "
133 f"cfy plugins upload -y {yaml} {wgn} && "
134 "sudo docker exec cfy_manager_local cfy status")
135 self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
136 self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
138 def kill_existing_execution(self, dep_name):
139 """kill existing execution"""
141 self.__logger.info('Deleting the current deployment')
142 exec_list = self.cfy_client.executions.list()
143 for execution in exec_list:
144 if execution['status'] == "started":
146 self.cfy_client.executions.cancel(
147 execution['id'], force=True)
148 except Exception: # pylint: disable=broad-except
149 self.__logger.warning("Can't cancel the current exec")
150 execution = self.cfy_client.executions.start(
151 dep_name, 'uninstall', parameters=dict(ignore_failure=True))
152 wait_for_execution(self.cfy_client, execution, self.__logger)
153 self.cfy_client.deployments.delete(dep_name)
155 self.cfy_client.blueprints.delete(dep_name)
156 except Exception: # pylint: disable=broad-except
157 self.__logger.exception("Some issue during the undeployment ..")
def wait_for_execution(client, execution, logger, timeout=3600, ):
    """Wait for a workflow execution on Cloudify Manager.

    Streams the execution's events to *logger* and re-polls the
    execution status until it reaches one of Execution.END_STATES,
    or until *timeout* seconds elapse (None disables the deadline).
    """
    # if execution already ended - return without waiting
    if execution.status in Execution.END_STATES:
    # Compute an absolute deadline once; the remaining budget is
    # recomputed on every poll iteration below.
    if timeout is not None:
        deadline = time.time() + timeout

    # Poll for execution status and execution logs, until execution ends
    # and we receive an event of type in WORKFLOW_END_TYPES
    execution_ended = False
        # Fetch the next batch of events for this execution, oldest first.
        event_list = client.events.list(
            execution_id=execution.id,
            sort='@timestamp').items

        # Advance the pagination offset by what we just consumed.
        offset = offset + len(event_list)
        for event in event_list:
            logger.debug(event.get('message'))

        if timeout is not None:
            if time.time() > deadline:
                # NOTE(review): these literals are not f-strings, so the
                # {execution.workflow_id}/{execution.deployment_id}
                # placeholders are emitted verbatim — confirm intent.
                'execution of operation {execution.workflow_id} for '
                'deployment {execution.deployment_id} timed out')
            # update the remaining timeout
            timeout = deadline - time.time()

        # Refresh the execution object until it reaches an end state.
        if not execution_ended:
            execution = client.executions.get(execution.id)
            execution_ended = execution.status in Execution.END_STATES
def get_execution_id(client, deployment_id):
    """
    Get the execution id of a env preparation.

    network, security group, fip, VM creation

    Returns the id of the 'create_deployment_environment' execution of
    *deployment_id*.

    Raises:
        RuntimeError: when no such execution exists for the deployment.
    """
    executions = client.executions.list(deployment_id=deployment_id)
    for execution in executions:
        if execution.workflow_id == 'create_deployment_environment':
            # Fix: the match branch had no body, so the function could
            # never succeed — return the found execution's id.
            return execution.id
    raise RuntimeError('Failed to get create_deployment_environment '
                       'workflow execution.'
                       f'Available executions: {executions}')