Update to Alpine 3.14
[functest.git] / functest / core / cloudify.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2018 Orange and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
10 """Cloudify testcase implementation."""
11
12 from __future__ import division
13
14 import logging
15 import os
16 import time
17 import traceback
18
19 from cloudify_rest_client import CloudifyClient
20 from cloudify_rest_client.executions import Execution
21 import scp
22
23 from functest.core import singlevm
24
25
class Cloudify(singlevm.SingleVm2):
    """Cloudify Orchestrator Case.

    Boots an Ubuntu VM, deploys the dockerized Cloudify Manager
    (community edition) inside it and checks that the manager REST API
    (status + secrets) is reachable.
    """

    __logger = logging.getLogger(__name__)

    # Guest image hosting the dockerized Cloudify Manager.
    filename = ('/home/opnfv/functest/images/'
                'ubuntu-16.04-server-cloudimg-amd64-disk1.img')
    flavor_ram = 4096
    flavor_vcpus = 2
    flavor_disk = 40
    username = 'ubuntu'
    ssh_connect_loops = 12
    create_server_timeout = 600
    # TCP ports opened towards the manager (http, https, amqp and the
    # Cloudify internal REST port).
    ports = [80, 443, 5671, 53333]

    # Docker image archive shipped with the Functest container.
    cloudify_archive = ('/home/opnfv/functest/images/'
                        'cloudify-docker-manager-community-19.01.24.tar')
    cloudify_container = "docker-cfy-manager:latest"

    def __init__(self, **kwargs):
        """Initialize Cloudify testcase object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "cloudify"
        super().__init__(**kwargs)
        # Set by execute() once the manager answers; None until then.
        self.cfy_client = None

    def prepare(self):
        """Create the SingleVm2 resources and open the manager ports."""
        super().prepare()
        for port in self.ports:
            self.cloud.create_security_group_rule(
                self.sec.id, port_range_min=port, port_range_max=port,
                protocol='tcp', direction='ingress')

    def execute(self):
        """Deploy Cloudify Manager and wait until it is up and running.

        Returns:
            0 when the manager reaches the 'running' status and its
            secrets API works, 1 otherwise.
        """
        scpc = scp.SCPClient(self.ssh.get_transport())
        scpc.put(self.cloudify_archive,
                 remote_path=os.path.basename(self.cloudify_archive))
        # Install docker, load the manager image and start the container
        # in one shell pipeline so any failure aborts the chain.
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo wget https://get.docker.com/ -O script.sh && "
            "sudo chmod +x script.sh && "
            "sudo ./script.sh && "
            "sudo docker load -i ~/{} && "
            "sudo docker run --name cfy_manager_local -d "
            "--restart unless-stopped -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
            "--tmpfs /run --tmpfs /run/lock --security-opt seccomp:unconfined "
            "--cap-add SYS_ADMIN --network=host {}".format(
                os.path.basename(self.cloudify_archive),
                self.cloudify_container))
        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
        self.cfy_client = CloudifyClient(
            host=self.fip.floating_ip_address if self.fip else (
                self.sshvm.public_v4),
            username='admin', password='admin', tenant='default_tenant')
        self.__logger.info("Attempts to get the status of the Manager")
        # Dummy secret used to exercise the secrets API end to end
        # (create or update, then delete).
        secret_key = "foo"
        secret_value = "bar"
        # Poll up to 20 times (30s apart); the for/else falls through to
        # the error path only when no iteration breaks out successfully.
        for loop in range(20):
            try:
                # Fetch the status once and reuse it for log + check.
                status = self.cfy_client.manager.get_status()
                self.__logger.debug("status %s", status)
                cfy_status = status['status']
                self.__logger.info(
                    "The current manager status is %s", cfy_status)
                if str(cfy_status) != 'running':
                    raise Exception("Cloudify Manager isn't up and running")
                for secret in iter(self.cfy_client.secrets.list()):
                    if secret_key == secret["key"]:
                        self.__logger.debug("Updating secrets: %s", secret_key)
                        self.cfy_client.secrets.update(
                            secret_key, secret_value)
                        break
                else:
                    self.__logger.debug("Creating secrets: %s", secret_key)
                    self.cfy_client.secrets.create(secret_key, secret_value)
                self.cfy_client.secrets.delete(secret_key)
                self.__logger.info("Secrets API successfully reached")
                break
            except Exception:  # pylint: disable=broad-except
                self.__logger.debug(
                    "try %s: Cloudify Manager isn't up and running \n%s",
                    loop + 1, traceback.format_exc())
                time.sleep(30)
        else:
            self.__logger.error("Cloudify Manager isn't up and running")
            return 1
        self.__logger.info("Cloudify Manager is up and running")
        return 0

    def put_private_key(self):
        """Put private keypair in manager"""
        self.__logger.info("Put private keypair in manager")
        scpc = scp.SCPClient(self.ssh.get_transport())
        scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')
        # Copy the key into the container and make it world-readable so
        # the cfy user inside the manager can use it.
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo docker cp ~/cloudify_ims.pem "
            "cfy_manager_local:/etc/cloudify/ && "
            "sudo docker exec cfy_manager_local "
            "chmod 444 /etc/cloudify/cloudify_ims.pem")
        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))

    def upload_cfy_plugins(self, yaml, wgn):
        """Upload Cloudify plugins.

        Args:
            yaml: path (in the manager) to the plugin yaml descriptor.
            wgn: path (in the manager) to the plugin wagon archive.
        """
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo docker exec cfy_manager_local "
            "cfy plugins upload -y {} {} && "
            "sudo docker exec cfy_manager_local cfy status".format(yaml, wgn))
        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))

    def kill_existing_execution(self, dep_name):
        """Kill any running execution and undeploy dep_name.

        Best effort: every failure is logged and swallowed so callers
        can proceed with a fresh deployment.
        """
        try:
            self.__logger.info('Deleting the current deployment')
            exec_list = self.cfy_client.executions.list()
            for execution in exec_list:
                if execution['status'] == "started":
                    try:
                        self.cfy_client.executions.cancel(
                            execution['id'], force=True)
                    except Exception:  # pylint: disable=broad-except
                        self.__logger.warning("Can't cancel the current exec")
            execution = self.cfy_client.executions.start(
                dep_name, 'uninstall', parameters=dict(ignore_failure=True))
            wait_for_execution(self.cfy_client, execution, self.__logger)
            self.cfy_client.deployments.delete(dep_name)
            # Give the manager time to release the deployment before
            # removing the blueprint of the same name.
            time.sleep(10)
            self.cfy_client.blueprints.delete(dep_name)
        except Exception:  # pylint: disable=broad-except
            self.__logger.exception("Some issue during the undeployment ..")
160
161
def wait_for_execution(client, execution, logger, timeout=3600):
    """Wait for a workflow execution on Cloudify Manager.

    Polls the execution status every 5 seconds and streams its event
    messages (including logs) to *logger* as they arrive.

    Args:
        client: CloudifyClient connected to the manager.
        execution: the execution object to wait for.
        logger: logger receiving the execution event messages.
        timeout: max seconds to wait; None waits forever.

    Returns:
        The final execution object (status in Execution.END_STATES).

    Raises:
        RuntimeError: when the timeout elapses before the execution ends.
    """
    # If execution already ended - return without waiting.
    if execution.status in Execution.END_STATES:
        return execution

    deadline = None if timeout is None else time.time() + timeout

    # Poll for execution status and execution logs, until execution ends.
    offset = 0
    batch_size = 50
    execution_ended = False
    while True:
        event_list = client.events.list(
            execution_id=execution.id,
            _offset=offset,
            _size=batch_size,
            include_logs=True,
            sort='@timestamp').items

        # Advance the event cursor so each message is logged only once.
        offset += len(event_list)
        for event in event_list:
            logger.debug(event.get('message'))

        if deadline is not None and time.time() > deadline:
            raise RuntimeError(
                'execution of operation {0} for deployment {1} '
                'timed out'.format(execution.workflow_id,
                                   execution.deployment_id))

        if not execution_ended:
            execution = client.executions.get(execution.id)
            execution_ended = execution.status in Execution.END_STATES

        if execution_ended:
            break

        time.sleep(5)

    return execution
208
209
def get_execution_id(client, deployment_id):
    """Get the execution id of a env preparation.

    network, security group, fip, VM creation

    Args:
        client: CloudifyClient connected to the manager.
        deployment_id: deployment whose setup execution is wanted.

    Returns:
        The 'create_deployment_environment' execution object.

    Raises:
        RuntimeError: when no such execution exists for the deployment.
    """
    executions = client.executions.list(deployment_id=deployment_id)
    for execution in executions:
        if execution.workflow_id == 'create_deployment_environment':
            return execution
    # Trailing space after the first sentence keeps the concatenated
    # message readable ("... execution. Available executions: ...").
    raise RuntimeError('Failed to get create_deployment_environment '
                       'workflow execution. '
                       'Available executions: {0}'.format(executions))