b5bd1b3c2569fe13804276aa18ddff467fb70345
[functest.git] / functest / core / cloudify.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2018 Orange and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
10 """Cloudify testcase implementation."""
11
12 from __future__ import division
13
14 import logging
15 import os
16 import time
17 import traceback
18
19 from cloudify_rest_client import CloudifyClient
20 from cloudify_rest_client.executions import Execution
21 import scp
22
23 from functest.core import singlevm
24
25
class Cloudify(singlevm.SingleVm2):
    """Cloudify Orchestrator Case.

    Boots a single Ubuntu VM, installs Docker on it over SSH, loads the
    dockerized Cloudify Manager community image and starts it, then
    checks the manager's REST API (status + secrets) is functional.
    """

    __logger = logging.getLogger(__name__)

    # Guest image and flavor used to boot the manager VM.
    filename = ('/home/opnfv/functest/images/'
                'ubuntu-16.04-server-cloudimg-amd64-disk1.img')
    flavor_ram = 4096
    flavor_vcpus = 2
    flavor_disk = 40
    username = 'ubuntu'
    ssh_connect_loops = 12
    create_server_timeout = 600
    # TCP ports opened as ingress rules in prepare() so the manager's
    # services are reachable through the floating IP.
    ports = [80, 443, 5671, 53333]

    # Local tarball holding the dockerized Cloudify Manager image, and
    # the image tag it loads as (used by `docker run` below).
    cloudify_archive = ('/home/opnfv/functest/images/'
                        'cloudify-docker-manager-community-19.01.24.tar')
    cloudify_container = "docker-cfy-manager:latest"

    def __init__(self, **kwargs):
        """Initialize Cloudify testcase object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "cloudify"
        super(Cloudify, self).__init__(**kwargs)
        # REST client toward the manager; created in execute() once the
        # manager container is up.
        self.cfy_client = None

    def prepare(self):
        """Create the VM resources and open the required TCP ports."""
        super(Cloudify, self).prepare()
        for port in self.ports:
            self.cloud.create_security_group_rule(
                self.sec.id, port_range_min=port, port_range_max=port,
                protocol='tcp', direction='ingress')

    def execute(self):
        """
        Deploy Cloudify Manager.

        Uploads the manager archive to the VM, installs Docker, starts
        the manager container, then polls its status and exercises the
        secrets API until it responds.

        Returns 0 on success, 1 if the manager never came up.
        """
        # Copy the manager tarball to the VM over the existing SSH
        # transport, then install Docker (get.docker.com convenience
        # script), load the image and run it.  The cgroup mount,
        # tmpfs mounts, seccomp/SYS_ADMIN options and host networking
        # match what the dockerized manager needs to run its services
        # inside the container — keep them in sync with upstream docs.
        scpc = scp.SCPClient(self.ssh.get_transport())
        scpc.put(self.cloudify_archive,
                 remote_path=os.path.basename(self.cloudify_archive))
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo wget https://get.docker.com/ -O script.sh && "
            "sudo chmod +x script.sh && "
            "sudo ./script.sh && "
            "sudo docker load -i ~/{} && "
            "sudo docker run --name cfy_manager_local -d "
            "--restart unless-stopped -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
            "--tmpfs /run --tmpfs /run/lock --security-opt seccomp:unconfined "
            "--cap-add SYS_ADMIN --network=host {}".format(
                os.path.basename(self.cloudify_archive),
                self.cloudify_container))
        self.__logger.debug("output:\n%s", stdout.read())
        self.__logger.debug("error:\n%s", stderr.read())
        # Default community credentials; the manager listens on the
        # VM's floating IP thanks to --network=host.
        self.cfy_client = CloudifyClient(
            host=self.fip.floating_ip_address,
            username='admin', password='admin', tenant='default_tenant')
        self.__logger.info("Attemps running status of the Manager")
        secret_key = "foo"
        secret_value = "bar"
        # Poll up to 20 times, 30 s apart (~10 min).  Each attempt must
        # see status 'running' AND complete a secrets round-trip
        # (update-or-create, then delete) to prove the REST API works.
        for loop in range(20):
            try:
                self.__logger.debug(
                    "status %s", self.cfy_client.manager.get_status())
                cfy_status = self.cfy_client.manager.get_status()['status']
                self.__logger.info(
                    "The current manager status is %s", cfy_status)
                if str(cfy_status) != 'running':
                    raise Exception("Cloudify Manager isn't up and running")
                # for/else: update the secret if it already exists,
                # otherwise (no break) create it.
                for secret in iter(self.cfy_client.secrets.list()):
                    if secret_key == secret["key"]:
                        self.__logger.debug("Updating secrets: %s", secret_key)
                        self.cfy_client.secrets.update(
                            secret_key, secret_value)
                        break
                else:
                    self.__logger.debug("Creating secrets: %s", secret_key)
                    self.cfy_client.secrets.create(secret_key, secret_value)
                self.cfy_client.secrets.delete(secret_key)
                self.__logger.info("Secrets API successfully reached")
                break
            except Exception:  # pylint: disable=broad-except
                self.__logger.debug(
                    "try %s: Cloudify Manager isn't up and running \n%s",
                    loop + 1, traceback.format_exc())
                time.sleep(30)
        else:
            # All 20 attempts failed.
            self.__logger.error("Cloudify Manager isn't up and running")
            return 1
        self.__logger.info("Cloudify Manager is up and running")
        return 0

    def put_private_key(self):
        """Put private keypair in manager.

        Copies the testcase's SSH private key into the manager
        container so blueprints can reach the deployed VMs.
        """
        self.__logger.info("Put private keypair in manager")
        scpc = scp.SCPClient(self.ssh.get_transport())
        scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo docker cp ~/cloudify_ims.pem "
            "cfy_manager_local:/etc/cloudify/ && "
            "sudo docker exec cfy_manager_local "
            "chmod 444 /etc/cloudify/cloudify_ims.pem")
        self.__logger.debug("output:\n%s", stdout.read())
        self.__logger.debug("error:\n%s", stderr.read())

    def upload_cfy_plugins(self, yaml, wgn):
        """Upload Cloudify plugins.

        Runs `cfy plugins upload` inside the manager container with the
        given plugin yaml and wagon (wgn) paths, then `cfy status`.
        """
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo docker exec cfy_manager_local "
            "cfy plugins upload -y {} {} && "
            "sudo docker exec cfy_manager_local cfy status".format(yaml, wgn))
        self.__logger.debug("output:\n%s", stdout.read())
        self.__logger.debug("error:\n%s", stderr.read())

    def kill_existing_execution(self, dep_name):
        """Kill existing execution.

        Best-effort teardown of a previous deployment: force-cancel any
        started execution, run the uninstall workflow, then delete the
        deployment and its blueprint.  All failures are logged, never
        raised.
        """
        try:
            self.__logger.info('Deleting the current deployment')
            exec_list = self.cfy_client.executions.list()
            for execution in exec_list:
                if execution['status'] == "started":
                    try:
                        self.cfy_client.executions.cancel(
                            execution['id'], force=True)
                    except Exception:  # pylint: disable=broad-except
                        self.__logger.warning("Can't cancel the current exec")
            execution = self.cfy_client.executions.start(
                dep_name, 'uninstall', parameters=dict(ignore_failure=True))
            wait_for_execution(self.cfy_client, execution, self.__logger)
            self.cfy_client.deployments.delete(dep_name)
            # Small grace period between deleting the deployment and its
            # blueprint — presumably to let the manager settle; confirm.
            time.sleep(10)
            self.cfy_client.blueprints.delete(dep_name)
        except Exception:  # pylint: disable=broad-except
            self.__logger.exception("Some issue during the undeployment ..")
159
160
def wait_for_execution(client, execution, logger, timeout=3600):
    """Wait for a workflow execution on Cloudify Manager.

    Polls the execution every 5 seconds, streaming its events and logs
    to *logger*, until the execution reaches one of
    ``Execution.END_STATES``.

    :param client: CloudifyClient connected to the manager.
    :param execution: the Execution object to wait for.
    :param logger: logger receiving each event's message at debug level.
    :param timeout: max seconds to wait; ``None`` waits forever.
    :returns: the up-to-date Execution object.
    :raises RuntimeError: if the execution did not end before *timeout*.
    """
    # If execution already ended - return without waiting.
    if execution.status in Execution.END_STATES:
        return execution

    # Fixed absolute deadline; computed once (the original pointlessly
    # reassigned `timeout` each iteration, which had no effect).
    deadline = time.time() + timeout if timeout is not None else None

    # Poll for execution status and execution logs, until execution ends
    # and we receive an event of type in WORKFLOW_END_TYPES.
    offset = 0
    batch_size = 50
    execution_ended = False
    while True:
        # Fetch the next batch of events (logs included) in timestamp
        # order; `offset` advances so each event is seen exactly once.
        event_list = client.events.list(
            execution_id=execution.id,
            _offset=offset,
            _size=batch_size,
            include_logs=True,
            sort='@timestamp').items

        offset += len(event_list)
        for event in event_list:
            logger.debug(event.get('message'))

        if deadline is not None and time.time() > deadline:
            raise RuntimeError(
                'execution of operation {0} for deployment {1} '
                'timed out'.format(execution.workflow_id,
                                   execution.deployment_id))

        if not execution_ended:
            # Refresh the execution object to get its current status.
            execution = client.executions.get(execution.id)
            execution_ended = execution.status in Execution.END_STATES

        if execution_ended:
            break

        time.sleep(5)

    return execution
207
208
def get_execution_id(client, deployment_id):
    """
    Get the execution id of a env preparation.

    network, security group, fip, VM creation
    """
    executions = client.executions.list(deployment_id=deployment_id)
    # Pick the first execution whose workflow prepared the environment.
    found = next(
        (execution for execution in executions
         if execution.workflow_id == 'create_deployment_environment'),
        None)
    if found is not None:
        return found
    raise RuntimeError('Failed to get create_deployment_environment '
                       'workflow execution.'
                       'Available executions: {0}'.format(executions))