Stop using docker install script
[functest.git] / functest / core / cloudify.py
1 #!/usr/bin/env python
2
3 # Copyright (c) 2018 Orange and others.
4 #
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9
10 """Cloudify testcase implementation."""
11
12 from __future__ import division
13
14 import logging
15 import os
16 import time
17 import traceback
18
19 from cloudify_rest_client import CloudifyClient
20 from cloudify_rest_client.executions import Execution
21 import scp
22
23 from functest.core import singlevm
24
25
class Cloudify(singlevm.SingleVm2):
    """Cloudify Orchestrator Case.

    Boots a single Ubuntu VM, copies the Cloudify Manager community
    container archive onto it, installs Docker, runs the Manager
    container on the host network, and then polls the Manager REST API
    (status and secrets endpoints) until it is up and running.
    """

    __logger = logging.getLogger(__name__)

    # Guest image and flavor sized for the Cloudify Manager container.
    filename = ('/home/opnfv/functest/images/'
                'ubuntu-18.04-server-cloudimg-amd64.img')
    flavor_ram = 4096
    flavor_vcpus = 2
    flavor_disk = 40
    username = 'ubuntu'
    ssh_connect_loops = 12
    create_server_timeout = 600
    # TCP ports opened towards the Manager: HTTP, HTTPS, AMQP (5671)
    # and the Cloudify internal REST port (53333).
    ports = [80, 443, 5671, 53333]

    cloudify_archive = ('/home/opnfv/functest/images/'
                        'cloudify-docker-manager-community-19.01.24.tar')
    cloudify_container = "docker-cfy-manager:latest"

    def __init__(self, **kwargs):
        """Initialize Cloudify testcase object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "cloudify"
        super().__init__(**kwargs)
        # REST client pointing at the Manager; set by execute() once the
        # container has been started.
        self.cfy_client = None

    def prepare(self):
        """Create the VM resources and open the Manager TCP ports.

        Extends SingleVm2.prepare() with one ingress security-group rule
        per port listed in self.ports.
        """
        super().prepare()
        for port in self.ports:
            self.cloud.create_security_group_rule(
                self.sec.id, port_range_min=port, port_range_max=port,
                protocol='tcp', direction='ingress')

    def execute(self):
        """Deploy Cloudify Manager and wait until its API answers.

        Returns:
            0 when the Manager reports 'running' and the secrets API is
            reachable; 1 when it never comes up within the retry budget
            (20 attempts, 30 seconds apart).
        """
        # Ship the container archive to the VM, then install Docker and
        # start the Manager container on the host network.
        scpc = scp.SCPClient(self.ssh.get_transport())
        scpc.put(self.cloudify_archive,
                 remote_path=os.path.basename(self.cloudify_archive))
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo apt-get update && "
            "sudo apt-get install -y docker.io && "
            "sudo docker load -i "
            f"~/{os.path.basename(self.cloudify_archive)} && "
            "sudo docker run --name cfy_manager_local -d "
            "--restart unless-stopped -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
            "--tmpfs /run --tmpfs /run/lock --security-opt seccomp:unconfined "
            f"--cap-add SYS_ADMIN --network=host {self.cloudify_container}")
        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
        # Reach the Manager through the floating IP when one was
        # allocated, else through the VM public address.
        self.cfy_client = CloudifyClient(
            host=self.fip.floating_ip_address if self.fip else (
                self.sshvm.public_v4),
            username='admin', password='admin', tenant='default_tenant')
        self.__logger.info("Attempting to get the Manager status")
        secret_key = "foo"
        secret_value = "bar"
        for loop in range(20):
            try:
                # Fetch the status once and reuse it for both the debug
                # trace and the readiness check (it was fetched twice
                # before, doubling the REST round-trips per retry).
                status = self.cfy_client.manager.get_status()
                self.__logger.debug("status %s", status)
                cfy_status = status['status']
                self.__logger.info(
                    "The current manager status is %s", cfy_status)
                if str(cfy_status) != 'running':
                    raise Exception("Cloudify Manager isn't up and running")
                # Exercise the secrets API end-to-end: update the probe
                # secret when it already exists, create it otherwise,
                # then delete it.
                for secret in self.cfy_client.secrets.list():
                    if secret_key == secret["key"]:
                        self.__logger.debug("Updating secrets: %s", secret_key)
                        self.cfy_client.secrets.update(
                            secret_key, secret_value)
                        break
                else:
                    self.__logger.debug("Creating secrets: %s", secret_key)
                    self.cfy_client.secrets.create(secret_key, secret_value)
                self.cfy_client.secrets.delete(secret_key)
                self.__logger.info("Secrets API successfully reached")
                break
            except Exception:  # pylint: disable=broad-except
                self.__logger.debug(
                    "try %s: Cloudify Manager isn't up and running \n%s",
                    loop + 1, traceback.format_exc())
                time.sleep(30)
        else:
            # The loop exhausted all retries without breaking.
            self.__logger.error("Cloudify Manager isn't up and running")
            return 1
        self.__logger.info("Cloudify Manager is up and running")
        return 0

    def put_private_key(self):
        """Put private keypair in manager.

        Copies the SSH private key into the Manager container so that
        blueprints can reach the deployed VMs.
        """
        self.__logger.info("Put private keypair in manager")
        scpc = scp.SCPClient(self.ssh.get_transport())
        scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo docker cp ~/cloudify_ims.pem "
            "cfy_manager_local:/etc/cloudify/ && "
            "sudo docker exec cfy_manager_local "
            "chmod 444 /etc/cloudify/cloudify_ims.pem")
        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))

    def upload_cfy_plugins(self, yaml, wgn):
        """Upload Cloudify plugins.

        Args:
            yaml: remote path of the plugin descriptor (plugin.yaml).
            wgn: remote path of the plugin wagon archive.
        """
        (_, stdout, stderr) = self.ssh.exec_command(
            "sudo docker exec cfy_manager_local "
            f"cfy plugins upload -y {yaml} {wgn} && "
            "sudo docker exec cfy_manager_local cfy status")
        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))

    def kill_existing_execution(self, dep_name):
        """Kill existing execution.

        Best-effort teardown: cancel any started execution, run the
        uninstall workflow, then delete the deployment and blueprint.
        Failures are logged but never raised to the caller.
        """
        try:
            self.__logger.info('Deleting the current deployment')
            exec_list = self.cfy_client.executions.list()
            for execution in exec_list:
                if execution['status'] == "started":
                    try:
                        self.cfy_client.executions.cancel(
                            execution['id'], force=True)
                    except Exception:  # pylint: disable=broad-except
                        self.__logger.warning("Can't cancel the current exec")
            execution = self.cfy_client.executions.start(
                dep_name, 'uninstall', parameters=dict(ignore_failure=True))
            wait_for_execution(self.cfy_client, execution, self.__logger)
            self.cfy_client.deployments.delete(dep_name)
            # Give the Manager time to settle before removing the
            # blueprint the deployment was based on.
            time.sleep(10)
            self.cfy_client.blueprints.delete(dep_name)
        except Exception:  # pylint: disable=broad-except
            self.__logger.exception("Some issue during the undeployment ..")
158
159
def wait_for_execution(client, execution, logger, timeout=3600):
    """Wait for a workflow execution on Cloudify Manager.

    Polls the execution status every 5 seconds, streaming the execution
    events/logs to *logger* as they arrive.

    Args:
        client: CloudifyClient connected to the Manager.
        execution: the execution object to wait for.
        logger: logger receiving the event messages.
        timeout: max seconds to wait, or None to wait forever.

    Returns:
        The refreshed execution object once it reaches an end state.

    Raises:
        RuntimeError: when *timeout* elapses before the execution ends.
    """
    # if execution already ended - return without waiting
    if execution.status in Execution.END_STATES:
        return execution

    if timeout is not None:
        deadline = time.time() + timeout

    # Poll for execution status and execution logs, until execution ends
    # and we receive an event of type in WORKFLOW_END_TYPES
    offset = 0
    batch_size = 50
    execution_ended = False
    while True:
        event_list = client.events.list(
            execution_id=execution.id,
            _offset=offset,
            _size=batch_size,
            include_logs=True,
            sort='@timestamp').items

        offset = offset + len(event_list)
        for event in event_list:
            logger.debug(event.get('message'))

        if timeout is not None:
            if time.time() > deadline:
                # f-string was missing here before: the message printed
                # the literal '{execution.workflow_id}' placeholders.
                raise RuntimeError(
                    f'execution of operation {execution.workflow_id} for '
                    f'deployment {execution.deployment_id} timed out')
            # update the remaining timeout
            timeout = deadline - time.time()

        if not execution_ended:
            execution = client.executions.get(execution.id)
            execution_ended = execution.status in Execution.END_STATES

        if execution_ended:
            break

        time.sleep(5)

    return execution
205
206
def get_execution_id(client, deployment_id):
    """Return the execution that prepared the deployment environment.

    The environment preparation covers network, security group, fip and
    VM creation.

    Raises:
        RuntimeError: when no create_deployment_environment execution
            exists for *deployment_id*.
    """
    executions = client.executions.list(deployment_id=deployment_id)
    preparation = next(
        (execution for execution in executions
         if execution.workflow_id == 'create_deployment_environment'),
        None)
    if preparation is not None:
        return preparation
    raise RuntimeError('Failed to get create_deployment_environment '
                       'workflow execution.'
                       f'Available executions: {executions}')