Stop using docker install script
diff --git a/functest/core/cloudify.py b/functest/core/cloudify.py
index 7362024..966d336 100644
--- a/functest/core/cloudify.py
+++ b/functest/core/cloudify.py
 from __future__ import division
 
 import logging
+import os
 import time
 import traceback
 
 from cloudify_rest_client import CloudifyClient
 from cloudify_rest_client.executions import Execution
+import scp
 
 from functest.core import singlevm
 
@@ -27,7 +29,7 @@ class Cloudify(singlevm.SingleVm2):
     __logger = logging.getLogger(__name__)
 
     filename = ('/home/opnfv/functest/images/'
-                'ubuntu-16.04-server-cloudimg-amd64-disk1.img')
+                'ubuntu-18.04-server-cloudimg-amd64.img')
     flavor_ram = 4096
     flavor_vcpus = 2
     flavor_disk = 40
@@ -36,17 +38,19 @@ class Cloudify(singlevm.SingleVm2):
     create_server_timeout = 600
     ports = [80, 443, 5671, 53333]
 
-    cloudify_container = "cloudifyplatform/community:19.01.24"
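+    # Pre-fetched Cloudify Manager image archive (copied to the VM over SCP)
+    # and the image tag used when starting the Manager container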
+    cloudify_archive = ('/home/opnfv/functest/images/'
+                        'cloudify-docker-manager-community-19.01.24.tar')
+    cloudify_container = "docker-cfy-manager:latest"
 
     def __init__(self, **kwargs):
         """Initialize Cloudify testcase object."""
         if "case_name" not in kwargs:
             kwargs["case_name"] = "cloudify"
-        super(Cloudify, self).__init__(**kwargs)
+        super().__init__(**kwargs)
         self.cfy_client = None
 
     def prepare(self):
-        super(Cloudify, self).prepare()
+        super().prepare()
         for port in self.ports:
             self.cloud.create_security_group_rule(
                 self.sec.id, port_range_min=port, port_range_max=port,
@@ -56,19 +60,23 @@ class Cloudify(singlevm.SingleVm2):
         """
         Deploy Cloudify Manager.
         """
+        scpc = scp.SCPClient(self.ssh.get_transport())
+        scpc.put(self.cloudify_archive,
+                 remote_path=os.path.basename(self.cloudify_archive))
         (_, stdout, stderr) = self.ssh.exec_command(
-            "sudo wget https://get.docker.com/ -O script.sh && "
-            "sudo chmod +x script.sh && "
-            "sudo ./script.sh && "
+            "sudo apt-get update && "
+            "sudo apt-get install -y docker.io && "
+            "sudo docker load -i "
+            f"~/{os.path.basename(self.cloudify_archive)} && "
             "sudo docker run --name cfy_manager_local -d "
             "--restart unless-stopped -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
             "--tmpfs /run --tmpfs /run/lock --security-opt seccomp:unconfined "
-            "--cap-add SYS_ADMIN --network=host {}".format(
-                self.cloudify_container))
-        self.__logger.debug("output:\n%s", stdout.read())
-        self.__logger.debug("error:\n%s", stderr.read())
+            f"--cap-add SYS_ADMIN --network=host {self.cloudify_container}")
+        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
         self.cfy_client = CloudifyClient(
-            host=self.fip.floating_ip_address,
+            host=self.fip.floating_ip_address if self.fip else (
+                self.sshvm.public_v4),
             username='admin', password='admin', tenant='default_tenant')
         self.__logger.info("Attemps running status of the Manager")
         secret_key = "foo"
@@ -105,6 +113,49 @@ class Cloudify(singlevm.SingleVm2):
         self.__logger.info("Cloudify Manager is up and running")
         return 0
 
+    def put_private_key(self):
+        """Put private keypair in manager"""
+        self.__logger.info("Put private keypair in manager")
+        scpc = scp.SCPClient(self.ssh.get_transport())
+        scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')
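+        # Copy the key into the cfy_manager_local container and make it
+        # readable from inside the container.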
+        (_, stdout, stderr) = self.ssh.exec_command(
+            "sudo docker cp ~/cloudify_ims.pem "
+            "cfy_manager_local:/etc/cloudify/ && "
+            "sudo docker exec cfy_manager_local "
+            "chmod 444 /etc/cloudify/cloudify_ims.pem")
+        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
+
+    def upload_cfy_plugins(self, yaml, wgn):
+        """Upload Cloudify plugins"""
+        (_, stdout, stderr) = self.ssh.exec_command(
+            "sudo docker exec cfy_manager_local "
+            f"cfy plugins upload -y {yaml} {wgn} && "
+            "sudo docker exec cfy_manager_local cfy status")
+        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
+
+    def kill_existing_execution(self, dep_name):
+        """kill existing execution"""
+        try:
+            self.__logger.info('Deleting the current deployment')
+            exec_list = self.cfy_client.executions.list()
+            for execution in exec_list:
+                if execution['status'] == "started":
+                    try:
+                        self.cfy_client.executions.cancel(
+                            execution['id'], force=True)
+                    except Exception:  # pylint: disable=broad-except
+                        self.__logger.warning("Can't cancel the current exec")
+            execution = self.cfy_client.executions.start(
+                dep_name, 'uninstall', parameters=dict(ignore_failure=True))
+            wait_for_execution(self.cfy_client, execution, self.__logger)
+            self.cfy_client.deployments.delete(dep_name)
+            time.sleep(10)
+            self.cfy_client.blueprints.delete(dep_name)
+        except Exception:  # pylint: disable=broad-except
+            self.__logger.exception("Some issue occurred during undeployment")
+
 
 def wait_for_execution(client, execution, logger, timeout=3600, ):
     """Wait for a workflow execution on Cloudify Manager."""
@@ -136,12 +187,10 @@ def wait_for_execution(client, execution, logger, timeout=3600, ):
         if timeout is not None:
             if time.time() > deadline:
                 raise RuntimeError(
-                    'execution of operation {0} for deployment {1} '
-                    'timed out'.format(execution.workflow_id,
-                                       execution.deployment_id))
-            else:
-                # update the remaining timeout
-                timeout = deadline - time.time()
+                    f'execution of operation {execution.workflow_id} for '
+                    f'deployment {execution.deployment_id} timed out')
+            # update the remaining timeout
+            timeout = deadline - time.time()
 
         if not execution_ended:
             execution = client.executions.get(execution.id)
@@ -167,4 +216,4 @@ def get_execution_id(client, deployment_id):
             return execution
     raise RuntimeError('Failed to get create_deployment_environment '
                        'workflow execution. '
-                       'Available executions: {0}'.format(executions))
+                       f'Available executions: {executions}')