Re-enable Patrole volume testing
[functest.git] / functest / core / cloudify.py
index 7362024..0fb4f6e 100644 (file)
 from __future__ import division
 
 import logging
+import os
 import time
 import traceback
 
 from cloudify_rest_client import CloudifyClient
 from cloudify_rest_client.executions import Execution
+import scp
 
 from functest.core import singlevm
 
@@ -36,7 +38,9 @@ class Cloudify(singlevm.SingleVm2):
     create_server_timeout = 600
     ports = [80, 443, 5671, 53333]
 
-    cloudify_container = "cloudifyplatform/community:19.01.24"
+    cloudify_archive = ('/home/opnfv/functest/images/'
+                        'cloudify-docker-manager-community-19.01.24.tar')
+    cloudify_container = "docker-cfy-manager:latest"
 
     def __init__(self, **kwargs):
         """Initialize Cloudify testcase object."""
@@ -56,17 +60,22 @@ class Cloudify(singlevm.SingleVm2):
         """
         Deploy Cloudify Manager.
         """
+        scpc = scp.SCPClient(self.ssh.get_transport())
+        scpc.put(self.cloudify_archive,
+                 remote_path=os.path.basename(self.cloudify_archive))
         (_, stdout, stderr) = self.ssh.exec_command(
             "sudo wget https://get.docker.com/ -O script.sh && "
             "sudo chmod +x script.sh && "
             "sudo ./script.sh && "
+            "sudo docker load -i ~/{} && "
             "sudo docker run --name cfy_manager_local -d "
             "--restart unless-stopped -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
             "--tmpfs /run --tmpfs /run/lock --security-opt seccomp:unconfined "
             "--cap-add SYS_ADMIN --network=host {}".format(
+                os.path.basename(self.cloudify_archive),
                 self.cloudify_container))
-        self.__logger.debug("output:\n%s", stdout.read())
-        self.__logger.debug("error:\n%s", stderr.read())
+        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
         self.cfy_client = CloudifyClient(
             host=self.fip.floating_ip_address,
             username='admin', password='admin', tenant='default_tenant')
@@ -105,6 +114,49 @@ class Cloudify(singlevm.SingleVm2):
         self.__logger.info("Cloudify Manager is up and running")
         return 0
 
+    def put_private_key(self):
+        """Put private keypair in manager"""
+        self.__logger.info("Put private keypair in manager")
+        scpc = scp.SCPClient(self.ssh.get_transport())
+        scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')
+        (_, stdout, stderr) = self.ssh.exec_command(
+            "sudo docker cp ~/cloudify_ims.pem "
+            "cfy_manager_local:/etc/cloudify/ && "
+            "sudo docker exec cfy_manager_local "
+            "chmod 444 /etc/cloudify/cloudify_ims.pem")
+        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
+
+    def upload_cfy_plugins(self, yaml, wgn):
+        """Upload Cloudify plugins"""
+        (_, stdout, stderr) = self.ssh.exec_command(
+            "sudo docker exec cfy_manager_local "
+            "cfy plugins upload -y {} {} && "
+            "sudo docker exec cfy_manager_local cfy status".format(yaml, wgn))
+        self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+        self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
+
+    def kill_existing_execution(self, dep_name):
+        """kill existing execution"""
+        try:
+            self.__logger.info('Deleting the current deployment')
+            exec_list = self.cfy_client.executions.list()
+            for execution in exec_list:
+                if execution['status'] == "started":
+                    try:
+                        self.cfy_client.executions.cancel(
+                            execution['id'], force=True)
+                    except Exception:  # pylint: disable=broad-except
+                        self.__logger.warning("Can't cancel the current exec")
+            execution = self.cfy_client.executions.start(
+                dep_name, 'uninstall', parameters=dict(ignore_failure=True))
+            wait_for_execution(self.cfy_client, execution, self.__logger)
+            self.cfy_client.deployments.delete(dep_name)
+            time.sleep(10)
+            self.cfy_client.blueprints.delete(dep_name)
+        except Exception:  # pylint: disable=broad-except
+            self.__logger.exception("Some issue during the undeployment ..")
+
 
 def wait_for_execution(client, execution, logger, timeout=3600, ):
     """Wait for a workflow execution on Cloudify Manager."""
@@ -139,9 +191,8 @@ def wait_for_execution(client, execution, logger, timeout=3600, ):
                     'execution of operation {0} for deployment {1} '
                     'timed out'.format(execution.workflow_id,
                                        execution.deployment_id))
-            else:
-                # update the remaining timeout
-                timeout = deadline - time.time()
+            # update the remaining timeout
+            timeout = deadline - time.time()
 
         if not execution_ended:
             execution = client.executions.get(execution.id)