Add a new testcase leveraging xrally/kubernetes 75/70475/1
authorCédric Ollivier <cedric.ollivier@orange.com>
Sun, 30 Jun 2019 13:03:00 +0000 (15:03 +0200)
committerCédric Ollivier <cedric.ollivier@orange.com>
Mon, 6 Jul 2020 10:58:52 +0000 (12:58 +0200)
It partially runs the all-in-one task for checking basic functionality
of a Kubernetes cluster.

Change-Id: Iaf7a29d2c1a364073e4caaeef69d68ee79ea56bc
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
(cherry picked from commit 413b52a98a359094dbc251749c04d2789e0ab24c)

ansible/site.yml
docker/core/Dockerfile
docker/smoke/testcases.yaml
functest_kubernetes/rally/__init__.py [new file with mode: 0644]
functest_kubernetes/rally/all-in-one.yaml [new file with mode: 0644]
functest_kubernetes/rally/rally_kubernetes.py [new file with mode: 0644]
requirements.txt
setup.cfg
tox.ini

index d706961..734d1fb 100644 (file)
@@ -38,4 +38,5 @@
         - repo: opnfv
           container: functest-kubernetes-smoke
           tests:
+            - xrally_kubernetes
             - k8s_conformance
index f51d890..9585d90 100644 (file)
@@ -18,6 +18,9 @@ RUN apk --no-cache add --update python py-pip bash git grep libffi openssl mailc
     rm -rf /src/functest-kubernetes && \
     bash -c "mkdir -p /var/lib/xtesting /home/opnfv" && \
     ln -s /var/lib/xtesting /home/opnfv/functest && \
+    mkdir -p /etc/rally && \
+    printf "[database]\nconnection = 'sqlite:////var/lib/rally/database/rally.sqlite'" > /etc/rally/rally.conf && \
+    mkdir -p /var/lib/rally/database && rally db create && \
     apk del .build-deps
 COPY logging.ini /usr/lib/python2.7/site-packages/xtesting/ci/logging.ini
 CMD ["run_tests", "-t", "all"]
index 6144070..012fed7 100644 (file)
@@ -19,3 +19,15 @@ tiers:
                     - DEPLOY_SCENARIO: 'k8-*'
                 run:
                     name: k8s_conformance
+            -
+                case_name: xrally_kubernetes
+                project_name: functest
+                criteria: 100
+                blocking: false
+                description: >-
+                    All in one tasks for checking basic functionality of
+                    Kubernetes cluster.
+                dependencies:
+                    - DEPLOY_SCENARIO: 'k8-*'
+                run:
+                    name: xrally_kubernetes
diff --git a/functest_kubernetes/rally/__init__.py b/functest_kubernetes/rally/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/functest_kubernetes/rally/all-in-one.yaml b/functest_kubernetes/rally/all-in-one.yaml
new file mode 100644 (file)
index 0000000..134c9f5
--- /dev/null
@@ -0,0 +1,386 @@
+---
+version: 2
+title: All in one tasks for checking basic functionality of Kubernetes cluster
+subtasks:
+
+  - title: Run a single workload with listing existing kubernetes namespaces
+    scenario:
+      Kubernetes.list_namespaces: {}
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+
+  - title: Run a single workload with create/read/delete namespace
+    scenario:
+      Kubernetes.create_and_delete_namespace: {}
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+
+  - title: Run a single workload with create/read/delete pod
+    scenario:
+      Kubernetes.create_and_delete_pod:
+        image: kubernetes/pause
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/read/delete replication controller
+    scenario:
+      Kubernetes.create_and_delete_replication_controller:
+        image: kubernetes/pause
+        replicas: 2
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/scale/delete replication controller
+    scenario:
+      Kubernetes.create_scale_and_delete_replication_controller:
+        image: kubernetes/pause
+        replicas: 2
+        scale_replicas: 3
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/read/delete replicaset
+    scenario:
+      Kubernetes.create_and_delete_replicaset:
+        image: kubernetes/pause
+        replicas: 1
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/scale/delete replicaset
+    scenario:
+      Kubernetes.create_scale_and_delete_replicaset:
+        image: kubernetes/pause
+        replicas: 1
+        scale_replicas: 2
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title:
+      Run a single workload with create/read/delete pod with emptyDir volume
+    scenario:
+      Kubernetes.create_and_delete_pod_with_emptydir_volume:
+        image: kubernetes/pause
+        mount_path: /opt/check
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: >-
+      Run a single workload with create/read/check/delete pod with emptyDir
+      volume
+    scenario:
+      Kubernetes.create_and_delete_pod_with_emptydir_volume:
+        image: busybox
+        command:
+          - sleep
+          - "3600"
+        mount_path: /opt/check
+        check_cmd:
+          - ls
+          - /opt/check
+        error_regexp: No such file
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/read/delete pod with secret volume
+    scenario:
+      Kubernetes.create_and_delete_pod_with_secret_volume:
+        image: busybox
+        command:
+          - sleep
+          - "3600"
+        mount_path: /opt/check
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/check/delete pod with secret volume
+    scenario:
+      Kubernetes.create_and_delete_pod_with_secret_volume:
+        image: busybox
+        command:
+          - sleep
+          - "3600"
+        mount_path: /opt/check
+        check_cmd:
+          - ls
+          - /opt/check
+        error_regexp: No such file
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: >-
+      Run a single workload with create/read/check/delete pod with hostPath
+      volume
+    scenario:
+      Kubernetes.create_and_delete_pod_with_hostpath_volume:
+        image: busybox
+        command:
+          - sleep
+          - "3600"
+        mount_path: /opt/check
+        check_cmd:
+          - ls
+          - /opt/check
+        error_regexp: No such file
+        volume_type: Directory
+        volume_path: /tmp/
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title:
+      Run a single workload with create/read/delete pod with configMap volume
+    scenario:
+      Kubernetes.create_and_delete_pod_with_configmap_volume:
+        image: busybox
+        command:
+          - "sleep"
+          - "3600"
+        mount_path: /var/log/check.txt
+        subpath: check.txt
+        configmap_data:
+          check.txt: |
+            test
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: >-
+      Run a single workload with create/read/check/delete pod with configMap
+      volume
+    scenario:
+      Kubernetes.create_and_delete_pod_with_configmap_volume:
+        image: busybox
+        command:
+          - "sleep"
+          - "3600"
+        mount_path: /var/log/check.txt
+        subpath: check.txt
+        configmap_data:
+          check.txt: |
+            test
+        check_cmd:
+          - cat
+          - /var/log/check.txt
+        error_regexp: No such file
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/read/delete deployment
+    scenario:
+      Kubernetes.create_and_delete_deployment:
+        image: kubernetes/pause
+        replicas: 2
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/rollout/delete deployment
+    scenario:
+      Kubernetes.create_rollout_and_delete_deployment:
+        image: busybox
+        replicas: 1
+        command:
+          - sleep
+          - "3600"
+        env:
+          - name: "UPD"
+            value: "false"
+        changes:
+          env:
+            - name: "UPD"
+              value: "true"
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/read/delete statefulset
+    scenario:
+      Kubernetes.create_and_delete_statefulset:
+        image: kubernetes/pause
+        replicas: 2
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/scale/delete statefulset
+    scenario:
+      Kubernetes.create_scale_and_delete_statefulset:
+        image: kubernetes/pause
+        replicas: 1
+        scale_replicas: 2
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/read/delete job
+    scenario:
+      Kubernetes.create_and_delete_job:
+        image: busybox
+        command:
+          - echo
+          - "SUCCESS"
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/check/delete clusterIP service
+    scenario:
+      Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
+        image: gcr.io/google-samples/hello-go-gke:1.0
+        port: 80
+        protocol: TCP
+    runner:
+      constant:
+        concurrency: 1
+        times: 2
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/check/delete daemonset
+    scenario:
+      Kubernetes.create_check_and_delete_daemonset:
+        image: kubernetes/pause
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: >-
+      Run a single workload with create/check/delete clusterIP service with
+      custom endpoint
+    scenario:
+      Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
+        image: gcr.io/google-samples/hello-go-gke:1.0
+        port: 80
+        protocol: TCP
+        custom_endpoint: true
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
+
+  - title: Run a single workload with create/check/delete NodePort service
+    scenario:
+      Kubernetes.create_check_and_delete_pod_with_node_port_service:
+        image: gcr.io/google-samples/hello-go-gke:1.0
+        port: 80
+        protocol: TCP
+        request_timeout: 10
+    runner:
+      constant:
+        concurrency: 1
+        times: 1
+    contexts:
+      namespaces:
+        count: 3
+        with_serviceaccount: true
diff --git a/functest_kubernetes/rally/rally_kubernetes.py b/functest_kubernetes/rally/rally_kubernetes.py
new file mode 100644 (file)
index 0000000..5955b89
--- /dev/null
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+"""Run workloads via Rally against Kubernetes platform
+
+xrally/kubernetes_ provides xRally plugins for Kubernetes platform.
+
+.. _xrally/kubernetes: https://github.com/xrally/xrally-kubernetes/
+"""
+
+import logging
+import os
+import time
+
+import pkg_resources
+from rally import api
+from rally import exceptions
+from rally.common import yamlutils as yaml
+import rally.common.logging
+from rally.env import env_mgr
+
+from xtesting.core import testcase
+
+
+class RallyKubernetes(testcase.TestCase):
+    """Run tasks for checking basic functionality of Kubernetes cluster"""
+
+    __logger = logging.getLogger(__name__)
+
+    def __init__(self, **kwargs):
+        super(RallyKubernetes, self).__init__(**kwargs)
+        self.res_dir = "/home/opnfv/functest/results/{}".format(
+            self.case_name)
+
+    def run(self, **kwargs):
+        self.start_time = time.time()
+        if not os.path.exists(self.res_dir):
+            os.makedirs(self.res_dir)
+        rapi = api.API()
+        api.CONF.set_default("use_stderr", False)
+        api.CONF.set_default('log_dir', self.res_dir)
+        api.CONF.set_default('log_file', 'rally.log')
+        rally.common.logging.setup("rally")
+        spec = env_mgr.EnvManager.create_spec_from_sys_environ()["spec"]
+        try:
+            env_mgr.EnvManager.get('my-kubernetes').delete(force=True)
+        except exceptions.DBRecordNotFound:
+            pass
+        env = env_mgr.EnvManager.create('my-kubernetes', spec)
+        result = env.check_health()
+        self.__logger.debug("check health %s: %s", 'my-kubernetes', result)
+        if not result['existing@kubernetes']['available']:
+            self.__logger.error(
+                "Cannot check env heath: %s",
+                result['existing@kubernetes']['message'])
+            return
+        input_task = open(
+            pkg_resources.resource_filename(
+                'functest_kubernetes', 'rally/all-in-one.yaml')).read()
+        task = yaml.safe_load(input_task)
+        rapi.task.validate(deployment='my-kubernetes', config=task)
+        task_instance = rapi.task.create(deployment='my-kubernetes')
+        rapi.task.start(
+            deployment='my-kubernetes', config=task,
+            task=task_instance["uuid"])
+        self.details = rapi.task.get(task_instance["uuid"], detailed=True)
+        self.__logger.debug("details: %s", self.details)
+        if self.details['pass_sla']:
+            self.result = 100
+        result = rapi.task.export(
+            [task_instance["uuid"]], "html",
+            output_dest=os.path.join(
+                self.res_dir, "{}.html".format(self.case_name)))
+        if "files" in result:
+            for path in result["files"]:
+                with open(path, "w+") as output:
+                    output.write(result["files"][path])
+        result = rapi.task.export(
+            [task_instance["uuid"]], "junit-xml",
+            output_dest=os.path.join(
+                self.res_dir, "{}.xml".format(self.case_name)))
+        if "files" in result:
+            for path in result["files"]:
+                with open(path, "w+") as output:
+                    output.write(result["files"][path])
+        self.stop_time = time.time()
index 60ca942..742dcbf 100644 (file)
@@ -3,4 +3,6 @@
 # process, which may cause wedges in the gate later.
 pbr!=2.1.0 # Apache-2.0
 xtesting # Apache-2.0
+rally
+xrally-kubernetes
 kubernetes # Apache-2.0
index cddf6a2..455a61b 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,6 +10,7 @@ packages = functest_kubernetes
 xtesting.testcase =
     k8s_smoke = functest_kubernetes.k8stest:K8sSmokeTest
     k8s_conformance = functest_kubernetes.k8stest:K8sConformanceTest
+    xrally_kubernetes = functest_kubernetes.rally.rally_kubernetes:RallyKubernetes
     k8s_vims = functest_kubernetes.ims.ims:Vims
     kube_hunter = functest_kubernetes.security.security:KubeHunter
     kube_bench = functest_kubernetes.security.security:KubeBench
diff --git a/tox.ini b/tox.ini
index fa512bf..c1b844b 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -30,6 +30,7 @@ basepython = python2.7
 files =
   .travis.yml
   docker
+  functest_kubernetes/rally/all-in-one.yaml
 commands =
   yamllint {[testenv:yamllint]files}