--platforms linux/amd64 \
--template ${DOCKER_USERNAME}/functest-kubernetes-core:ARCH-latest \
--target ${DOCKER_USERNAME}/functest-kubernetes-core:latest
- - stage: build functest-kubernetes-healthcheck image
+ - stage: build functest-kubernetes-[healthcheck,cnf] images
script: sudo -E bash build.sh
env:
- REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/healthcheck"
- arm64_dirs=""
- - stage: publish functest-kubernetes-healthcheck manifests
+ - script: sudo -E bash build.sh
+ env:
+ - REPO="${DOCKER_USERNAME}"
+ - amd64_dirs="docker/cnf"
+ - arm64_dirs=""
+ - stage: publish functest-kubernetes-[healthcheck,cnf] manifests
script: >
sudo manifest-tool push from-args \
--platforms linux/amd64 \
--template \
${DOCKER_USERNAME}/functest-kubernetes-healthcheck:ARCH-latest \
--target ${DOCKER_USERNAME}/functest-kubernetes-healthcheck:latest
+ - script: >
+ sudo manifest-tool push from-args \
+ --platforms linux/amd64 \
+ --template ${DOCKER_USERNAME}/functest-kubernetes-cnf:ARCH-latest \
+ --target ${DOCKER_USERNAME}/functest-kubernetes-cnf:latest
- stage: build functest-kubernetes-smoke image
script: sudo -E bash build.sh
env:
amd64_dirs=${amd64_dirs-"\
docker/core \
docker/healthcheck \
-docker/smoke"}
+docker/smoke \
+docker/cnf"}
arm64_dirs=${arm64_dirs-${amd64_dirs}}
build_opts=(--pull=true --no-cache --force-rm=true)
--- /dev/null
+FROM opnfv/functest-kubernetes-core
+
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+CMD ["run_tests", "-t", "all"]
--- /dev/null
+#!/bin/bash
+
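+# Docker Hub build hook (e.g. hooks/post_checkout): DOCKER_REPO and
+# DOCKER_TAG are exported by the automated build; the sed below pins the
+# parent image in the Dockerfile to the tag being built.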
+from="${DOCKER_REPO%/*}/functest-kubernetes-vnf:${DOCKER_TAG}"
+sed -i "s|^FROM.*$|FROM ${from}|" Dockerfile
+
+exit $?
--- /dev/null
+---
+tiers:
+ -
+ name: cnf
+ order: 1
+ description: >-
+            Collection of CNF test cases.
+ testcases:
+ -
+ case_name: k8s_vims
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ Deploy and test Clearwater IMS using Kubernetes as proposed
+ by https://github.com/Metaswitch/clearwater-docker
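+                # xtesting only schedules this case when the DEPLOY_SCENARIO
+                # environment variable matches the k8-* pattern below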
+ dependencies:
+ - DEPLOY_SCENARIO: 'k8-*'
+ run:
+ name: k8s_vims
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: astaire
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: astaire
+ template:
+ metadata:
+ labels:
+ service: astaire
+ spec:
+ terminationGracePeriodSeconds: 120
+ containers:
+ - image: "ollivier/clearwater-astaire:latest"
+ imagePullPolicy: Always
+ name: astaire
+ ports:
+ - containerPort: 22
+ - containerPort: 11211
+ - containerPort: 11311
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 11311
+ periodSeconds: 10
+ failureThreshold: 9
+ readinessProbe:
+ tcpSocket:
+ port: 11311
+ volumeMounts:
+ - name: astairelogs
+ mountPath: /var/log/astaire
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "/usr/bin/pre-stop"]
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/astaire/astaire_current.txt" ]
+ volumeMounts:
+ - name: astairelogs
+ mountPath: /var/log/astaire
+ volumes:
+ - name: astairelogs
+ emptyDir: {}
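+      # "name: ~" below is a null placeholder; set a real image pull secret
+      # name here if the Clearwater images are served from a private registry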
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: astaire
+spec:
+ ports:
+ - name: "11311"
+ port: 11311
+ selector:
+ service: astaire
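+  # clusterIP: None makes this a headless service: the service name resolves
+  # directly to the pod IPs, which the Clearwater components rely on to find
+  # each other via DNS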
+ clusterIP: None
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: bono
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: bono
+ template:
+ metadata:
+ labels:
+ service: bono
+ snmp: enabled
+ spec:
+ containers:
+ - image: "ollivier/clearwater-bono:latest"
+ imagePullPolicy: Always
+ name: bono
+ ports:
+ - containerPort: 22
+ - containerPort: 3478
+ - containerPort: 5060
+ - containerPort: 5062
+ - containerPort: 5060
+ protocol: "UDP"
+ - containerPort: 5062
+ protocol: "UDP"
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+        livenessProbe:
+          exec:
+            command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "3478 5060 5062"]
+          initialDelaySeconds: 30
+        readinessProbe:
+          exec:
+            command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "3478 5060 5062"]
+ volumeMounts:
+ - name: bonologs
+ mountPath: /var/log/bono
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/bono/bono_current.txt" ]
+ volumeMounts:
+ - name: bonologs
+ mountPath: /var/log/bono
+ volumes:
+ - name: bonologs
+ emptyDir: {}
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ clusterIP: None
+ ports:
+ - name: "3478"
+ port: 3478
+ - name: "5060"
+ port: 5060
+ - name: "5062"
+ port: 5062
+ selector:
+ service: bono
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cassandra
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: cassandra
+ template:
+ metadata:
+ labels:
+ service: cassandra
+ spec:
+ containers:
+ - image: "ollivier/clearwater-cassandra:latest"
+ imagePullPolicy: Always
+ name: cassandra
+ ports:
+ - containerPort: 22
+ - containerPort: 7001
+ - containerPort: 9042
+ - containerPort: 9160
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "7000 9042 9160"]
+ # Cassandra can take a very, very long time to start up
+ initialDelaySeconds: 600
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "7000 9042 9160"]
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: cassandra
+spec:
+ ports:
+ - name: "7001"
+ port: 7001
+ - name: "7000"
+ port: 7000
+ - name: "9042"
+ port: 9042
+ - name: "9160"
+ port: 9160
+ selector:
+ service: cassandra
+ clusterIP: None
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ service: chronos
+ name: chronos
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: chronos
+ template:
+ metadata:
+ labels:
+ service: chronos
+ spec:
+ terminationGracePeriodSeconds: 120
+ containers:
+ - image: "ollivier/clearwater-chronos:latest"
+ imagePullPolicy: Always
+ name: chronos
+ ports:
+ - containerPort: 22
+ - containerPort: 7253
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 7253
+ periodSeconds: 10
+ failureThreshold: 9
+ readinessProbe:
+ tcpSocket:
+ port: 7253
+ volumeMounts:
+ - name: chronoslogs
+ mountPath: /var/log/chronos
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "/usr/bin/pre-stop"]
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/chronos/chronos_current.txt" ]
+ volumeMounts:
+ - name: chronoslogs
+ mountPath: /var/log/chronos
+ volumes:
+ - name: chronoslogs
+ emptyDir: {}
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: chronos
+spec:
+ ports:
+ - name: "7253"
+ port: 7253
+ selector:
+ service: chronos
+ clusterIP: None
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ellis
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: ellis
+ template:
+ metadata:
+ labels:
+ service: ellis
+ spec:
+ containers:
+ - image: "ollivier/clearwater-ellis:latest"
+ imagePullPolicy: Always
+ name: ellis
+ ports:
+ - containerPort: 22
+ - containerPort: 80
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: ellis
+spec:
+ clusterIP: None
+ ports:
+ - name: "http"
+ port: 80
+ selector:
+ service: ellis
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: etcd
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ instance-type: etcd-pod
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ instance-type: etcd-pod
+ spec:
+ containers:
+ - args:
+ - --name
+ - $(MY_POD_NAME)
+ - --advertise-client-urls
+ - http://$(MY_POD_IP):2379,http://$(MY_POD_IP):4001
+ - --listen-client-urls
+ - http://0.0.0.0:2379,http://0.0.0.0:4001
+ - --initial-advertise-peer-urls
+ - http://$(MY_POD_IP):2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ # By default use a single pod cluster
+ - --initial-cluster
+ - $(MY_POD_NAME)=http://$(MY_POD_IP):2380
+        # Alternatively, multi-pod clusters can be supported using central discovery. Run e.g.
+        # curl https://discovery.etcd.io/new?size=3 | sed s/https/http/
+        # to get a discovery URL for a 3 pod cluster, substitute the returned value below, and
+        # set replicas: 3 above.
+        #- --discovery
+        #- <URL returned by command above>
+ - --initial-cluster-state
+ - new
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: quay.io/coreos/etcd:v2.2.5
+ name: etcd
+ ports:
+ - containerPort: 2379
+ - containerPort: 2380
+ - containerPort: 4001
+ livenessProbe:
+ tcpSocket:
+ port: 4001
+ initialDelaySeconds: 300
+ readinessProbe:
+ tcpSocket:
+ port: 4001
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: etcd
+ labels:
+ instance-type: etcd-pod
+spec:
+ ports:
+ - name: "etcd-client"
+ port: 2379
+ - name: "etcd-server"
+ port: 2380
+ - name: "4001"
+ port: 4001
+ selector:
+ instance-type: etcd-pod
+ clusterIP: None
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: homer
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: homer
+ template:
+ metadata:
+ labels:
+ service: homer
+ spec:
+ containers:
+ - image: "ollivier/clearwater-homer:latest"
+ imagePullPolicy: Always
+ name: homer
+ ports:
+ - containerPort: 22
+ - containerPort: 7888
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 7888
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 7888
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: homer
+spec:
+ ports:
+ - name: "7888"
+ port: 7888
+ selector:
+ service: homer
+ clusterIP: None
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: homestead
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: homestead
+ template:
+ metadata:
+ labels:
+ service: homestead
+ snmp: enabled
+ spec:
+ containers:
+ - image: "ollivier/clearwater-homestead:latest"
+ imagePullPolicy: Always
+ name: homestead
+ ports:
+ - containerPort: 22
+ - containerPort: 8888
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "8888"]
+ initialDelaySeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "8888"]
+ volumeMounts:
+ - name: homesteadlogs
+ mountPath: /var/log/homestead
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/homestead/homestead_current.txt" ]
+ volumeMounts:
+ - name: homesteadlogs
+ mountPath: /var/log/homestead
+ volumes:
+ - name: homesteadlogs
+ emptyDir: {}
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: homestead-prov
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: homestead-prov
+ template:
+ metadata:
+ labels:
+ service: homestead-prov
+ snmp: enabled
+ spec:
+ containers:
+ - image: "ollivier/clearwater-homestead-prov:latest"
+ imagePullPolicy: Always
+ name: homestead-prov
+ ports:
+ - containerPort: 22
+ - containerPort: 8889
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/clearwater/bin/poll_homestead-prov.sh"]
+ initialDelaySeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/clearwater/bin/poll_homestead-prov.sh"]
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: homestead-prov
+spec:
+ ports:
+ - name: "8889"
+ port: 8889
+ selector:
+ service: homestead-prov
+ clusterIP: None
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: homestead
+spec:
+ ports:
+ - name: "8888"
+ port: 8888
+ selector:
+ service: homestead
+ clusterIP: None
--- /dev/null
+#!/usr/bin/env python3
+
+# Copyright (c) 2020 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Deploy and Test Clearwater vIMS using Kubernetes"""
+
+import logging
+import time
+import re
+import yaml
+
+from kubernetes import client
+from kubernetes import config
+from kubernetes import watch
+import pkg_resources
+
+from xtesting.core import testcase
+
+
+class Vims(testcase.TestCase):
+ """Deploy and Test Clearwater vIMS using Kubernetes
+
+    It leverages the Python Kubernetes client to apply the operations
+    proposed by clearwater-docker.
+
+ See https://github.com/Metaswitch/clearwater-docker for more details
+ """
+ namespace = 'default'
+ zone = 'default.svc.cluster.local'
+ watch_timeout = 1200
+ metadata_name = "env-vars"
+ test_image_name = "ollivier/clearwater-live-test:latest"
+ test_container_name = "live-test"
+
+ __logger = logging.getLogger(__name__)
+
+ deployment_list = [
+ "astaire", "bono", "cassandra", "chronos", "ellis", "etcd", "homer",
+ "homestead", "homestead-prov", "ralf", "sprout"]
+
+ def __init__(self, **kwargs):
+ super(Vims, self).__init__(**kwargs)
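+        # credentials are read from the default kubeconfig (~/.kube/config),
+        # which is expected to be mounted into the container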
+ config.load_kube_config()
+ self.corev1 = client.CoreV1Api()
+ self.appsv1 = client.AppsV1Api()
+
+ def deploy_vnf(self):
+ """Deploy vIMS as proposed by clearwater-docker
+
+        It leverages the unofficial Clearwater container images proposed in
+        the documentation.
+
+ See https://github.com/Metaswitch/clearwater-docker for more details
+ """
+ metadata = client.V1ObjectMeta(
+ name=self.metadata_name, namespace=self.namespace)
+ body = client.V1ConfigMap(
+ metadata=metadata,
+ data={"ADDITIONAL_SHARED_CONFIG": "", "ZONE": self.zone})
+ api_response = self.corev1.create_namespaced_config_map(
+ self.namespace, body=body)
+ self.__logger.debug("create_namespaced_config_map: %s", api_response)
+ for deployment in self.deployment_list:
+ with open(pkg_resources.resource_filename(
+ 'functest_kubernetes',
+ 'ims/{}-depl.yaml'.format(deployment))) as yfile:
+ body = yaml.safe_load(yfile)
+ resp = self.appsv1.create_namespaced_deployment(
+                body=body, namespace=self.namespace)
+ self.__logger.info("Deployment %s created", resp.metadata.name)
+            self.__logger.debug(
+                "create_namespaced_deployment: %s", resp)
+ for service in self.deployment_list:
+ with open(pkg_resources.resource_filename(
+ 'functest_kubernetes',
+ 'ims/{}-svc.yaml'.format(service))) as yfile:
+ body = yaml.safe_load(yfile)
+ resp = self.corev1.create_namespaced_service(
+                body=body, namespace=self.namespace)
+ self.__logger.info("Service %s created", resp.metadata.name)
+            self.__logger.debug(
+                "create_namespaced_service: %s", resp)
+ status = self.deployment_list.copy()
+ watch_deployment = watch.Watch()
+ for event in watch_deployment.stream(
+ func=self.appsv1.list_namespaced_deployment,
+ namespace=self.namespace, timeout_seconds=self.watch_timeout):
+ if event["object"].status.ready_replicas == 1:
+ if event['object'].metadata.name in status:
+ status.remove(event['object'].metadata.name)
+ self.__logger.info(
+ "%s started in %0.2f sec",
+ event['object'].metadata.name,
+ time.time()-self.start_time)
+ if len(status) == 0:
+ watch_deployment.stop()
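+        # a fully started deployment scores half of the final result;
+        # test_vnf() adds the other half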
+ self.result = 1/2 * 100
+
+ def test_vnf(self):
+ """Test vIMS as proposed by clearwater-live-test
+
+        It leverages an unofficial Clearwater container image to allow
+        testing from within the Kubernetes cluster.
+
+ See https://github.com/Metaswitch/clearwater-live-test for more details
+ """
+ container = client.V1Container(
+ name=self.test_container_name, image=self.test_image_name)
+ spec = client.V1PodSpec(containers=[container], restart_policy="Never")
+ metadata = client.V1ObjectMeta(name=self.test_container_name)
+ body = client.V1Pod(metadata=metadata, spec=spec)
+ api_response = self.corev1.create_namespaced_pod(self.namespace, body)
+ watch_deployment = watch.Watch()
+ for event in watch_deployment.stream(
+ func=self.corev1.list_namespaced_pod,
+ namespace=self.namespace, timeout_seconds=self.watch_timeout):
+ if event["object"].metadata.name == self.test_container_name:
+ if (event["object"].status.phase == 'Succeeded'
+ or event["object"].status.phase == 'Error'):
+ watch_deployment.stop()
+ api_response = self.corev1.read_namespaced_pod_log(
+ name=self.test_container_name, namespace=self.namespace)
+ self.__logger.info(api_response)
+ vims_test_result = {}
+ try:
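+            # clearwater-live-test ends its report with a summary such as
+            # "2 failures out of 56 tests run" followed by "12 tests skipped"
+            # (figures are illustrative); the regexp below parses it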
+ grp = re.search(
+ r'^(\d+) failures out of (\d+) tests run.*\n'
+ r'(\d+) tests skipped$', api_response,
+ re.MULTILINE | re.DOTALL)
+ assert grp
+ vims_test_result["failures"] = int(grp.group(1))
+ vims_test_result["total"] = int(grp.group(2))
+ vims_test_result["skipped"] = int(grp.group(3))
+ vims_test_result['passed'] = (
+ int(grp.group(2)) - int(grp.group(3)) - int(grp.group(1)))
+ if vims_test_result['total'] - vims_test_result['skipped'] > 0:
+ vnf_test_rate = vims_test_result['passed'] / (
+ vims_test_result['total'] - vims_test_result['skipped'])
+ else:
+ vnf_test_rate = 0
+ self.result += 1/2 * 100 * vnf_test_rate
+ except Exception: # pylint: disable=broad-except
+ self.__logger.exception("Cannot parse live tests results")
+
+    def run(self, **kwargs):
+        self.start_time = time.time()
+        try:
+            self.deploy_vnf()
+            self.test_vnf()
+            res = self.EX_OK
+        except client.rest.ApiException:
+            self.__logger.exception("Cannot deploy and test vIMS")
+            res = self.EX_RUN_ERROR
+        self.stop_time = time.time()
+        return res
+
+ def clean(self):
+ try:
+ api_response = self.corev1.delete_namespaced_config_map(
+ name=self.metadata_name, namespace=self.namespace)
+ self.__logger.debug(
+ "delete_namespaced_config_map: %s", api_response)
+ except client.rest.ApiException:
+ pass
+ try:
+ api_response = self.corev1.delete_namespaced_pod(
+ name=self.test_container_name, namespace=self.namespace)
+ self.__logger.debug("delete_namespaced_pod: %s", api_response)
+ except client.rest.ApiException:
+ pass
+ for deployment in self.deployment_list:
+ try:
+ api_response = self.appsv1.delete_namespaced_deployment(
+ name=deployment, namespace=self.namespace)
+ self.__logger.debug(
+ "delete_namespaced_deployment: %s", api_response)
+ except client.rest.ApiException:
+ pass
+ try:
+ api_response = self.corev1.delete_namespaced_service(
+ name=deployment, namespace=self.namespace)
+ self.__logger.debug(
+ "delete_namespaced_service: %s", api_response)
+ except client.rest.ApiException:
+ pass
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ralf
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: ralf
+ template:
+ metadata:
+ labels:
+ service: ralf
+ snmp: enabled
+ spec:
+ containers:
+ - image: "ollivier/clearwater-ralf:latest"
+ imagePullPolicy: Always
+ name: ralf
+ ports:
+ - containerPort: 22
+ - containerPort: 10888
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 10888
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 10888
+ volumeMounts:
+ - name: ralflogs
+ mountPath: /var/log/ralf
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/ralf/ralf_current.txt" ]
+ volumeMounts:
+ - name: ralflogs
+ mountPath: /var/log/ralf
+ volumes:
+ - name: ralflogs
+ emptyDir: {}
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: ralf
+spec:
+ ports:
+ - name: "10888"
+ port: 10888
+ selector:
+ service: ralf
+ clusterIP: None
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: sprout
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: sprout
+ template:
+ metadata:
+ labels:
+ service: sprout
+ snmp: enabled
+ spec:
+ containers:
+ - image: "ollivier/clearwater-sprout:latest"
+ imagePullPolicy: Always
+ name: sprout
+ ports:
+ - containerPort: 22
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "5052 5054"]
+ initialDelaySeconds: 30
+ periodSeconds: 3
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "5052 5054"]
+ volumeMounts:
+ - name: sproutlogs
+ mountPath: /var/log/sprout
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/sprout/sprout_current.txt" ]
+ volumeMounts:
+ - name: sproutlogs
+ mountPath: /var/log/sprout
+ volumes:
+ - name: sproutlogs
+ emptyDir: {}
+ imagePullSecrets:
+ - name: ~
+ restartPolicy: Always
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: sprout
+spec:
+ ports:
+ - name: "5052"
+ port: 5052
+ - name: "5054"
+ port: 5054
+ selector:
+ service: sprout
+ clusterIP: None
xtesting # Apache-2.0
rally
xrally-kubernetes
+kubernetes # Apache-2.0
k8s_smoke = functest_kubernetes.k8stest:K8sSmokeTest
k8s_conformance = functest_kubernetes.k8stest:K8sConformanceTest
xrally_kubernetes = functest_kubernetes.rally.rally_kubernetes:RallyKubernetes
+ k8s_vims = functest_kubernetes.ims.ims:Vims