| cnf_conformance | functest | cnf | 02:00 | PASS |
+-------------------------+------------------+--------------+------------------+----------------+
```
+
+
+## Use on air gap environments (no access to Internet)
+
+To test a Kubernetes cluster without access to the Internet, repository mirrors
+need to be provided.
+
+Currently, only rally tests support this feature.
+
+There are two ways to provide the repository mirrors:
+
+- Provide a single environment variable (`MIRROR_REPO`) pointing to a
+ repository with all needed images.
+- Provide an environment variable per needed repository:
+ - `DOCKERHUB_REPO` for DockerHub repository (`docker.io`)
+ - `GCR_REPO` for Google Cloud repository (`gcr.io`)
+ - `K8S_GCR_REPO` for Kubernetes repository (`k8s.gcr.io`)
+
+All needed images are given in
+[functest-kubernetes/ci/images.txt](functest-kubernetes/ci/images.txt)
set -e
-tmpfile=$(mktemp)
-cat << EOF > $tmpfile
-docker.io/appropriate/curl:edge
-docker.io/aquasec/kube-bench:0.3.1
-docker.io/aquasec/kube-hunter:0.3.1
-docker.io/gluster/glusterdynamic-provisioner:v1.0
-docker.io/library/busybox:1.28
-docker.io/library/busybox:1.29
-docker.io/library/httpd:2.4.38-alpine
-docker.io/library/httpd:2.4.39-alpine
-docker.io/library/nginx:1.14-alpine
-docker.io/library/nginx:1.15-alpine
-docker.io/library/perl:5.26
-docker.io/library/redis:5.0.5-alpine
-docker.io/ollivier/clearwater-astaire:hunter
-docker.io/ollivier/clearwater-bono:hunter
-docker.io/ollivier/clearwater-cassandra:hunter
-docker.io/ollivier/clearwater-chronos:hunter
-docker.io/ollivier/clearwater-ellis:hunter
-docker.io/ollivier/clearwater-homer:hunter
-docker.io/ollivier/clearwater-homestead:hunter
-docker.io/ollivier/clearwater-homestead-prov:hunter
-docker.io/ollivier/clearwater-live-test:hunter
-docker.io/ollivier/clearwater-ralf:hunter
-docker.io/ollivier/clearwater-sprout:hunter
-gcr.io/google-samples/hello-go-gke:1.0
-gcr.io/kubernetes-e2e-test-images/apparmor-loader:1.0
-gcr.io/kubernetes-e2e-test-images/cuda-vector-add:1.0
-gcr.io/kubernetes-e2e-test-images/cuda-vector-add:2.0
-gcr.io/kubernetes-e2e-test-images/echoserver:2.2
-gcr.io/kubernetes-e2e-test-images/ipc-utils:1.0
-gcr.io/kubernetes-e2e-test-images/jessie-dnsutils:1.0
-gcr.io/kubernetes-e2e-test-images/kitten:1.0
-gcr.io/kubernetes-e2e-test-images/metadata-concealment:1.2
-gcr.io/kubernetes-e2e-test-images/nautilus:1.0
-gcr.io/kubernetes-e2e-test-images/nonewprivs:1.0
-gcr.io/kubernetes-e2e-test-images/nonroot:1.0
-gcr.io/kubernetes-e2e-test-images/regression-issue-74839-amd64:1.0
-gcr.io/kubernetes-e2e-test-images/resource-consumer:1.5
-gcr.io/kubernetes-e2e-test-images/sample-apiserver:1.17
-gcr.io/kubernetes-e2e-test-images/volume/gluster:1.0
-gcr.io/kubernetes-e2e-test-images/volume/iscsi:2.0
-gcr.io/kubernetes-e2e-test-images/volume/nfs:1.0
-gcr.io/kubernetes-e2e-test-images/volume/rbd:1.0.1
-k8s.gcr.io/build-image/debian-iptables:v12.1.2
-k8s.gcr.io/conformance:v1.19.0
-k8s.gcr.io/e2e-test-images/agnhost:2.20
-k8s.gcr.io/etcd:3.4.13-0
-k8s.gcr.io/pause:3.2
-k8s.gcr.io/pause:3.3
-k8s.gcr.io/prometheus-dummy-exporter:v0.1.0
-k8s.gcr.io/prometheus-to-sd:v0.5.0
-k8s.gcr.io/sd-dummy-exporter:v0.2.0
-k8s.gcr.io/sig-storage/nfs-provisioner:v2.2.2
-quay.io/coreos/etcd:v2.2.5
-EOF
-for i in $(cat $tmpfile); do
+DIR="$(cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd)"
+for i in $(cat $DIR/images.txt); do
sudo docker pull $i
# https://kind.sigs.k8s.io/docs/user/quick-start/
# Be free to use docker save && kind load image-archive
kind load docker-image $i --name latest
done
-rm -f $tmpfile
--- /dev/null
+docker.io/appropriate/curl:edge
+docker.io/aquasec/kube-bench:0.3.1
+docker.io/aquasec/kube-hunter:0.3.1
+docker.io/gluster/glusterdynamic-provisioner:v1.0
+docker.io/library/busybox:1.28
+docker.io/library/busybox:1.29
+docker.io/library/httpd:2.4.38-alpine
+docker.io/library/httpd:2.4.39-alpine
+docker.io/library/nginx:1.14-alpine
+docker.io/library/nginx:1.15-alpine
+docker.io/library/perl:5.26
+docker.io/library/redis:5.0.5-alpine
+docker.io/ollivier/clearwater-astaire:hunter
+docker.io/ollivier/clearwater-bono:hunter
+docker.io/ollivier/clearwater-cassandra:hunter
+docker.io/ollivier/clearwater-chronos:hunter
+docker.io/ollivier/clearwater-ellis:hunter
+docker.io/ollivier/clearwater-homer:hunter
+docker.io/ollivier/clearwater-homestead:hunter
+docker.io/ollivier/clearwater-homestead-prov:hunter
+docker.io/ollivier/clearwater-live-test:hunter
+docker.io/ollivier/clearwater-ralf:hunter
+docker.io/ollivier/clearwater-sprout:hunter
+gcr.io/google-samples/hello-go-gke:1.0
+gcr.io/kubernetes-e2e-test-images/apparmor-loader:1.0
+gcr.io/kubernetes-e2e-test-images/cuda-vector-add:1.0
+gcr.io/kubernetes-e2e-test-images/cuda-vector-add:2.0
+gcr.io/kubernetes-e2e-test-images/echoserver:2.2
+gcr.io/kubernetes-e2e-test-images/ipc-utils:1.0
+gcr.io/kubernetes-e2e-test-images/jessie-dnsutils:1.0
+gcr.io/kubernetes-e2e-test-images/kitten:1.0
+gcr.io/kubernetes-e2e-test-images/metadata-concealment:1.2
+gcr.io/kubernetes-e2e-test-images/nautilus:1.0
+gcr.io/kubernetes-e2e-test-images/nonewprivs:1.0
+gcr.io/kubernetes-e2e-test-images/nonroot:1.0
+gcr.io/kubernetes-e2e-test-images/regression-issue-74839-amd64:1.0
+gcr.io/kubernetes-e2e-test-images/resource-consumer:1.5
+gcr.io/kubernetes-e2e-test-images/sample-apiserver:1.17
+gcr.io/kubernetes-e2e-test-images/volume/gluster:1.0
+gcr.io/kubernetes-e2e-test-images/volume/iscsi:2.0
+gcr.io/kubernetes-e2e-test-images/volume/nfs:1.0
+gcr.io/kubernetes-e2e-test-images/volume/rbd:1.0.1
+k8s.gcr.io/build-image/debian-iptables:v12.1.2
+k8s.gcr.io/conformance:v1.19.0
+k8s.gcr.io/e2e-test-images/agnhost:2.20
+k8s.gcr.io/etcd:3.4.13-0
+k8s.gcr.io/pause:3.2
+k8s.gcr.io/pause:3.3
+k8s.gcr.io/prometheus-dummy-exporter:v0.1.0
+k8s.gcr.io/prometheus-to-sd:v0.5.0
+k8s.gcr.io/sd-dummy-exporter:v0.2.0
+k8s.gcr.io/sig-storage/nfs-provisioner:v2.2.2
+quay.io/coreos/etcd:v2.2.5
- title: Run a single workload with create/read/delete pod
scenario:
Kubernetes.create_and_delete_pod:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
runner:
constant:
concurrency: {{ concurrency }}
- title: Run a single workload with create/read/delete replication controller
scenario:
Kubernetes.create_and_delete_replication_controller:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
runner:
constant:
- title: Run a single workload with create/scale/delete replication controller
scenario:
Kubernetes.create_scale_and_delete_replication_controller:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
scale_replicas: 3
runner:
- title: Run a single workload with create/read/delete replicaset
scenario:
Kubernetes.create_and_delete_replicaset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 1
runner:
constant:
- title: Run a single workload with create/scale/delete replicaset
scenario:
Kubernetes.create_scale_and_delete_replicaset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 1
scale_replicas: 2
runner:
Run a single workload with create/read/delete pod with emptyDir volume
scenario:
Kubernetes.create_and_delete_pod_with_emptydir_volume:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
mount_path: /opt/check
runner:
constant:
volume
scenario:
Kubernetes.create_and_delete_pod_with_emptydir_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
- title: Run a single workload with create/read/delete pod with secret volume
scenario:
Kubernetes.create_and_delete_pod_with_secret_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
- title: Run a single workload with create/check/delete pod with secret volume
scenario:
Kubernetes.create_and_delete_pod_with_secret_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
volume
scenario:
Kubernetes.create_and_delete_pod_with_hostpath_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
Run a single workload with create/read/delete pod with configMap volume
scenario:
Kubernetes.create_and_delete_pod_with_configmap_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- "sleep"
- "3600"
volume
scenario:
Kubernetes.create_and_delete_pod_with_configmap_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- "sleep"
- "3600"
- title: Run a single workload with create/read/delete deployment
scenario:
Kubernetes.create_and_delete_deployment:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
runner:
constant:
- title: Run a single workload with create/rollout/delete deployment
scenario:
Kubernetes.create_rollout_and_delete_deployment:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
replicas: 1
command:
- sleep
- title: Run a single workload with create/read/delete statefulset
scenario:
Kubernetes.create_and_delete_statefulset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
runner:
constant:
- title: Run a single workload with create/scale/delete statefulset
scenario:
Kubernetes.create_scale_and_delete_statefulset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 1
scale_replicas: 2
runner:
- title: Run a single workload with create/read/delete job
scenario:
Kubernetes.create_and_delete_job:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- echo
- "SUCCESS"
- title: Run a single workload with create/check/delete clusterIP service
scenario:
Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
- image: gcr.io/google-samples/hello-go-gke:1.0
+ image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
port: 80
protocol: TCP
runner:
custom endpoint
scenario:
Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
- image: gcr.io/google-samples/hello-go-gke:1.0
+ image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
port: 80
protocol: TCP
custom_endpoint: true
- title: Run a single workload with create/check/delete NodePort service
scenario:
Kubernetes.create_check_and_delete_pod_with_node_port_service:
- image: gcr.io/google-samples/hello-go-gke:1.0
+ image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
port: 80
protocol: TCP
request_timeout: 10
concurrency = 1
times = 1
namespaces_count = 1
+ dockerhub_repo = os.getenv("MIRROR_REPO", "docker.io")
+ gcr_repo = os.getenv("MIRROR_REPO", "gcr.io")
+ k8s_gcr_repo = os.getenv("MIRROR_REPO", "k8s.gcr.io")
def __init__(self, **kwargs):
super(RallyKubernetes, self).__init__(**kwargs)
concurrency=kwargs.get("concurrency", self.concurrency),
times=kwargs.get("times", self.times),
namespaces_count=kwargs.get(
- "namespaces_count", self.namespaces_count)))
+ "namespaces_count", self.namespaces_count),
+ dockerhub_repo=os.getenv("DOCKERHUB_REPO", self.dockerhub_repo),
+ gcr_repo=os.getenv("GCR_REPO", self.gcr_repo),
+ k8s_gcr_repo=os.getenv("K8S_GCR_REPO", self.k8s_gcr_repo)))
rapi.task.validate(deployment='my-kubernetes', config=task)
task_instance = rapi.task.create(deployment='my-kubernetes')
rapi.task.start(