Add support for Calico, Cilium, Contiv-VPP and Danm in k8scluster deployment scripts. 45/72945/6
authorPawan Verma <pawanjbs5@gmail.com>
Mon, 4 Oct 2021 13:14:20 +0000 (18:44 +0530)
committerPawan Verma <pawanjbs5@gmail.com>
Fri, 15 Oct 2021 19:25:05 +0000 (00:55 +0530)
This patch adds support for installing Calico, Cilium, Contiv-VPP and
Danm in the Kubernetes cluster deployment Ansible scripts.

Signed-off-by: Pawan Verma <pawanjbs5@gmail.com>
Change-Id: Ib76620fa0f63dd58e8496bbf31baf515f697bcde

17 files changed:
tools/k8s/cluster-deployment/k8scluster/README.md
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-cni-plugins.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-netwatcher-daemonset.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-webhook-create-signed-cert.sh [new file with mode: 0755]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-calico.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-cilium.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-contiv.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-danm.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-calico.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-cilium.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-contiv.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-danm.yaml [new file with mode: 0644]
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml
tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/templates/danm-webhook.yaml [new file with mode: 0644]

index 78fdbd0..9708d64 100644 (file)
@@ -24,6 +24,12 @@ worker ansible_host={enter-master-ip}  ansible_connection=ssh ansible_ssh_user={
 ```
 In this configuration file, connection details should be filled in. In case more nodes within the cluster are needed, add lines as necessary to the workers group within the `hosts` file.
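+
+For example, a second worker can be added with a line such as the following (host name, IP and user are placeholders):
+
+```
+worker2 ansible_host={enter-worker-ip} ansible_connection=ssh ansible_ssh_user={user}
+```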
 
+Install the kubernetes.core collection for ansible-playbook. This collection is a prerequisite for running the playbook.
+
+```
+ansible-galaxy collection install kubernetes.core
+```
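+
+To verify the collection is visible to Ansible (output formatting may differ between Ansible versions):
+
+```
+ansible-galaxy collection list | grep kubernetes.core
+```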
+
 
 ### Usage
 In order to use the script, download or clone [this repository](https://gerrit.opnfv.org/gerrit/vswitchperf) to the root of what will be the master node.
@@ -53,6 +59,23 @@ To deploy only CNI plugins
 ansible-playbook k8sclustermanagement.yml -i hosts_garr --tags "cni"
 ```
 
+To deploy Danm CNI
+
+Build the Danm binaries and onboard them to your cluster image repository:
+
+```
+git clone https://github.com/nokia/danm
+cd danm
+./build_danm.sh
+```
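+
+The build produces the `danm-cni-plugins`, `netwatcher` and `webhook` images referenced by the bundled manifests. As a sketch, onboarding them to a private registry could look like this (the registry name is a placeholder):
+
+```
+docker tag danm-cni-plugins:latest {your-registry}/danm-cni-plugins:latest
+docker push {your-registry}/danm-cni-plugins:latest
+```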
+
+Deploy the Danm CNI with the `danm` tag.
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts_garr --tags "cni, danm"
+```
+
+Specifying the `danm` tag deploys Danm and skips Multus.
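+
+Once deployed, the Danm components can be checked via the labels used by the bundled manifests:
+
+```
+kubectl get pods -n kube-system -l danm.k8s.io=danm-cni
+kubectl get pods -n kube-system -l danm.k8s.io=netwatcher
+```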
 
 
 ### Debugging
index 15f1f18..ab2ffc3 100644 (file)
@@ -25,4 +25,6 @@ token_file: $HOME/log_init.txt
 \r
 \r
 \r
-PIP_executable_version: pip3.6
\ No newline at end of file
+PIP_executable_version: pip3.6\r
+helm_version: v3.7.0\r
+openshift_version: 0.11.1\r
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-cni-plugins.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-cni-plugins.yaml
new file mode 100644 (file)
index 0000000..1fe77cd
--- /dev/null
@@ -0,0 +1,36 @@
+#
+# cloned from https://github.com/nokia/danm/blob/v4.3.0/integration/manifests/cni_plugins/cni_plugins_ds.yaml
+#
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: danm-cni
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      danm.k8s.io: danm-cni
+  template:
+    metadata:
+      labels:
+        danm.k8s.io: danm-cni
+    spec:
+      containers:
+        - name: danm-cni
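+          # image is built locally by build_danm.sh and onboarded to the cluster image repository (see README)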
+          image: danm-cni-plugins
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+            - name: host-cni
+              mountPath: /host/cni
+            - name: host-net-d
+              mountPath: /host/net.d
+      hostNetwork: true
+      terminationGracePeriodSeconds: 0
+      volumes:
+        - name: host-cni
+          hostPath:
+            path: /opt/cni/bin
+        - name: host-net-d
+          hostPath:
+            path: /etc/cni/net.d
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-netwatcher-daemonset.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-netwatcher-daemonset.yaml
new file mode 100644 (file)
index 0000000..1b61a04
--- /dev/null
@@ -0,0 +1,94 @@
+#
+# cloned from https://github.com/nokia/danm/tree/v4.3.0/integration/manifests/netwatcher
+#
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: netwatcher
+  namespace: kube-system
+  labels:
+      kubernetes.io/cluster-service: "true"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:netwatcher
+rules:
+- apiGroups:
+  - danm.k8s.io
+  resources:
+  - danmnets
+  - clusternetworks
+  - tenantnetworks
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+- apiGroups:
+  - k8s.cni.cncf.io
+  resources:
+  - network-attachment-definitions
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:netwatcher
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:netwatcher
+subjects:
+- kind: ServiceAccount
+  namespace: kube-system
+  name: netwatcher
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: netwatcher
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      danm.k8s.io: netwatcher
+  template:
+    metadata:
+      labels:
+        danm.k8s.io: netwatcher
+    spec:
+      serviceAccountName: netwatcher
+      hostNetwork: true
+      dnsPolicy: ClusterFirst
+      hostIPC: true
+      hostPID: true
+      containers:
+        - name: netwatcher
+          image: netwatcher
+          imagePullPolicy: IfNotPresent
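+          # hostNetwork/hostIPC/hostPID above and the capabilities below let netwatcher manage host-side network interfaces for DANM networks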
+          securityContext:
+            capabilities:
+              add:
+                - SYS_PTRACE
+                - SYS_ADMIN
+                - NET_ADMIN
+                - NET_RAW
+      tolerations:
+       - effect: NoSchedule
+         operator: Exists
+       - effect: NoExecute
+         operator: Exists
+      terminationGracePeriodSeconds: 0
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-webhook-create-signed-cert.sh b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/danm-webhook-create-signed-cert.sh
new file mode 100755 (executable)
index 0000000..d1486f6
--- /dev/null
@@ -0,0 +1,121 @@
+#!/bin/sh
+
+set -e
+
+usage() {
+    cat <<EOF
+Generate a certificate suitable for use with a sidecar-injector webhook service.
+This script uses k8s' CertificateSigningRequest API to generate a
+certificate signed by the k8s CA, suitable for use with sidecar-injector webhook
+services. This requires permissions to create and approve CSRs. See
+https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster for a
+detailed explanation and additional instructions.
+The server key/cert and the k8s CA cert are stored in a k8s secret.
+usage: ${0} [OPTIONS]
+The following flags are supported (defaults are applied when omitted).
+       --service          Service name of webhook.
+       --namespace        Namespace where webhook service and secret reside.
+       --secret           Secret name for CA certificate and server certificate/key pair.
+EOF
+    exit 1
+}
+
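+# Example invocation (mirrors the defaults below):
+#   ./danm-webhook-create-signed-cert.sh --service danm-webhook-svc --namespace kube-system --secret danm-webhook-certs
+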
+while [ $# -gt 0 ]; do
+    case ${1} in
+        --service)
+            service="$2"
+            shift
+            ;;
+        --secret)
+            secret="$2"
+            shift
+            ;;
+        --namespace)
+            namespace="$2"
+            shift
+            ;;
+        *)
+            usage
+            ;;
+    esac
+    shift
+done
+
+[ -z ${service} ] && service=danm-webhook-svc
+[ -z ${secret} ] && secret=danm-webhook-certs
+[ -z ${namespace} ] && namespace=kube-system
+
+if [ ! -x "$(command -v openssl)" ]; then
+    echo "openssl not found"
+    exit 1
+fi
+
+csrName=${service}.${namespace}
+tmpdir=$(mktemp -d)
+echo "creating certs in tmpdir ${tmpdir} "
+
+cat <<EOF >> ${tmpdir}/csr.conf
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = ${service}
+DNS.2 = ${service}.${namespace}
+DNS.3 = ${service}.${namespace}.svc
+EOF
+
+openssl genrsa -out ${tmpdir}/server-key.pem 2048
+openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf
+
+# clean-up any previously created CSR for our service. Ignore errors if not present.
+kubectl delete csr ${csrName} 2>/dev/null || true
+
+# create server cert/key CSR and send it to the k8s API
+cat <<EOF | kubectl create -f -
+apiVersion: certificates.k8s.io/v1beta1
+kind: CertificateSigningRequest
+metadata:
+  name: ${csrName}
+spec:
+  groups:
+  - system:authenticated
+  request: $(cat ${tmpdir}/server.csr | base64 | tr -d '\n')
+  usages:
+  - digital signature
+  - key encipherment
+  - server auth
+EOF
+
+# verify CSR has been created
+while true; do
+    if kubectl get csr ${csrName}; then
+        break
+    fi
+done
+
+# approve and fetch the signed certificate
+kubectl certificate approve ${csrName}
+# verify certificate has been signed
+for x in $(seq 10); do
+    serverCert=$(kubectl get csr ${csrName} -o jsonpath='{.status.certificate}')
+    if [ -n "${serverCert}" ]; then
+        break
+    fi
+    sleep 1
+done
+echo ${serverCert} | openssl base64 -d -A -out ${tmpdir}/server-cert.pem
+
+
+# create the secret with CA cert and server cert/key
+kubectl create secret generic ${secret} \
+        --from-file=key.pem=${tmpdir}/server-key.pem \
+        --from-file=cert.pem=${tmpdir}/server-cert.pem \
+        --dry-run -o yaml |
+    kubectl -n ${namespace} apply -f -
index 00110ad..1233ead 100644 (file)
@@ -1,5 +1,5 @@
 #
-# cloned from https://github.com/coreos/flannel/blob/v0.12.0/Documentation/kube-flannel.yml
+# cloned from https://github.com/flannel-io/flannel/blob/v0.14.0/Documentation/kube-flannel.yml
 #
 ---
 apiVersion: policy/v1beta1
@@ -14,14 +14,14 @@ metadata:
 spec:
   privileged: false
   volumes:
-    - configMap
-    - secret
-    - emptyDir
-    - hostPath
+  - configMap
+  - secret
+  - emptyDir
+  - hostPath
   allowedHostPaths:
-    - pathPrefix: "/etc/cni/net.d"
-    - pathPrefix: "/etc/kube-flannel"
-    - pathPrefix: "/run/flannel"
+  - pathPrefix: "/etc/cni/net.d"
+  - pathPrefix: "/etc/kube-flannel"
+  - pathPrefix: "/run/flannel"
   readOnlyRootFilesystem: false
   # Users and groups
   runAsUser:
@@ -34,7 +34,7 @@ spec:
   allowPrivilegeEscalation: false
   defaultAllowPrivilegeEscalation: false
   # Capabilities
-  allowedCapabilities: ['NET_ADMIN']
+  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
   defaultAddCapabilities: []
   requiredDropCapabilities: []
   # Host namespaces
@@ -50,36 +50,36 @@ spec:
     rule: 'RunAsAny'
 ---
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 rules:
-  - apiGroups: ['extensions']
-    resources: ['podsecuritypolicies']
-    verbs: ['use']
-    resourceNames: ['psp.flannel.unprivileged']
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch
+- apiGroups: ['extensions']
+  resources: ['podsecuritypolicies']
+  verbs: ['use']
+  resourceNames: ['psp.flannel.unprivileged']
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes/status
+  verbs:
+  - patch
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 roleRef:
@@ -137,7 +137,7 @@ data:
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: kube-flannel-ds-amd64
+  name: kube-flannel-ds
   namespace: kube-system
   labels:
     tier: node
@@ -156,23 +156,20 @@ spec:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
-              - matchExpressions:
-                  - key: beta.kubernetes.io/os
-                    operator: In
-                    values:
-                      - linux
-                  - key: beta.kubernetes.io/arch
-                    operator: In
-                    values:
-                      - amd64
+            - matchExpressions:
+              - key: kubernetes.io/os
+                operator: In
+                values:
+                - linux
       hostNetwork: true
+      priorityClassName: system-node-critical
       tolerations:
       - operator: Exists
         effect: NoSchedule
       serviceAccountName: flannel
       initContainers:
       - name: install-cni
-        image: quay.io/coreos/flannel:v0.12.0-amd64
+        image: quay.io/coreos/flannel:v0.14.0
         command:
         - cp
         args:
@@ -186,7 +183,7 @@ spec:
           mountPath: /etc/kube-flannel/
       containers:
       - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.12.0-amd64
+        image: quay.io/coreos/flannel:v0.14.0
         command:
         - /opt/bin/flanneld
         args:
@@ -202,7 +199,7 @@ spec:
         securityContext:
           privileged: false
           capabilities:
-            add: ["NET_ADMIN"]
+            add: ["NET_ADMIN", "NET_RAW"]
         env:
         - name: POD_NAME
           valueFrom:
@@ -218,389 +215,12 @@ spec:
         - name: flannel-cfg
           mountPath: /etc/kube-flannel/
       volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-arm64
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  selector:
-    matchLabels:
-      app: flannel
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                  - key: beta.kubernetes.io/os
-                    operator: In
-                    values:
-                      - linux
-                  - key: beta.kubernetes.io/arch
-                    operator: In
-                    values:
-                      - arm64
-      hostNetwork: true
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.12.0-arm64
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.12.0-arm64
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-arm
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  selector:
-    matchLabels:
-      app: flannel
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                  - key: beta.kubernetes.io/os
-                    operator: In
-                    values:
-                      - linux
-                  - key: beta.kubernetes.io/arch
-                    operator: In
-                    values:
-                      - arm
-      hostNetwork: true
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.12.0-arm
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.12.0-arm
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-ppc64le
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  selector:
-    matchLabels:
-      app: flannel
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                  - key: beta.kubernetes.io/os
-                    operator: In
-                    values:
-                      - linux
-                  - key: beta.kubernetes.io/arch
-                    operator: In
-                    values:
-                      - ppc64le
-      hostNetwork: true
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.12.0-ppc64le
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.12.0-ppc64le
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-s390x
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  selector:
-    matchLabels:
-      app: flannel
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                  - key: beta.kubernetes.io/os
-                    operator: In
-                    values:
-                      - linux
-                  - key: beta.kubernetes.io/arch
-                    operator: In
-                    values:
-                      - s390x
-      hostNetwork: true
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.12.0-s390x
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.12.0-s390x
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
-
+      - name: run
+        hostPath:
+          path: /run/flannel
+      - name: cni
+        hostPath:
+          path: /etc/cni/net.d
+      - name: flannel-cfg
+        configMap:
+          name: kube-flannel-cfg
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-calico.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-calico.yaml
new file mode 100644 (file)
index 0000000..9e6f3fa
--- /dev/null
@@ -0,0 +1,10 @@
+---
+
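+# The upstream Calico manifest is fetched at run time and split into YAML
+# documents; each document is deleted, and the "when" guard skips empty ones.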
+- name: Delete Calico
+  k8s:
+    state: absent
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://docs.projectcalico.org/manifests/calico.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-cilium.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-cilium.yaml
new file mode 100644 (file)
index 0000000..16b6e8c
--- /dev/null
@@ -0,0 +1,7 @@
+---
+
+- name: Delete cilium
+  kubernetes.core.helm:
+   name: cilium
+   namespace: kube-system
+   state: absent
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-contiv.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-contiv.yaml
new file mode 100644 (file)
index 0000000..553ac93
--- /dev/null
@@ -0,0 +1,11 @@
+---
+
+- name: Delete Contiv
+  k8s:
+    state: absent
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/contiv/vpp/v3.4.2/k8s/contiv-vpp.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-danm.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-danm.yaml
new file mode 100644 (file)
index 0000000..1f98980
--- /dev/null
@@ -0,0 +1,63 @@
+---
+
+- name: Delete DanmNet CRDs
+  k8s:
+    state: absent
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/nokia/danm/v4.3.0/integration/crds/lightweight/DanmNet.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+
+- name: Delete DanmEp CRDs
+  k8s:
+    state: absent
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/nokia/danm/v4.3.0/integration/crds/lightweight/DanmEp.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+
+
+- name: Delete Danm service account
+  k8s:
+    state: absent
+    kind: ServiceAccount
+    name: danm
+    namespace: kube-system
+
+- name: Delete Danm cni conf
+  command: rm -f /etc/cni/net.d/00-danm.conf
+  become: yes
+
+- name: Delete Danm cni kubeconfig
+  command: rm -f /etc/cni/net.d/danm-kubeconfig
+  become: yes
+
+- name: Delete Danm rbac
+  k8s:
+    state: absent
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/nokia/danm/v4.3.0/integration/cni_config/danm_rbac.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+
+- name: Delete Danm cni plugins
+  k8s:
+    state: absent
+    definition: "{{ lookup('file', 'danm-cni-plugins.yaml') }}"
+
+- name: Delete Danm netwatcher
+  k8s:
+    state: absent
+    definition: "{{ lookup('file', 'danm-netwatcher-daemonset.yaml') }}"
+
+- name: Get CA Bundle
+  shell: kubectl config view --raw -o json | jq -r '.clusters[0].cluster."certificate-authority-data"' | tr -d '"'
+  register: danm_ca_bundle
+
+- name: Generate webhook deployment
+  template:
+    src: danm-webhook.yaml
+    dest: /tmp/danm-webhook.yaml
+    mode: 0644
+  vars:
+    ca_bundle: "{{ danm_ca_bundle.stdout }}"
+
+- name: Delete Danm webhook
+  k8s:
+    state: absent
+    src: /tmp/danm-webhook.yaml
index b2f280e..8e30c6e 100644 (file)
@@ -3,9 +3,21 @@
   pip:
     name: openshift
     executable: "{{ PIP_executable_version }}"
+    version: "{{ openshift_version }}"
   when: inventory_hostname in groups['master']
   become: yes
 
+- name: Install Helm
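+  # helm is required by the kubernetes.core.helm tasks (e.g. the Cilium deployment)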
+  unarchive:
+    src: "https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz"
+    dest: "/tmp"
+    remote_src: yes
+  become: yes
+
+- name: Move helm to PATH
+  command: mv /tmp/linux-amd64/helm /usr/local/bin/helm
+  become: yes
+
 - name: Check whether /etc/cni/net.d/ exists
   stat:
     path: /etc/cni/net.d
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-calico.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-calico.yaml
new file mode 100644 (file)
index 0000000..afee929
--- /dev/null
@@ -0,0 +1,12 @@
+---
+
+- name: Clean Calico
+  import_tasks: clear-calico.yaml
+
+- name: Deploy Calico
+  k8s:
+    state: present
+    apply: yes
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://docs.projectcalico.org/manifests/calico.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-cilium.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-cilium.yaml
new file mode 100644 (file)
index 0000000..fb48591
--- /dev/null
@@ -0,0 +1,19 @@
+---
+
+- name: Add cilium helm repo
+  kubernetes.core.helm_repository:
+    name: cilium
+    repo_url: "https://helm.cilium.io/"
+
+- name: Clean Cilium
+  import_tasks: clear-cilium.yaml
+
+- name: Deploy cilium Chart
+  kubernetes.core.helm:
+   name: cilium
+   namespace: kube-system
+   chart_ref: cilium/cilium
+   wait: yes
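+   # cni.exclusive=false keeps Cilium from taking exclusive ownership of /etc/cni/net.d, so other CNIs (e.g. Multus) can coexist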
+   values:
+    cni:
+      exclusive: false
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-contiv.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-contiv.yaml
new file mode 100644 (file)
index 0000000..f08ad70
--- /dev/null
@@ -0,0 +1,12 @@
+---
+
+- name: Clean Contiv
+  import_tasks: clear-contiv.yaml
+
+- name: Deploy Contiv
+  k8s:
+    state: present
+    apply: yes
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/contiv/vpp/v3.4.2/k8s/contiv-vpp.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-danm.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-danm.yaml
new file mode 100644 (file)
index 0000000..04852e5
--- /dev/null
@@ -0,0 +1,125 @@
+---
+
+- name: Clean Danm
+  import_tasks: clear-danm.yaml
+
+- name: Deploy DanmNet CRD
+  k8s:
+    state: present
+    apply: yes
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/nokia/danm/v4.3.0/integration/crds/lightweight/DanmNet.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+
+- name: Deploy DanmEp CRD
+  k8s:
+    state: present
+    apply: yes
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/nokia/danm/v4.3.0/integration/crds/lightweight/DanmEp.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+
+- name: Create Danm service account
+  command: kubectl create --namespace kube-system serviceaccount danm
+
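+# 00-danm.conf registers danm as the primary ("meta") CNI; it delegates to the
+# plugins in cniDir and talks to the API server via the kubeconfig created below.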
+- name: Create Danm cni conf
+  copy:
+    dest: /etc/cni/net.d/00-danm.conf
+    mode: 0644
+    content: |
+      {
+        "cniVersion": "0.3.1",
+        "name": "meta_cni",
+        "type": "danm",
+        "kubeconfig": "/etc/cni/net.d/danm-kubeconfig",
+        "cniDir": "/etc/cni/net.d",
+        "namingScheme": "awesome"
+      }
+  become: yes
+
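+# The next tasks gather the cluster endpoint, CA data and the danm service
+# account token used to assemble that kubeconfig.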
+- name: Get Cluster name
+  command: kubectl config view -o jsonpath='{.clusters[0].name}'
+  register: cluster_name
+
+- name: Get Cluster Server
+  command: kubectl config view -o jsonpath='{.clusters[0].cluster.server}'
+  register: cluster_server
+
+- name: Get Cluster CA certification
+  command: kubectl config view --flatten -o jsonpath='{.clusters[0].cluster.certificate-authority-data}'
+  register: cluster_ca_certificate
+
+- name: Get Danm Secret Name
+  command: kubectl get --namespace kube-system -o jsonpath='{.secrets[0].name}' serviceaccounts danm
+  register: danm_secret_name
+
+- name: Get Danm Service Account Token
+  shell: kubectl get --namespace kube-system secrets {{ danm_secret_name.stdout }} -o jsonpath='{.data.token}' | base64 -d
+  register: danm_service_account_token
+
+- name: Create Danm kubeconfig
+  copy:
+    dest: /etc/cni/net.d/danm-kubeconfig
+    mode: 0644
+    content: |
+      apiVersion: v1
+      kind: Config
+      current-context: default
+      clusters:
+      - cluster:
+          certificate-authority-data: {{ cluster_ca_certificate.stdout }}
+          server: {{ cluster_server.stdout }}
+        name: {{ cluster_name.stdout }}
+      contexts:
+      - context:
+          cluster: {{ cluster_name.stdout }}
+          user: danm
+        name: default
+      users:
+      - name: danm
+        user:
+          token: {{ danm_service_account_token.stdout }}
+      preferences: {}
+  become: yes
+
+- name: Deploy Danm rbac
+  k8s:
+    state: present
+    apply: yes
+    definition: '{{ item }}'
+  with_items: '{{ lookup("url", "https://raw.githubusercontent.com/nokia/danm/v4.3.0/integration/cni_config/danm_rbac.yaml", split_lines=False) | from_yaml_all | list }}'
+  when: item is not none
+  
+- name: Deploy Danm cni plugins
+  k8s:
+    state: present
+    apply: yes
+    wait: yes
+    definition: "{{ lookup('file', 'danm-cni-plugins.yaml') }}"
+
+- name: Deploy Danm netwatcher
+  k8s:
+    state: present
+    apply: yes
+    definition: "{{ lookup('file', 'danm-netwatcher-daemonset.yaml') }}"
+
+- name: Create Danm webhook signed cert
+  script: danm-webhook-create-signed-cert.sh
+
+- name: Get CA Bundle
+  shell: kubectl config view --raw -o json | jq -r '.clusters[0].cluster."certificate-authority-data"' | tr -d '"'
+  register: danm_ca_bundle
+
+- name: Generate webhook deployment
+  template:
+    src: danm-webhook.yaml
+    dest: /tmp/danm-webhook.yaml
+    mode: 0644
+  vars:
+    ca_bundle: "{{ danm_ca_bundle.stdout }}"
+
+- name: Deploy Danm webhook
+  k8s:
+    state: present
+    apply: yes
+    src: /tmp/danm-webhook.yaml
index 28c3f50..519cd36 100644 (file)
@@ -29,7 +29,7 @@
 
 - name: deploy multus
   import_tasks: deploy-multus.yml
-  when: inventory_hostname in groups['master']
+  when: inventory_hostname in groups['master'] and 'danm' not in ansible_run_tags
   tags: deploy, cni
 
 - name: clear multus
   when: inventory_hostname in groups['master']
   tags: clear
 
+- name: deploy calico
+  import_tasks: deploy-calico.yaml
+  when: inventory_hostname in groups['master']
+  tags: deploy, cni
+
+- name: clear calico
+  import_tasks: clear-calico.yaml
+  when: inventory_hostname in groups['master']
+  tags: clear
+
+- name: deploy cilium
+  import_tasks: deploy-cilium.yaml
+  when: inventory_hostname in groups['master']
+  tags: deploy, cni
+
+- name: clear cilium
+  import_tasks: clear-cilium.yaml
+  when: inventory_hostname in groups['master']
+  tags: clear
+
+- name: deploy contiv
+  import_tasks: deploy-contiv.yaml
+  when: inventory_hostname in groups['master']
+  tags: deploy, cni
+
+- name: clear contiv
+  import_tasks: clear-contiv.yaml
+  when: inventory_hostname in groups['master']
+  tags: clear
+
+- name: deploy danm
+  import_tasks: deploy-danm.yaml
+  when: inventory_hostname in groups['master'] and 'danm' in ansible_run_tags
+  tags: deploy, cni, danm
+
+- name: clear danm
+  import_tasks: clear-danm.yaml
+  when: inventory_hostname in groups['master'] and 'danm' in ansible_run_tags
+  tags: clear
+
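+# Note: Calico, Cilium and Contiv-VPP share the generic "deploy, cni" tags, while
+# Danm is deployed/cleared only when the "danm" tag is passed explicitly.
+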
 - name: drain and delete workers from master
   import_tasks: clear-k8s-workers-drain.yml
   when: inventory_hostname in groups['workers']
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/templates/danm-webhook.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/templates/danm-webhook.yaml
new file mode 100644 (file)
index 0000000..1e5d66e
--- /dev/null
@@ -0,0 +1,128 @@
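+# Jinja2 template rendered by the "Generate webhook deployment" task in
+# deploy-danm.yaml, which supplies ca_bundle from the cluster CA certificate.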
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: danm-webhook
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: caas:danm-webhook
+rules:
+- apiGroups:
+  - danm.k8s.io
+  resources:
+  - tenantconfigs
+  - danmeps
+  verbs: [ "*" ]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: caas:danm-webhook
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: caas:danm-webhook
+subjects:
+- kind: ServiceAccount
+  name: danm-webhook
+  namespace: kube-system
+---
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: MutatingWebhookConfiguration
+metadata:
+  name: danm-webhook-config
+  namespace: kube-system
+webhooks:
+  - name: danm-netvalidation.nokia.k8s.io
+    clientConfig:
+      service:
+        name: danm-webhook-svc
+        namespace: kube-system
+        path: "/netvalidation"
+      caBundle: {{ ca_bundle }}
+    rules:
+      # UPDATE IS TEMPORARILY REMOVED DUE TO:https://github.com/nokia/danm/issues/144
+      - operations: ["CREATE"]
+        apiGroups: ["danm.k8s.io"]
+        apiVersions: ["v1"]
+        resources: ["danmnets","clusternetworks","tenantnetworks"]
+    failurePolicy: Fail
+    timeoutSeconds: 25
+  - name: danm-configvalidation.nokia.k8s.io
+    clientConfig:
+      service:
+        name: danm-webhook-svc
+        namespace: kube-system
+        path: "/confvalidation"
+      caBundle: {{ ca_bundle }}
+    rules:
+      - operations: ["CREATE","UPDATE"]
+        apiGroups: ["danm.k8s.io"]
+        apiVersions: ["v1"]
+        resources: ["tenantconfigs"]
+    failurePolicy: Fail
+    timeoutSeconds: 25
+  - name: danm-netdeletion.nokia.k8s.io
+    clientConfig:
+      service:
+        name: danm-webhook-svc
+        namespace: kube-system
+        path: "/netdeletion"
+      caBundle: {{ ca_bundle }}
+    rules:
+      - operations: ["DELETE"]
+        apiGroups: ["danm.k8s.io"]
+        apiVersions: ["v1"]
+        resources: ["danmnets","clusternetworks","tenantnetworks"]
+    failurePolicy: Fail
+    timeoutSeconds: 25
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: danm-webhook-svc
+  namespace: kube-system
+  labels:
+    danm: webhook
+spec:
+  ports:
+  - name: webhook
+    port: 443
+    targetPort: 8443
+  selector:
+    danm: webhook
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danm-webhook-deployment
+  namespace: kube-system
+  labels:
+    danm: webhook
+spec:
+  selector:
+    matchLabels:
+     danm: webhook
+  template:
+    metadata:
+      name: danm-webhook
+      labels:
+        danm: webhook
+    spec:
+      serviceAccountName: danm-webhook
+      containers:
+        - name: danm-webhook
+          image: webhook
+          command: [ "/usr/local/bin/webhook", "-tls-cert-bundle=/etc/webhook/certs/cert.pem", "-tls-private-key-file=/etc/webhook/certs/key.pem", "-bind-port=8443" ]
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+            - name: webhook-certs
+              mountPath: /etc/webhook/certs
+              readOnly: true
+      # Configure the directory holding the Webhook's server certificates
+      volumes:
+        - name: webhook-certs
+          secret:
+            secretName: danm-webhook-certs
\ No newline at end of file