Fix the Moon platform startup and some startup bugs. 71/46671/1
author    Thomas Duval <thomas.duval@orange.com>
Fri, 3 Nov 2017 14:07:12 +0000 (15:07 +0100)
committer Thomas Duval <thomas.duval@orange.com>
Fri, 3 Nov 2017 14:07:12 +0000 (15:07 +0100)
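
Switch the pod network from Canal back to Calico, replace the default
kube-dns deployment with a tuned manifest, and make each component wait
for its dependencies (Consul, the database, the manager, the
orchestrator) instead of failing at startup. Also normalize empty
keystone_project_id values in the PDP API, return one pod list per
deployment from the orchestrator, and route wrapper authz calls to the
interface container of the matching Keystone project.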
Change-Id: I9014071c755ec5ef0c9eb0d575af29d5f3ad3533

19 files changed:
moonv4/kubernetes/init_k8s.sh
moonv4/kubernetes/kube-dns.yaml [new file with mode: 0644]
moonv4/kubernetes/start_moon.sh
moonv4/kubernetes/templates/keystone.yaml
moonv4/kubernetes/templates/moon_gui.yaml
moonv4/moon_interface/moon_interface/authz_requests.py
moonv4/moon_manager/moon_manager/api/pdp.py
moonv4/moon_manager/moon_manager/http_server.py
moonv4/moon_orchestrator/Dockerfile
moonv4/moon_orchestrator/moon_orchestrator/api/pods.py
moonv4/moon_orchestrator/moon_orchestrator/drivers.py
moonv4/moon_orchestrator/moon_orchestrator/http_server.py
moonv4/moon_orchestrator/requirements.txt
moonv4/moon_utilities/moon_utilities/cache.py
moonv4/moon_utilities/moon_utilities/get_os_apis.py
moonv4/moon_wrapper/Dockerfile
moonv4/moon_wrapper/moon_wrapper/api/wrapper.py
moonv4/templates/moonforming/run.sh
moonv4/templates/moonforming/utils/pdp.py

diff --git a/moonv4/kubernetes/init_k8s.sh b/moonv4/kubernetes/init_k8s.sh
index a0d02a9..6eb94e7 100644 (file)
@@ -6,18 +6,21 @@ sudo kubeadm reset
 
 sudo swapoff -a
 
-#sudo kubeadm init --pod-network-cidr=192.168.0.0/16
-sudo kubeadm init --pod-network-cidr=10.244.0.0/16
+sudo kubeadm init --pod-network-cidr=192.168.0.0/16
+#sudo kubeadm init --pod-network-cidr=10.244.0.0/16
 
 mkdir -p $HOME/.kube
 sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
 sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
-#kubectl apply -f http://docs.projectcalico.org/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
-kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/rbac.yaml
-kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/canal.yaml
+kubectl apply -f http://docs.projectcalico.org/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
+#kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/rbac.yaml
+#kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/canal.yaml
 
-kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
+#kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
+
+kubectl delete deployment kube-dns --namespace=kube-system
+kubectl apply -f kubernetes/templates/kube-dns.yaml
 
 kubectl taint nodes --all node-role.kubernetes.io/master-
 
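To check that the replacement kube-dns pods actually scheduled after the
swap, the kubernetes Python client (the same one the orchestrator uses)
can be queried; this is an illustrative sketch, not part of the change:

    # Sketch: confirm the tuned kube-dns deployment is running.
    from kubernetes import client, config

    config.load_kube_config()  # reads the $HOME/.kube/config copied above
    v1 = client.CoreV1Api()
    pods = v1.list_namespaced_pod(namespace="kube-system",
                                  label_selector="k8s-app=kube-dns")
    for pod in pods.items:
        print(pod.metadata.name, pod.status.phase)  # expect "Running"
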
diff --git a/moonv4/kubernetes/kube-dns.yaml b/moonv4/kubernetes/kube-dns.yaml
new file mode 100644 (file)
index 0000000..c8f18fd
--- /dev/null
@@ -0,0 +1,183 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+    annotations:
+      deployment.kubernetes.io/revision: "2"
+      kubectl.kubernetes.io/last-applied-configuration: |
+        {"apiVersion":"extensions/v1beta1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2017-10-30T09:03:59Z","generation":1,"labels":{"k8s-app":"kube-dns"},"name":"kube-dns","namespace":"kube-system","resourceVersion":"556","selfLink":"/apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns","uid":"4433b709-bd51-11e7-a055-80fa5b15034a"},"spec":{"replicas":1,"selector":{"matchLabels":{"k8s-app":"kube-dns"}},"strategy":{"rollingUpdate":{"maxSurge":"10%","maxUnavailable":0},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"k8s-app":"kube-dns"}},"spec":{"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"beta.kubernetes.io/arch","operator":"In","values":["amd64"]}]}]}}},"containers":[{"args":["--domain=cluster.local.","--dns-port=10053","--config-dir=/kube-dns-config","--v=2"],"env":[{"name":"PROMETHEUS_PORT","value":"10055"}],"image":"gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":5,"httpGet":{"path":"/healthcheck/kubedns","port":10054,"scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"name":"kubedns","ports":[{"containerPort":10053,"name":"dns-local","protocol":"UDP"},{"containerPort":10053,"name":"dns-tcp-local","protocol":"TCP"},{"containerPort":10055,"name":"metrics","protocol":"TCP"}],"readinessProbe":{"failureThreshold":3,"httpGet":{"path":"/readiness","port":8081,"scheme":"HTTP"},"initialDelaySeconds":3,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"resources":{"limits":{"memory":"170Mi"},"requests":{"cpu":"100m","memory":"70Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","volumeMounts":[{"mountPath":"/kube-dns-config","name":"kube-dns-config"}]},{"args":["-v=2","-logtostderr","-configDir=/etc/k8s/dns/dnsmasq-nanny","-restartDnsmasq=true","--","-k","--cache-size=1000","--log-facility=-","--server=/cluster.local/127.0.0.1#10053","--server=/in-addr.arpa/127.0.0.1#10053","--server=/ip6.arpa/127.0.0.1#10053","--server=8.8.8.8"],"image":"gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":5,"httpGet":{"path":"/healthcheck/dnsmasq","port":10054,"scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"name":"dnsmasq","ports":[{"containerPort":53,"name":"dns","protocol":"UDP"},{"containerPort":53,"name":"dns-tcp","protocol":"TCP"}],"resources":{"requests":{"cpu":"150m","memory":"20Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","volumeMounts":[{"mountPath":"/etc/k8s/dns/dnsmasq-nanny","name":"kube-dns-config"}]},{"args":["--v=2","--logtostderr","--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A","--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A"],"image":"gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":5,"httpGet":{"path":"/metrics","port":10054,"scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"name":"sidecar","ports":[{"containerPort":10054,"name":"metrics","protocol":"TCP"}],"resources":{"requests":{"cpu":"10m","memory":"20Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"Default","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"serviceAccount":"kube-dns","serviceAccountName":"kube-dns","terminationGracePeriodSeconds":30,"tolerations":[{"key":"CriticalAddonsOnly","operator":"Exists"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"}],"volumes":[{"configMap":{"defaultMode":420,"name":"kube-dns","optional":true},"name":"kube-dns-config"}]}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2017-10-30T09:05:11Z","lastUpdateTime":"2017-10-30T09:05:11Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}
+    creationTimestamp: 2017-10-30T09:03:59Z
+    generation: 2
+    labels:
+      k8s-app: kube-dns
+    name: kube-dns
+    namespace: kube-system
+    resourceVersion: "300076"
+    selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns
+    uid: 4433b709-bd51-11e7-a055-80fa5b15034a
+spec:
+    replicas: 1
+    selector:
+      matchLabels:
+        k8s-app: kube-dns
+    strategy:
+      rollingUpdate:
+        maxSurge: 10%
+        maxUnavailable: 0
+      type: RollingUpdate
+    template:
+      metadata:
+        creationTimestamp: null
+        labels:
+          k8s-app: kube-dns
+      spec:
+        affinity:
+          nodeAffinity:
+            requiredDuringSchedulingIgnoredDuringExecution:
+              nodeSelectorTerms:
+              - matchExpressions:
+                - key: beta.kubernetes.io/arch
+                  operator: In
+                  values:
+                  - amd64
+        containers:
+        - args:
+          - --domain=cluster.local.
+          - --dns-port=10053
+          - --config-dir=/kube-dns-config
+          - --v=2
+          env:
+          - name: PROMETHEUS_PORT
+            value: "10055"
+          image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
+          imagePullPolicy: IfNotPresent
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthcheck/kubedns
+              port: 10054
+              scheme: HTTP
+            initialDelaySeconds: 60
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          name: kubedns
+          ports:
+          - containerPort: 10053
+            name: dns-local
+            protocol: UDP
+          - containerPort: 10053
+            name: dns-tcp-local
+            protocol: TCP
+          - containerPort: 10055
+            name: metrics
+            protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /readiness
+              port: 8081
+              scheme: HTTP
+            initialDelaySeconds: 3
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          resources:
+            limits:
+              memory: 340Mi
+            requests:
+              cpu: 200m
+              memory: 140Mi
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+          volumeMounts:
+          - mountPath: /kube-dns-config
+            name: kube-dns-config
+        - args:
+          - -v=2
+          - -logtostderr
+          - -configDir=/etc/k8s/dns/dnsmasq-nanny
+          - -restartDnsmasq=true
+          - --
+          - -k
+          - --dns-forward-max=300
+          - --cache-size=1000
+          - --log-facility=-
+          - --server=/cluster.local/127.0.0.1#10053
+          - --server=/in-addr.arpa/127.0.0.1#10053
+          - --server=/ip6.arpa/127.0.0.1#10053
+          - --server=8.8.8.8
+          image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
+          imagePullPolicy: IfNotPresent
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthcheck/dnsmasq
+              port: 10054
+              scheme: HTTP
+            initialDelaySeconds: 60
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          name: dnsmasq
+          ports:
+          - containerPort: 53
+            name: dns
+            protocol: UDP
+          - containerPort: 53
+            name: dns-tcp
+            protocol: TCP
+          resources:
+            requests:
+              cpu: 150m
+              memory: 20Mi
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+          volumeMounts:
+          - mountPath: /etc/k8s/dns/dnsmasq-nanny
+            name: kube-dns-config
+        - args:
+          - --v=2
+          - --logtostderr
+          - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
+          - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
+          image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
+          imagePullPolicy: IfNotPresent
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /metrics
+              port: 10054
+              scheme: HTTP
+            initialDelaySeconds: 60
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          name: sidecar
+          ports:
+          - containerPort: 10054
+            name: metrics
+            protocol: TCP
+          resources:
+            requests:
+              cpu: 10m
+              memory: 20Mi
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+        dnsPolicy: Default
+        restartPolicy: Always
+        schedulerName: default-scheduler
+        securityContext: {}
+        serviceAccount: kube-dns
+        serviceAccountName: kube-dns
+        terminationGracePeriodSeconds: 30
+        tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+        volumes:
+        - configMap:
+            defaultMode: 420
+            name: kube-dns
+            optional: true
+          name: kube-dns-config
diff --git a/moonv4/kubernetes/start_moon.sh b/moonv4/kubernetes/start_moon.sh
index 9900f1e..705ca22 100644 (file)
@@ -16,6 +16,7 @@ echo =========================================
 kubectl get pods -n moon
 echo =========================================
 
+sleep 5
 kubectl create -n moon -f kubernetes/templates/moon_configuration.yaml
 
 echo Waiting for jobs moonforming
@@ -23,8 +24,14 @@ sleep 5
 kubectl get jobs -n moon
 kubectl logs -n moon jobs/moonforming
 
+sleep 5
+
 kubectl create -n moon -f kubernetes/templates/moon_manager.yaml
+
+sleep 2
+
 kubectl create -n moon -f kubernetes/templates/moon_orchestrator.yaml
+
 kubectl create -n moon -f kubernetes/templates/moon_gui.yaml
 
 
diff --git a/moonv4/kubernetes/templates/keystone.yaml b/moonv4/kubernetes/templates/keystone.yaml
index 84a51f7..b3ef412 100644 (file)
@@ -14,6 +14,11 @@ spec:
       containers:
       - name: keystone
         image: asteroide/keystone:mitaka
+        env:
+        - name: KEYSTONE_HOSTNAME
+          value: "varuna"
+        - name: KEYSTONE_PORT
+          value: "30006"
         ports:
         - containerPort: 35357
           containerPort: 5000
diff --git a/moonv4/kubernetes/templates/moon_gui.yaml b/moonv4/kubernetes/templates/moon_gui.yaml
index 732a3ce..3500f1e 100644 (file)
@@ -13,7 +13,16 @@ spec:
       hostname: gui
       containers:
       - name: gui
-        image: wukongsun/moon_gui:v4.1
+        image: wukongsun/moon_gui:v4.3
+        env:
+        - name: MANAGER_HOST
+          value: "varuna"
+        - name: MANAGER_PORT
+          value: "30001"
+        - name: KEYSTONE_HOST
+          value: "varuna"
+        - name: KEYSTONE_PORT
+          value: "30006"
         ports:
         - containerPort: 80
 ---
diff --git a/moonv4/moon_interface/moon_interface/authz_requests.py b/moonv4/moon_interface/moon_interface/authz_requests.py
index 2eb5fd1..1035678 100644 (file)
@@ -28,6 +28,8 @@ class AuthzRequest:
         self.context = Context(ctx, CACHE)
         self.args = args
         self.request_id = ctx["request_id"]
+        # LOG.info("container={}".format(CACHE.containers))
+        # LOG.info("container_chaining={}".format(CACHE.container_chaining))
         if ctx['project_id'] not in CACHE.container_chaining:
             raise exceptions.KeystoneProjectError("Unknown Project ID {}".format(ctx['project_id']))
         self.container_chaining = CACHE.container_chaining[ctx['project_id']]
@@ -39,6 +41,9 @@ class AuthzRequest:
     def run(self):
         self.context.delete_cache()
         try:
+            LOG.debug("url=http://{}:{}/authz".format(
+                self.container_chaining[0]["hostname"],
+                self.container_chaining[0]["port"]))
             req = requests.post("http://{}:{}/authz".format(
                 self.container_chaining[0]["hostname"],
                 self.container_chaining[0]["port"],
@@ -80,48 +85,48 @@ class AuthzRequest:
             # req.raw.decode_content = True
             self.result = pickle.loads(req.content)
 
-    def __exec_next_state(self, rule_found):
-        index = self.context.index
-        current_meta_rule = self.context.headers[index]
-        current_container = self.__get_container_from_meta_rule(current_meta_rule)
-        current_container_genre = current_container["genre"]
-        try:
-            next_meta_rule = self.context.headers[index + 1]
-        except IndexError:
-            next_meta_rule = None
-        if current_container_genre == "authz":
-            if rule_found:
-                return True
-            pass
-            if next_meta_rule:
-                # next will be session if current is deny and session is unset
-                if self.payload["authz_context"]['pdp_set'][next_meta_rule]['effect'] == "unset":
-                    return notify(
-                        request_id=self.payload["authz_context"]["request_id"],
-                        container_id=self.__get_container_from_meta_rule(next_meta_rule)['container_id'],
-                        payload=self.payload)
-                # next will be delegation if current is deny and session is passed or deny and delegation is unset
-                else:
-                    LOG.error("Delegation is not developed!")
-
-            else:
-                # else next will be None and the request is sent to router
-                return self.__return_to_router()
-        elif current_container_genre == "session":
-            pass
-            # next will be next container in headers if current is passed
-            if self.payload["authz_context"]['pdp_set'][current_meta_rule]['effect'] == "passed":
-                return notify(
-                    request_id=self.payload["authz_context"]["request_id"],
-                    container_id=self.__get_container_from_meta_rule(next_meta_rule)['container_id'],
-                    payload=self.payload)
-            # next will be None if current is grant and the request is sent to router
-            else:
-                return self.__return_to_router()
-        elif current_container_genre == "delegation":
-            LOG.error("Delegation is not developed!")
-            # next will be authz if current is deny
-            # next will be None if current is grant and the request is sent to router
+    def __exec_next_state(self, rule_found):
+        index = self.context.index
+        current_meta_rule = self.context.headers[index]
+        current_container = self.__get_container_from_meta_rule(current_meta_rule)
+        current_container_genre = current_container["genre"]
+        try:
+            next_meta_rule = self.context.headers[index + 1]
+        except IndexError:
+            next_meta_rule = None
+        if current_container_genre == "authz":
+            if rule_found:
+                return True
+            pass
+            if next_meta_rule:
+                # next will be session if current is deny and session is unset
+                if self.payload["authz_context"]['pdp_set'][next_meta_rule]['effect'] == "unset":
+                    return notify(
+                        request_id=self.payload["authz_context"]["request_id"],
+                        container_id=self.__get_container_from_meta_rule(next_meta_rule)['container_id'],
+                        payload=self.payload)
+                # next will be delegation if current is deny and session is passed or deny and delegation is unset
+                else:
+                    LOG.error("Delegation is not developed!")
+    #
+            else:
+                # else next will be None and the request is sent to router
+                return self.__return_to_router()
+        elif current_container_genre == "session":
+            pass
+            # next will be next container in headers if current is passed
+            if self.payload["authz_context"]['pdp_set'][current_meta_rule]['effect'] == "passed":
+                return notify(
+                    request_id=self.payload["authz_context"]["request_id"],
+                    container_id=self.__get_container_from_meta_rule(next_meta_rule)['container_id'],
+                    payload=self.payload)
+            # next will be None if current is grant and the request is sent to router
+            else:
+                return self.__return_to_router()
+        elif current_container_genre == "delegation":
+            LOG.error("Delegation is not developed!")
+            # next will be authz if current is deny
+            # next will be None if current is grant and the request is sent to router
 
     def set_result(self, result):
         self.result = result
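
run() above only ever talks to the first element of container_chaining;
later hops are reached through __exec_next_state() and notify(). A
minimal sketch of the first hop, assuming the request body is pickled
the same way the response is unpickled:

    import pickle
    import requests

    first_hop = container_chaining[0]
    url = "http://{}:{}/authz".format(first_hop["hostname"],
                                      first_hop["port"])
    req = requests.post(url, data=pickle.dumps(context))  # payload format assumed
    if req.status_code == 200:
        result = pickle.loads(req.content)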
diff --git a/moonv4/moon_manager/moon_manager/api/pdp.py b/moonv4/moon_manager/moon_manager/api/pdp.py
index 823055a..ffc91fb 100644 (file)
@@ -11,6 +11,7 @@ from flask import request
 from flask_restful import Resource
 import logging
 import requests
+import time
 from moon_utilities.security_functions import check_auth
 from moon_db.core import PDPManager
 from moon_utilities import configuration
@@ -25,12 +26,20 @@ def delete_pod(uuid):
 
 
 def add_pod(uuid, data):
+    LOG.info("Add a new pod {}".format(data))
     conf = configuration.get_configuration("components/orchestrator")
     hostname = conf["components/orchestrator"].get("hostname", "orchestrator")
     port = conf["components/orchestrator"].get("port", 80)
     proto = conf["components/orchestrator"].get("protocol", "http")
-    req = requests.post("{}://{}:{}/pods".format(proto, hostname, port),
-                        data=data)
+    while True:
+        try:
+            req = requests.post("{}://{}:{}/pods".format(proto, hostname, port),
+                                data=data)
+        except requests.exceptions.ConnectionError:
+            LOG.warning("Orchestrator is not ready, standby...")
+            time.sleep(1)
+        else:
+            break
     LOG.info(req.text)
 
 
@@ -93,8 +102,14 @@ class PDP(Resource):
         :internal_api: add_pdp
         """
         try:
+            _data = dict(request.json)
+            if not _data.get("keystone_project_id"):
+                _data["keystone_project_id"] = None
             data = PDPManager.add_pdp(
-                user_id=user_id, pdp_id=None, value=request.json)
+                user_id=user_id, pdp_id=None, value=_data)
+            uuid = list(data.keys())[0]
+            LOG.info("data={}".format(data))
+            LOG.info("uuid={}".format(uuid))
             add_pod(uuid=uuid, data=data[uuid])
         except Exception as e:
             LOG.error(e, exc_info=True)
@@ -140,8 +155,13 @@ class PDP(Resource):
         :internal_api: update_pdp
         """
         try:
+            _data = dict(request.json)
+            if not _data.get("keystone_project_id"):
+                _data["keystone_project_id"] = None
             data = PDPManager.update_pdp(
-                user_id=user_id, pdp_id=uuid, value=request.json)
+                user_id=user_id, pdp_id=uuid, value=_data)
+            LOG.info("data={}".format(data))
+            LOG.info("uuid={}".format(uuid))
             add_pod(uuid=uuid, data=data[uuid])
         except Exception as e:
             LOG.error(e, exc_info=True)
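
add_pod() above now retries forever while the orchestrator is down. A
bounded variant of the same pattern, as a sketch (the helper name and
max_attempts are not part of the change):

    import time
    import requests

    def post_with_retry(url, data, max_attempts=30, delay=1):
        # Hypothetical bounded version of the retry loop in add_pod().
        for _ in range(max_attempts):
            try:
                return requests.post(url, data=data)
            except requests.exceptions.ConnectionError:
                time.sleep(delay)
        raise RuntimeError(
            "orchestrator still unreachable after "
            "{} attempts".format(max_attempts))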
diff --git a/moonv4/moon_manager/moon_manager/http_server.py b/moonv4/moon_manager/moon_manager/http_server.py
index dfaa23a..c671ed6 100644 (file)
@@ -7,6 +7,8 @@ from flask import Flask, jsonify
 from flask_cors import CORS, cross_origin
 from flask_restful import Resource, Api
 import logging
+import sqlalchemy.exc
+import time
 from moon_manager import __version__
 from moon_manager.api.generic import Status, Logs, API
 from moon_manager.api.models import Models
@@ -20,8 +22,10 @@ from moon_manager.api.assignments import SubjectAssignments, ObjectAssignments,
 from moon_manager.api.rules import Rules
 # from moon_manager.api.containers import Container
 from moon_utilities import configuration, exceptions
+from moon_db.core import PDPManager
 
-logger = logging.getLogger("moon.manager.http")
+
+LOG = logging.getLogger("moon.manager.http")
 
 
 class Server:
@@ -132,6 +136,22 @@ class HTTPServer(Server):
         for api in __API__:
             self.api.add_resource(api, *api.__urls__)
 
+    @staticmethod
+    def __check_if_db_is_up():
+        first = True
+        while True:
+            try:
+                PDPManager.get_pdp(user_id="admin", pdp_id=None)
+            except sqlalchemy.exc.ProgrammingError:
+                time.sleep(1)
+                if first:
+                    LOG.warning("Waiting for the database...")
+                    first = False
+            else:
+                LOG.warning("Database is up, resuming operations...")
+                break
+
     def run(self):
+        self.__check_if_db_is_up()
         self.app.run(debug=True, host=self._host, port=self._port)  # nosec
 
diff --git a/moonv4/moon_orchestrator/Dockerfile b/moonv4/moon_orchestrator/Dockerfile
index 70eef9a..aafe178 100644 (file)
@@ -9,7 +9,7 @@ RUN pip3 install pip --upgrade
 ADD . /root
 WORKDIR /root/
 RUN pip3 install -r requirements.txt --upgrade
-RUN pip3 install /root/dist/* --upgrade
+#RUN pip3 install /root/dist/* --upgrade
 RUN pip3 install . --upgrade
 
 CMD ["python3", "-m", "moon_orchestrator"]
\ No newline at end of file
diff --git a/moonv4/moon_orchestrator/moon_orchestrator/api/pods.py b/moonv4/moon_orchestrator/moon_orchestrator/api/pods.py
index 524f8e4..a7ca1cb 100644 (file)
@@ -8,7 +8,7 @@ from flask_restful import Resource
 from moon_utilities.security_functions import check_auth
 import logging
 
-LOG = logging.getLogger("moon.orchestrator.api.containers")
+LOG = logging.getLogger("moon.orchestrator.api.pods")
 
 
 class Pods(Resource):
@@ -42,12 +42,15 @@ class Pods(Resource):
         :internal_api: get_pdp
         """
         pods = {}
-        LOG.info("pods={}".format(self.driver.get_pods()))
+        # LOG.info("pods={}".format(self.driver.get_pods()))
+        if uuid:
+            return {"pods": self.driver.get_pods(uuid)}
         for _pod_key, _pod_values in self.driver.get_pods().items():
+            pods[_pod_key] = []
             for _pod_value in _pod_values:
                 if _pod_value['namespace'] != "moon":
                     continue
-                pods[_pod_key] = _pod_value
+                pods[_pod_key].append(_pod_value)
         return {"pods": pods}
 
     @check_auth
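
With this change, GET /pods returns one list of pod descriptions per
deployment instead of a single dict. Judging from how cache.py and
wrapper.py consume it, the response is shaped roughly like this (values
illustrative):

    {
        "pods": {
            "<deployment uid>": [
                {"name": "interface-abc123",
                 "namespace": "moon",
                 "port": 8080,
                 "genre": "interface",
                 "keystone_project_id": "<project id>"},
            ]
        }
    }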
diff --git a/moonv4/moon_orchestrator/moon_orchestrator/drivers.py b/moonv4/moon_orchestrator/moon_orchestrator/drivers.py
index 970914a..63631c0 100644 (file)
@@ -6,7 +6,6 @@
 from kubernetes import client, config
 import logging
 import urllib3.exceptions
-import time
 from moon_utilities import configuration
 
 logger = logging.getLogger("moon.orchestrator.drivers")
@@ -14,12 +13,10 @@ logger = logging.getLogger("moon.orchestrator.drivers")
 
 def get_driver():
     try:
-        driver = K8S()
+        return K8S()
     except urllib3.exceptions.MaxRetryError as e:
         logger.exception(e)
         return Docker()
-    else:
-        return K8S()
 
 
 class Driver:
@@ -60,25 +57,19 @@ class K8S(Driver):
         self.client = client.CoreV1Api()
 
     def get_pods(self, name=None):
-        # pods = self.client.list_pod_for_all_namespaces(watch=False)
-        # if not namespace:
-        #     return pods
-        # # TODO: get pods with specific namespace
-        # for pod in pods:
-        #     logger.info("%s\t%s\t%s" % (pod.status.pod_ip,
-        #                                 pod.metadata.namespace,
-        #                                 pod.metadata.name))
-        # return pods
         if name:
             pods = self.client.list_pod_for_all_namespaces(watch=False)
-            for pod in pods:
-                if pod.metadata.name == name:
+            for pod in pods.items:
+                logger.info("get_pods {}".format(pod.metadata.name))
+                if name in pod.metadata.name:
                     return pod
             else:
                 return None
+        logger.info("get_pods cache={}".format(self.cache))
         return self.cache
 
-    def __create_pod(self, client, data):
+    @staticmethod
+    def __create_pod(client, data):
         pod_manifest = {
             'apiVersion': 'extensions/v1beta1',
             'kind': 'Deployment',
@@ -89,7 +80,7 @@ class K8S(Driver):
                 'replicas': 1,
                 'template': {
                     'metadata': {'labels': {'app': data[0].get('name')}},
-                    # 'hostname': data.get('name'),
+                    'hostname': data[0].get('name'),
                     'spec': {
                         'containers': []
                     }
@@ -101,6 +92,7 @@ class K8S(Driver):
                 {
                     'image': _data.get('container', "busybox"),
                     'name': _data.get('name'),
+                    'hostname': _data.get('name'),
                     'ports': [
                         {"containerPort": _data.get('port', 80)},
                     ],
@@ -118,9 +110,12 @@ class K8S(Driver):
         resp = client.create_namespaced_deployment(body=pod_manifest,
                                                    namespace='moon')
         logger.info("Pod {} created!".format(data[0].get('name')))
+        # logger.info(yaml.dump(pod_manifest, sys.stdout))
+        # logger.info(resp)
         return resp
 
-    def __create_service(self, client, data, expose=False):
+    @staticmethod
+    def __create_service(client, data, expose=False):
         service_manifest = {
             'apiVersion': 'v1',
             'kind': 'Service',
@@ -154,26 +149,17 @@ class K8S(Driver):
 
     def load_pod(self, data, api_client=None, ext_client=None, expose=False):
         _client = api_client if api_client else self.client
-        logger.info("Creating pod/service {}".format(data[0].get('name')))
-        logger.info("Creating pod/service {}".format(data))
         pod = self.__create_pod(client=ext_client, data=data)
         service = self.__create_service(client=_client, data=data[0],
                                         expose=expose)
-        # logger.info("data={}".format(data))
-        # logger.info("service={}".format(service))
+        # logger.info("load_pod data={}".format(data))
+        # logger.info("pod.metadata.uid={}".format(pod.metadata.uid))
         self.cache[pod.metadata.uid] = data
-        #     {
-        #     "ip": "",
-        #     "hostname": pod.metadata.name,
-        #     "port": service.spec.ports[0].node_port,
-        #     "pdp": "",
-        #     "keystone_project_id": data[0].get('keystone_project_id'),
-        #     "plugin_names": [d.get('genre') for d in data],
-        #     "namespace": "moon"
-        # }
 
     def delete_pod(self, uuid=None, name=None):
         logger.info("Deleting pod {}".format(uuid))
+        # TODO: delete_namespaced_deployment
+        # https://github.com/kubernetes-incubator/client-python/blob/master/kubernetes/client/apis/extensions_v1beta1_api.py
 
     def get_slaves(self):
         contexts, active_context = config.list_kube_config_contexts()
@@ -184,6 +170,8 @@ class Docker(Driver):
 
     def load_pod(self, data, api_client=None, ext_client=None):
         logger.info("Creating pod {}".format(data[0].get('name')))
+        raise NotImplementedError
 
     def delete_pod(self, uuid=None, name=None):
         logger.info("Deleting pod {}".format(uuid))
+        raise NotImplementedError
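
get_driver() now returns the K8S driver directly, falling back to the
Docker stub (which simply raises) when the cluster is unreachable. A
usage sketch matching how http_server.py drives it, with illustrative
pod data:

    from kubernetes import client, config
    from moon_orchestrator.drivers import get_driver

    driver = get_driver()
    _config = config.new_client_from_config(context="some-context")  # illustrative
    api_client = client.CoreV1Api(_config)
    ext_client = client.ExtensionsV1beta1Api(_config)
    data = [{
        "name": "wrapper-abc123",                   # illustrative name
        "container": "wukongsun/moon_wrapper:v4.3",
        "port": 8080,
        "namespace": "moon",
    }, ]
    driver.load_pod(data, api_client, ext_client, expose=True)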
diff --git a/moonv4/moon_orchestrator/moon_orchestrator/http_server.py b/moonv4/moon_orchestrator/moon_orchestrator/http_server.py
index a0738f4..70c5e2d 100644 (file)
@@ -10,6 +10,7 @@ import logging
 from kubernetes import client, config
 import random
 import requests
+import time
 from moon_orchestrator import __version__
 from moon_orchestrator.api.pods import Pods
 from moon_orchestrator.api.generic import Logs, Status
@@ -17,7 +18,7 @@ from moon_utilities import configuration, exceptions
 from moon_utilities.misc import get_random_name
 from moon_orchestrator.drivers import get_driver
 
-logger = logging.getLogger("moon.orchestrator.http")
+LOG = logging.getLogger("moon.orchestrator.http")
 
 
 class Server:
@@ -106,13 +107,26 @@ class HTTPServer(Server):
         # CORS(self.app)
         self.api = Api(self.app)
         self.driver = get_driver()
-        logger.info("Driver = {}".format(self.driver.__class__))
+        LOG.info("Driver = {}".format(self.driver.__class__))
         self.__set_route()
         self.__hook_errors()
+        pdp = None
+        while True:
+            try:
+                pdp = requests.get(
+                    "http://{}:{}/pdp".format(self.manager_hostname,
+                                              self.manager_port))
+            except requests.exceptions.ConnectionError:
+                LOG.warning("Manager is not ready, standby...")
+                time.sleep(1)
+            except KeyError:
+                LOG.warning("Manager is not ready, standby...")
+                time.sleep(1)
+            else:
+                if "pdps" in pdp.json():
+                    break
+        LOG.debug("pdp={}".format(pdp))
         self.create_wrappers()
-        pdp = requests.get("http://{}:{}/pdp".format(self.manager_hostname,
-                                                     self.manager_port))
-        logger.info("pdp={}".format(pdp))
         for _pdp_key, _pdp_value in pdp.json()['pdps'].items():
             if _pdp_value.get('keystone_project_id'):
                 # TODO: select context to add security function
@@ -151,8 +165,8 @@ class HTTPServer(Server):
 
     def create_wrappers(self):
         contexts, active_context = self.driver.get_slaves()
-        logger.info("contexts: {}".format(contexts))
-        logger.info("active_context: {}".format(active_context))
+        LOG.debug("contexts: {}".format(contexts))
+        LOG.debug("active_context: {}".format(active_context))
         conf = configuration.get_configuration("components/wrapper")
         hostname = conf["components/wrapper"].get(
             "hostname", "wrapper")
@@ -162,7 +176,7 @@ class HTTPServer(Server):
             "wukongsun/moon_wrapper:v4.3")
         for _ctx in contexts:
             _config = config.new_client_from_config(context=_ctx['name'])
-            logger.info("_config={}".format(_config))
+            LOG.debug("_config={}".format(_config))
             api_client = client.CoreV1Api(_config)
             ext_client = client.ExtensionsV1beta1Api(_config)
             # TODO: get data from consul
@@ -173,7 +187,7 @@ class HTTPServer(Server):
                 "namespace": "moon"
             }, ]
             pod = self.driver.load_pod(data, api_client, ext_client, expose=True)
-            logger.info('wrapper pod={}'.format(pod))
+            LOG.debug('wrapper pod={}'.format(pod))
 
     def create_security_function(self, keystone_project_id,
                                  pdp_id, policy_ids, active_context=None,
@@ -189,11 +203,11 @@ class HTTPServer(Server):
         security function in all context (ie, in all slaves)
         :return: None
         """
-        logger.info(self.driver.get_pods())
+        LOG.debug(self.driver.get_pods())
         for key, value in self.driver.get_pods().items():
             for _pod in value:
                 if _pod.get('keystone_project_id') == keystone_project_id:
-                    logger.warning("A pod for this Keystone project {} "
+                    LOG.warning("A pod for this Keystone project {} "
                                    "already exists.".format(keystone_project_id))
                     return
         plugins = configuration.get_plugins()
@@ -247,14 +261,14 @@ class HTTPServer(Server):
             active_context = _active_context
             _config = config.new_client_from_config(
                 context=active_context['name'])
-            logger.info("_config={}".format(_config))
+            LOG.debug("_config={}".format(_config))
             api_client = client.CoreV1Api(_config)
             ext_client = client.ExtensionsV1beta1Api(_config)
             self.driver.load_pod(data, api_client, ext_client)
             return
         for _ctx in contexts:
             _config = config.new_client_from_config(context=_ctx['name'])
-            logger.info("_config={}".format(_config))
+            LOG.debug("_config={}".format(_config))
             api_client = client.CoreV1Api(_config)
             ext_client = client.ExtensionsV1beta1Api(_config)
             self.driver.load_pod(data, api_client, ext_client)
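
The startup loop above blocks until GET /pdp answers with a body that
contains a "pdps" key. The structure it then iterates looks roughly
like this (keys taken from this file and from
templates/moonforming/utils/pdp.py, values illustrative):

    {
        "pdps": {
            "<pdp_id>": {
                "name": "test_pdp",
                "keystone_project_id": "<project id or null>",
                "security_pipeline": ["<policy_id>"]
            }
        }
    }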
diff --git a/moonv4/moon_orchestrator/requirements.txt b/moonv4/moon_orchestrator/requirements.txt
index 29885a4..6197f10 100644 (file)
@@ -4,4 +4,5 @@ flask_cors
 werkzeug
 moon_utilities
 moon_db
-kubernetes
\ No newline at end of file
+kubernetes
+pyaml
\ No newline at end of file
diff --git a/moonv4/moon_utilities/moon_utilities/cache.py b/moonv4/moon_utilities/moon_utilities/cache.py
index 2a289df..e1d2477 100644 (file)
@@ -388,29 +388,28 @@ class Cache(object):
 
     def get_containers_from_keystone_project_id(self, keystone_project_id,
                                                 meta_rule_id=None):
-        for container_id, container_value in self.containers.items():
-            LOG.info("container={}".format(container_value))
-            if 'keystone_project_id' not in container_value:
-                continue
-            if container_value['keystone_project_id'] == keystone_project_id:
-                if not meta_rule_id:
-                    yield container_id, container_value
-                elif container_value.get('meta_rule_id') == meta_rule_id:
-                    yield container_id, container_value
-                    break
+        for container_id, container_values in self.containers.items():
+            for container_value in container_values:
+                if 'keystone_project_id' not in container_value:
+                    continue
+                if container_value['keystone_project_id'] == keystone_project_id:
+                    if not meta_rule_id:
+                        yield container_id, container_value
+                    elif container_value.get('meta_rule_id') == meta_rule_id:
+                        yield container_id, container_value
+                        break
 
     # containers functions
 
     def __update_container(self):
-        LOG.info("orchestrator={}".format("{}/pods".format(self.orchestrator_url)))
         req = requests.get("{}/pods".format(self.orchestrator_url))
-        LOG.info("pods={}".format(req.text))
         pods = req.json()
         for key, value in pods["pods"].items():
-            if key not in self.__CONTAINERS:
-                self.__CONTAINERS[key] = value
-            else:
-                self.__CONTAINERS[key].update(value)
+            # if key not in self.__CONTAINERS:
+            self.__CONTAINERS[key] = value
+            # else:
+            #     for container in value:
+            #         self.__CONTAINERS[key].update(value)
 
     def add_container(self, container_data):
         """Add a new container in the cache
@@ -491,6 +490,8 @@ class Cache(object):
         current_time = time.time()
         if self.__CONTAINER_CHAINING_UPDATE + self.__UPDATE_INTERVAL < current_time:
             for key, value in self.pdp.items():
+                if not value["keystone_project_id"]:
+                    continue
                 self.__update_container_chaining(value["keystone_project_id"])
         self.__CONTAINER_CHAINING_UPDATE = current_time
         LOG.info(self.__CONTAINER_CHAINING_UPDATE)
@@ -508,14 +509,18 @@ class Cache(object):
                                 keystone_project_id,
                                 meta_rule_id
                             ):
+                                _raw = requests.get("{}/pods/{}".format(
+                                    self.orchestrator_url, container_value["name"])
+                                )
+                                LOG.debug("_raw={}".format(_raw.text))
                                 container_ids.append(
                                     {
-                                        "container_id": self.__CONTAINERS[container_id]["name"],
-                                        "genre": self.__CONTAINERS[container_id]["genre"],
+                                        "container_id": container_value["name"],
+                                        "genre": container_value["genre"],
                                         "policy_id": policy_id,
                                         "meta_rule_id": meta_rule_id,
-                                        "hostname": self.__CONTAINERS[container_id]["name"],
-                                        "port": self.__CONTAINERS[container_id]["port"],
+                                        "hostname": container_value["name"],
+                                        "port": container_value["port"],
                                     }
                                 )
         self.__CONTAINER_CHAINING[keystone_project_id] = container_ids
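
After this change each entry of CACHE.containers is a list of
per-container dicts, and __update_container_chaining() builds, for each
Keystone project, one chain entry per meta rule. The resulting cache
looks roughly like this (values illustrative):

    CACHE.container_chaining["<keystone_project_id>"] = [
        {
            "container_id": "<container name>",
            "genre": "authz",
            "policy_id": "<policy_id>",
            "meta_rule_id": "<meta_rule_id>",
            "hostname": "<container name>",
            "port": 8080,
        },
    ]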
diff --git a/moonv4/moon_utilities/moon_utilities/get_os_apis.py b/moonv4/moon_utilities/moon_utilities/get_os_apis.py
index d019c1c..0008b28 100644 (file)
@@ -101,10 +101,10 @@ def to_str(results):
 
 
 def get_data_from_policies(policies):
-    for filename in policies.split(","):
-        try:
-            obj = json.loads(open(filename.strip()).read())
-
+    return
+    # for filename in policies.split(","):
+    #     try:
+    #         obj = json.loads(open(filename.strip()).read())
 
 
 def save(results, args):
@@ -131,4 +131,4 @@ def main():
     save(results, args)
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/moonv4/moon_wrapper/Dockerfile b/moonv4/moon_wrapper/Dockerfile
index dc06f8d..958f136 100644 (file)
@@ -6,7 +6,7 @@ RUN pip3 install pip --upgrade
 ADD . /root
 WORKDIR /root/
 RUN pip3 install -r requirements.txt --upgrade
-RUN pip3 install /root/dist/* --ugrade
+#RUN pip3 install /root/dist/* --upgrade
 RUN pip3 install .
 
 CMD ["python3", "-m", "moon_wrapper"]
diff --git a/moonv4/moon_wrapper/moon_wrapper/api/wrapper.py b/moonv4/moon_wrapper/moon_wrapper/api/wrapper.py
index 1de9278..99ac248 100644 (file)
@@ -12,6 +12,7 @@ from flask_restful import Resource
 import logging
 import json
 import requests
+from moon_utilities import exceptions
 import time
 from uuid import uuid4
 
@@ -45,7 +46,7 @@ class Wrapper(Resource):
     #     return self.manage_data()
 
     def post(self):
-        LOG.info("POST {}".format(request.form))
+        LOG.debug("POST {}".format(request.form))
         response = flask.make_response("False")
         if self.manage_data():
             response = flask.make_response("True")
@@ -75,10 +76,25 @@ class Wrapper(Resource):
         return target.get("project_id", "none")
 
     def get_interface_url(self, project_id):
-        for container in self.CACHE.containers.values():
-            if container.get("keystone_project_id") == project_id:
-                return "http://{}:{}".format(container['hostname'],
-                                             container['port'][0]["PublicPort"])
+        for containers in self.CACHE.containers.values():
+            for container in containers:
+                if container.get("keystone_project_id") == project_id:
+                    if "interface" in container['name']:
+                        return "http://{}:{}".format(
+                            container['name'],
+                            container['port'])
+        self.CACHE.update()
+        # Note (asteroide): test another time after the update
+        for containers in self.CACHE.containers.values():
+            for container in containers:
+                if container.get("keystone_project_id") == project_id:
+                    if "interface" in container['name']:
+                        return "http://{}:{}".format(
+                            container['name'],
+                            container['port'])
+        raise exceptions.AuthzException("Keystone Project "
+                                        "ID ({}) is unknown or not mapped "
+                                        "to a PDP.".format(project_id))
 
     def manage_data(self):
         target = json.loads(request.form.get('target', {}))
@@ -87,16 +103,19 @@ class Wrapper(Resource):
         _subject = self.__get_subject(target, credentials)
         _object = self.__get_object(target, credentials)
         _project_id = self.__get_project_id(target, credentials)
-        LOG.info("POST with args project={} / "
+        LOG.debug("POST with args project={} / "
                  "subject={} - object={} - action={}".format(
                     _project_id, _subject, _object, rule))
         interface_url = self.get_interface_url(_project_id)
-        req = requests.get("{}/{}/{}/{}".format(
+        LOG.debug("interface_url={}".format(interface_url))
+        req = requests.get("{}/authz/{}/{}/{}/{}".format(
             interface_url,
+            _project_id,
             _subject,
             _object,
             rule
         ))
+        LOG.debug("Get interface {}".format(req.text))
         if req.status_code == 200:
             if req.json().get("result", False):
                 return True
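
The wrapper therefore resolves the interface container of the project
and issues a GET shaped like this sketch (identifier values
illustrative):

    url = "{}/authz/{}/{}/{}/{}".format(
        "http://interface-abc123:8080",  # from get_interface_url(), illustrative
        _project_id, _subject, _object, rule)
    req = requests.get(url)
    granted = req.status_code == 200 and req.json().get("result", False)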
diff --git a/moonv4/templates/moonforming/run.sh b/moonv4/templates/moonforming/run.sh
index 25e015f..e3c052c 100644 (file)
@@ -1,12 +1,26 @@
 #!/usr/bin/env bash
 
-# TODO: wait for consul
+echo "Waiting for Consul (http://consul:8500)"
+while ! python -c "import requests; req = requests.get('http://consul:8500')" 2>/dev/null ; do
+    sleep 5 ;
+    echo "."
+done
+
+echo "Consul (http://consul:8500) is up."
+
 python3 /root/conf2consul.py /etc/moon/moon.conf
 
-# TODO: wait for database
+echo "Waiting for DB (tcp://db:3306)"
+while ! python -c "import socket, sys; s = socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.connect(('db', 3306)); sys.exit(0)" 2>/dev/null ; do
+    sleep 5 ;
+    echo "."
+done
+
+echo "DB (tcp://db:3306) is up."
+
 moon_db_manager upgrade
 
-echo "Waiting for manager (http://manager:8082)"
+echo "Waiting for Manager (http://manager:8082)"
 while ! python -c "import requests; req = requests.get('http://manager:8082')" 2>/dev/null ; do
     sleep 5 ;
     echo "."
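
Unrolled, the inline python -c probe used for the database check above
amounts to this sketch:

    import socket
    import time

    def wait_for_tcp(host, port, delay=5):
        # Same probe as the inline `python -c` loop, made readable.
        while True:
            try:
                socket.create_connection((host, port), timeout=5).close()
                return
            except OSError:
                print(".")
                time.sleep(delay)

    wait_for_tcp("db", 3306)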
diff --git a/moonv4/templates/moonforming/utils/pdp.py b/moonv4/templates/moonforming/utils/pdp.py
index 4f513aa..676b216 100644 (file)
@@ -15,7 +15,7 @@ KEYSTONE_SERVER = config['openstack']['keystone']['url']
 pdp_template = {
     "name": "test_pdp",
     "security_pipeline": [],
-    "keystone_project_id": "",
+    "keystone_project_id": None,
     "description": "test",
 }
 
@@ -46,6 +46,8 @@ def get_keystone_projects():
     }
 
     req = requests.post("{}/auth/tokens".format(KEYSTONE_SERVER), json=data_auth, headers=HEADERS)
+    print("{}/auth/tokens".format(KEYSTONE_SERVER))
+    print(req.text)
     assert req.status_code in (200, 201)
     TOKEN = req.headers['X-Subject-Token']
     HEADERS['X-Auth-Token'] = TOKEN
@@ -95,6 +97,8 @@ def add_pdp(name="test_pdp", policy_id=None):
     if policy_id:
         pdp_template['security_pipeline'].append(policy_id)
     req = requests.post(URL + "/pdp", json=pdp_template, headers=HEADERS)
+    print(req.status_code)
+    print(req)
     assert req.status_code == 200
     result = req.json()
     assert type(result) is dict