VNFM against Fenix API schema validation fix 62/69662/10
author Tomi Juvonen <tomi.juvonen@nokia.com>
Mon, 10 Feb 2020 10:22:31 +0000 (12:22 +0200)
committer Tomi Juvonen <tomi.juvonen@nokia.com>
Wed, 11 Mar 2020 08:32:49 +0000 (08:32 +0000)
-Small API changes, as Fenix now validates the API schema (see the sketch below)
-Bug fixes in building the Fenix service and container
-Wait for the Fenix container to be ready before testing

Related: https://review.opendev.org/#/c/706775/

Signed-off-by: Tomi Juvonen <tomi.juvonen@nokia.com>
Change-Id: I8b88865d6b8569c6660513974955f7cd65619a33
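
A minimal sketch (not taken from Fenix) of the kind of request validation the first change-log item refers to. The schema and its allowed values are hypothetical simplifications; only the rejected "MIGRATION" value mirrors the vnfm.py change below.

    # Hedged illustration: hypothetical constraint schema, real jsonschema calls.
    from jsonschema import validate, ValidationError

    instance_constraint_schema = {
        "type": "object",
        "properties": {
            "instance_id": {"type": "string"},
            "migration_type": {"enum": ["MIGRATE", "LIVE_MIGRATE"]},  # assumed value set
            "max_interruption_time": {"type": "integer"},
        },
        "required": ["instance_id", "migration_type"],
    }

    payload = {"instance_id": "uuid-1", "migration_type": "MIGRATION"}
    try:
        validate(instance=payload, schema=instance_constraint_schema)
    except ValidationError as err:
        # "MIGRATION" fails the enum check; the patch switches the VNFM to "MIGRATE".
        print("constraint rejected: %s" % err.message)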

doctor_tests/admin_tool/fenix/Dockerfile
doctor_tests/app_manager/vnfm.py
doctor_tests/installer/common/set_fenix.sh
doctor_tests/installer/mcp.py

index 5804b20..202380e 100644
@@ -21,7 +21,7 @@ RUN apk --no-cache add ca-certificates && \
     if [ ! -e /usr/bin/pip ]; then ln -s pip3 /usr/bin/pip ; fi && \
     if [[ ! -e /usr/bin/python ]]; then ln -sf /usr/bin/python3 /usr/bin/python; fi && \
     pip3 install --upgrade pip && \
-    pip3 install alembic aodhclient decorator flask Flask-RESTful eventlet \
+    pip3 install alembic aodhclient decorator flask Flask-RESTful eventlet jsonschema \
         keystoneauth1 keystonemiddleware python-novaclient oslo.config pecan \
         oslo.db oslo.log oslo.messaging oslo.serialization oslo.service oslo_policy \
         oslotest oslo.utils pbr pymysql six sqlalchemy -cupper-constraints.txt && \
index 7a3c9ad..68fdbb8 100644
@@ -109,7 +109,7 @@ class VNFManager(Thread):
                                if ep.service_id == maint_id and
                                ep.interface == 'public'][0]
         self.log.info('maintenance endpoint: %s' % self.maint_endpoint)
-
+        self.update_constraints_lock = False
         self.update_constraints()
 
     def delete_remote_instance_constraints(self, instance_id):
@@ -153,6 +153,10 @@ class VNFManager(Thread):
         self.delete_remote_group_constraints(self.ha_group)
 
     def update_constraints(self):
+        while self.update_constraints_lock:
+            self.log.info('Waiting update_constraints_lock...')
+            time.sleep(1)
+        self.update_constraints_lock = True
         self.log.info('Update constraints')
         if self.project_id is None:
             self.project_id = self.keystone.projects.list(
@@ -185,7 +189,7 @@ class VNFManager(Thread):
                 "recovery_time": 4,
                 "resource_mitigation": True}
             self.log.info('create doctor_ha_app_group constraints: %s'
-                          % self.nonha_group)
+                          % self.ha_group)
             self.update_remote_group_constraints(self.ha_group)
         instance_constraints = {}
         for ha_instance in self.ha_instances:
             self.update_remote_group_constraints(self.ha_group)
         instance_constraints = {}
         for ha_instance in self.ha_instances:
@@ -195,7 +199,7 @@ class VNFManager(Thread):
                 "group_id": self.ha_group["group_id"],
                 "instance_name": ha_instance.name,
                 "max_interruption_time": 120,
-                "migration_type": "MIGRATION",
+                "migration_type": "MIGRATE",
                 "resource_mitigation": True,
                 "lead_time": 40}
             self.log.info('create ha instance constraints: %s'
@@ -208,7 +212,7 @@ class VNFManager(Thread):
                 "group_id": self.nonha_group["group_id"],
                 "instance_name": nonha_instance.name,
                 "max_interruption_time": 120,
-                "migration_type": "MIGRATION",
+                "migration_type": "MIGRATE",
                 "resource_mitigation": True,
                 "lead_time": 40}
             self.log.info('create nonha instance constraints: %s'
@@ -234,11 +238,12 @@ class VNFManager(Thread):
             for instance_id in deleted:
                 self.delete_remote_instance_constraints(instance_id)
             updated = added + modified
-            for instance in [instance_constraints[i] in i in updated]:
+            for instance in [instance_constraints[i] for i in updated]:
                 self.update_remote_instance_constraints(instance)
             if updated or deleted:
                 # Some instance constraints have changed
                 self.instance_constraints = instance_constraints.copy()
+        self.update_constraints_lock = False
 
     def active_instance_id(self):
         # Need rertry as it takes time after heat template done before
@@ -358,14 +363,20 @@ class VNFManager(Thread):
                 instance_ids = (self.get_session_instance_ids(
                                 payload['instance_ids'],
                                 payload['session_id']))
-                reply['instance_ids'] = instance_ids
-                reply_state = 'ACK_MAINTENANCE'
+                my_instance_ids = self.get_instance_ids()
+                invalid_instances = (
+                    [instance_id for instance_id in instance_ids
+                     if instance_id not in my_instance_ids])
+                if invalid_instances:
+                    self.log.error('Invalid instances: %s' % invalid_instances)
+                    reply_state = 'NACK_MAINTENANCE'
+                else:
+                    reply_state = 'ACK_MAINTENANCE'
 
             elif state == 'SCALE_IN':
                 # scale down "self.scale" instances that is VCPUS equaling
                 # at least a single compute node
                 self.scale_instances(-self.scale)
-                reply['instance_ids'] = self.get_instance_ids()
                 reply_state = 'ACK_SCALE_IN'
 
             elif state == 'MAINTENANCE_COMPLETE':
@@ -411,7 +422,6 @@ class VNFManager(Thread):
             if reply_state:
                 if self.conf.admin_tool.type == 'fenix':
                     self.headers['X-Auth-Token'] = self.session.get_token()
-                reply['session_id'] = payload['session_id']
                 reply['state'] = reply_state
                 url = payload['reply_url']
                 self.log.info('VNFM reply: %s' % reply)
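
The update_constraints_lock flag added above is a simple busy-wait guard around update_constraints(). For comparison only, a minimal sketch of the same guard using threading.Lock (not what the patch does; the class and body below are illustrative):

    import threading

    class ConstraintsUpdater:
        """Illustrative stand-in for VNFManager, not the patched class."""

        def __init__(self):
            self._constraints_lock = threading.Lock()

        def update_constraints(self):
            # Blocks instead of polling a flag once per second, and the lock
            # is released even if pushing constraints raises an exception.
            with self._constraints_lock:
                pass  # build and push group/instance constraints here
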
index 68bb4a6..bd1eae4 100644
@@ -22,14 +22,15 @@ apt-get install -y docker-ce docker-ce-cli containerd.io
 dpkg -r --force-depends golang-docker-credential-helpers
 }
 
-docker ps | grep fenix >/dev/null && {
-REMOTE=`docker exec -ti fenix git rev-parse origin/master`
-LOCAL=`docker exec -ti fenix git rev-parse @`
-if [ $LOCAL = $REMOTE ]; then
-    echo "Fenix start: Already running latest"
+docker ps | grep fenix -q && {
+REMOTE=`git ls-remote  https://opendev.org/x/fenix HEAD | awk '{ print $1}'`
+LOCAL=`docker exec -t fenix git rev-parse @`
+if [[ "$LOCAL" =~ "$REMOTE" ]]; then
+    # The two strings differ in their line endings, so a plain equality test fails
+    echo "Fenix start: Already running latest $LOCAL equals $REMOTE"
     exit 0
 else
-    echo "Fenix container needs to be recreated..."
+    echo "Fenix container needs to be recreated $LOCAL not $REMOTE"
     # Remove previous container
     for img in `docker image list | grep "^fenix" | awk '{print $1}'`; do
         for dock in `docker ps --all -f "ancestor=$img" | grep "$img" | awk '{print $1}'`; do
@@ -75,7 +76,7 @@ echo "password = $OS_PASSWORD" >> fenix-api.conf
 echo "username = $OS_USERNAME" >> fenix-api.conf
 echo "cafile = /opt/stack/data/ca-bundle.pem" >> fenix-api.conf
 
-openstack service list | grep maintenance | {
+openstack service list | grep -q maintenance || {
 openstack service create --name fenix --enable maintenance
 openstack endpoint create --region $OS_REGION_NAME --enable fenix public http://localhost:12347/v1
 }
index f8f33c8..7659c9e 100644
@@ -129,8 +129,8 @@ class McpInstaller(BaseInstaller):
     def set_apply_patches(self):
         self.log.info('Set apply patches start......')
         fenix_files = None
-
         set_scripts = [self.cm_set_script]
+        thrs = []
 
         restart_cmd = 'sudo systemctl restart' \
                       ' ceilometer-agent-notification.service'
@@ -152,10 +152,10 @@ class McpInstaller(BaseInstaller):
                                                     'admin_tool/fenix',
                                                     fenix_file)
                     client.scp(src_file, fenix_file)
-            self._run_apply_patches(client,
-                                    restart_cmd,
-                                    set_scripts,
-                                    python=self.python)
+            thrs.append(self._run_apply_patches(client,
+                                                restart_cmd,
+                                                set_scripts,
+                                                python=self.python))
         time.sleep(5)
 
         self.log.info('Set apply patches start......')
@@ -165,11 +165,15 @@ class McpInstaller(BaseInstaller):
             for node_ip in self.computes:
                 client = SSHClient(node_ip, self.node_user_name,
                                    key_filename=self.key_file)
-                self._run_apply_patches(client,
-                                        restart_cmd,
-                                        [self.nc_set_compute_script],
-                                        python=self.python)
+                thrs.append(self._run_apply_patches(
+                    client,
+                    restart_cmd,
+                    [self.nc_set_compute_script],
+                    python=self.python))
             time.sleep(5)
+        # If the Fenix container is being built, it needs to be ready before continuing
+        for thr in thrs:
+            thr.join()
 
     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
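
The "wait for the container to be ready" item from the commit message is handled above by collecting the patch threads and joining them. A minimal sketch of that pattern, assuming _run_apply_patches now returns a started threading.Thread as the patched mcp.py implies; the helper below is a placeholder, not the real installer code:

    import threading
    import time

    def _run_apply_patches(client, restart_cmd, scripts, python='python3'):
        # Placeholder: run the set scripts in the background and hand the
        # caller the thread so it can decide when to wait.
        thr = threading.Thread(target=time.sleep, args=(1,))
        thr.start()
        return thr

    def set_apply_patches(clients):
        thrs = [_run_apply_patches(c, 'sudo systemctl restart ...', ['set_fenix.sh'])
                for c in clients]
        # Building the Fenix container can take a while; block here so testing
        # only starts once every node (and the container) is ready.
        for thr in thrs:
            thr.join()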