Maintenance support for latest Fenix, python3 and Fuel
author Tomi Juvonen <tomi.juvonen@nokia.com>
Thu, 7 Nov 2019 11:09:45 +0000 (13:09 +0200)
committer Tomi Juvonen <tomi.juvonen@nokia.com>
Tue, 19 Nov 2019 06:58:07 +0000 (06:58 +0000)
JIRA: DOCTOR-134

Signed-off-by: Tomi Juvonen <tomi.juvonen@nokia.com>
Change-Id: I51a93637f30b0eece2075a8277616fb97a1b230e

doctor_tests/admin_tool/fenix/Dockerfile
doctor_tests/admin_tool/fenix/run
doctor_tests/app_manager/sample.py
doctor_tests/inspector/sample.py
doctor_tests/installer/base.py
doctor_tests/installer/common/set_fenix.sh
doctor_tests/installer/mcp.py
doctor_tests/main.py
doctor_tests/scenario/maintenance.py
tox.ini

diff --git a/doctor_tests/admin_tool/fenix/Dockerfile b/doctor_tests/admin_tool/fenix/Dockerfile
index 90039b0..5804b20 100644 (file)
--- a/doctor_tests/admin_tool/fenix/Dockerfile
+++ b/doctor_tests/admin_tool/fenix/Dockerfile
@@ -1,4 +1,4 @@
-FROM gliderlabs/alpine:3.5
+FROM gliderlabs/alpine:3.6
 
 ARG BRANCH=master
 ARG OPENSTACK=master
@@ -11,23 +11,24 @@ RUN echo "Building Fenix container against OpenStack $OPENSTACK" && \
     mkdir -p /var/tmp/fenix
 WORKDIR /var/tmp/fenix
 COPY fenix*.conf /etc/fenix/
+
 RUN apk --no-cache add ca-certificates && \
     apk --no-cache add --update python3 sshpass py-pip git curl  && \
     apk --no-cache add --virtual .build-deps --update \
-        python-dev python3-dev build-base linux-headers libffi-dev \
+        python3-dev build-base linux-headers libffi-dev \
         openssl-dev libjpeg-turbo-dev && \
-    curl https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=$OPENSTACK > upper-constraints.txt && \
-    pip install --upgrade pip  && \
-    pip install alembic aodhclient ast decorator \
-        eventlet flask Flask-RESTful importlib \
-        keystoneauth1 logging python-novaclient oslo.config oslo.db \
-        oslo.log oslo.messaging oslo.serialization oslo.service \
-        oslotest oslo.utils pbr pymysql setuptools six sqlalchemy \
-        wsgiref -cupper-constraints.txt && \
-    git clone https://git.openstack.org/openstack/fenix -b $BRANCH /fenix && \
+    curl https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK/upper-constraints.txt > upper-constraints.txt && \
+    if [ ! -e /usr/bin/pip ]; then ln -s pip3 /usr/bin/pip ; fi && \
+    if [[ ! -e /usr/bin/python ]]; then ln -sf /usr/bin/python3 /usr/bin/python; fi && \
+    pip3 install --upgrade pip && \
+    pip3 install alembic aodhclient decorator flask Flask-RESTful eventlet \
+        keystoneauth1 keystonemiddleware python-novaclient oslo.config pecan \
+        oslo.db oslo.log oslo.messaging oslo.serialization oslo.service oslo_policy \
+        oslotest oslo.utils pbr pymysql six sqlalchemy -cupper-constraints.txt && \
+    git clone https://opendev.org/x/fenix -b $BRANCH /fenix && \
     rm -fr /var/tmp/fenix
 COPY run /fenix
-COPY overcloudrc /fenix
+COPY keystonercv3 /fenix
 WORKDIR /fenix
-RUN python setup.py install
+RUN python3 setup.py install
 CMD ./run
diff --git a/doctor_tests/admin_tool/fenix/run b/doctor_tests/admin_tool/fenix/run
index 2a2e37c..50ae68e 100755 (executable)
--- a/doctor_tests/admin_tool/fenix/run
+++ b/doctor_tests/admin_tool/fenix/run
@@ -1,8 +1,8 @@
 #!/bin/sh
-. overcloudrc
+. keystonercv3
 
 # Start the first process
-nohup python /fenix/fenix/cmd/engine.py > /var/log/fenix-engine.log&
+nohup python3 /fenix/fenix/cmd/engine.py > /var/log/fenix-engine.log&
 status=$?
 if [ $status -ne 0 ]; then
   echo "Failed to start engine.py: $status"
@@ -10,7 +10,7 @@ if [ $status -ne 0 ]; then
 fi
 
 # Start the second process
-nohup python /fenix/fenix/cmd/api.py > /var/log/fenix-api.log&
+nohup python3 /fenix/fenix/cmd/api.py > /var/log/fenix-api.log&
 status=$?
 if [ $status -ne 0 ]; then
   echo "Failed to start api.py: $status"
diff --git a/doctor_tests/app_manager/sample.py b/doctor_tests/app_manager/sample.py
index a7bc412..94049aa 100644 (file)
--- a/doctor_tests/app_manager/sample.py
+++ b/doctor_tests/app_manager/sample.py
@@ -17,6 +17,7 @@ import requests
 from doctor_tests.app_manager.base import BaseAppManager
 from doctor_tests.identity_auth import get_identity_auth
 from doctor_tests.identity_auth import get_session
+from doctor_tests.os_clients import neutron_client
 from doctor_tests.os_clients import nova_client
 
 
@@ -56,12 +57,16 @@ class AppManager(Thread):
         self.app_manager = app_manager
         self.log = log
         self.intance_ids = None
+        self.auth = get_identity_auth(project=self.conf.doctor_project)
+        self.session = get_session(auth=self.auth)
+        self.nova = nova_client(self.conf.nova_version,
+                                self.session)
+        self.neutron = neutron_client(session=self.session)
         self.headers = {
             'Content-Type': 'application/json',
             'Accept': 'application/json'}
-        self.auth = get_identity_auth(project=self.conf.doctor_project)
-        self.nova = nova_client(self.conf.nova_version,
-                                get_session(auth=self.auth))
+        if self.conf.admin_tool.type == 'fenix':
+            self.headers['X-Auth-Token'] = self.session.get_token()
         self.orig_number_of_instances = self.number_of_instances()
         self.ha_instances = self.get_ha_instances()
         self.floating_ip = None
@@ -85,7 +90,13 @@ class AppManager(Thread):
             if instance.id != self.active_instance_id:
                 self.log.info('Switch over to: %s %s' % (instance.name,
                                                          instance.id))
-                instance.add_floating_ip(self.floating_ip)
+                # Deprecated, need to use neutron instead
+                # instance.add_floating_ip(self.floating_ip)
+                port = self.neutron.list_ports(device_id=instance.id)['ports'][0]['id']  # noqa
+                floating_id = self.neutron.list_floatingips(floating_ip_address=self.floating_ip)['floatingips'][0]['id']  # noqa
+                self.neutron.update_floatingip(floating_id, {'floatingip': {'port_id': port}})  # noqa
+                # Have to update ha_instances as floating_ip changed
+                self.ha_instances = self.get_ha_instances()
                 self.active_instance_id = instance.id
                 break
 
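
Note: Instance.add_floating_ip is deprecated on the Nova side, so the switchover above re-associates the floating IP through Neutron. A minimal standalone sketch of that call sequence, assuming python-neutronclient and an existing keystoneauth1 session (the helper name move_floating_ip is illustrative, not part of this patch):

from neutronclient.v2_0 import client as neutronclient


def move_floating_ip(session, floating_ip_address, target_server_id):
    # Mirrors the Neutron calls added in app_manager/sample.py above.
    neutron = neutronclient.Client(session=session)
    # First port of the target instance receives the floating IP.
    port_id = neutron.list_ports(
        device_id=target_server_id)['ports'][0]['id']
    fip_id = neutron.list_floatingips(
        floating_ip_address=floating_ip_address)['floatingips'][0]['id']
    # Re-pointing the floating IP implicitly detaches it from the old port.
    neutron.update_floatingip(fip_id, {'floatingip': {'port_id': port_id}})
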
diff --git a/doctor_tests/inspector/sample.py b/doctor_tests/inspector/sample.py
index baf0306..70156b2 100644 (file)
--- a/doctor_tests/inspector/sample.py
+++ b/doctor_tests/inspector/sample.py
@@ -72,7 +72,7 @@ class SampleInspector(BaseInspector):
                 host = server.__dict__.get('OS-EXT-SRV-ATTR:host')
                 self.servers[host].append(server)
                 self.log.debug('get hostname=%s from server=%s'
-                               % (host, server))
+                               % (host, str(server.name)))
             except Exception as e:
                 self.log.info('can not get hostname from server=%s, error=%s'
                               % (server, e))
diff --git a/doctor_tests/installer/base.py b/doctor_tests/installer/base.py
index 7e2658e..b227065 100644 (file)
--- a/doctor_tests/installer/base.py
+++ b/doctor_tests/installer/base.py
@@ -139,10 +139,10 @@ class BaseInstaller(object):
             ret, url = client.ssh(cmd)
             if ret:
                 raise Exception('Exec command to get transport from '
-                                'controller(%s) in Apex installer failed, '
+                                'controller(%s) failed, '
                                 'ret=%s, output=%s'
                                 % (self.controllers[0], ret, url))
-            else:
+            elif self.controllers[0] not in url:
                 # need to use ip instead of hostname
                 ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
                        url[0].split("=", 1)[1]))
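
Note: the substitution above only rewrites the host part of the transport URL when the controller IP is not already present in it. A small illustrative sketch of that rewrite (the sample URL and address below are made up):

import re

# Example nova.conf line as returned over SSH (values are illustrative).
line = "transport_url=rabbit://nova:secret@ctl01.local:5672/"
controller_ip = "192.0.2.11"  # assumed controller address

# Same rewrite as in installer/base.py: keep credentials and port,
# replace the hostname between '@' and the port separator with the IP.
url = re.sub("@.*:", "@%s:" % controller_ip, line.split("=", 1)[1])
print(url)  # rabbit://nova:secret@192.0.2.11:5672/
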
diff --git a/doctor_tests/installer/common/set_fenix.sh b/doctor_tests/installer/common/set_fenix.sh
index a660af7..aac376c 100644 (file)
--- a/doctor_tests/installer/common/set_fenix.sh
+++ b/doctor_tests/installer/common/set_fenix.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
 ##############################################################################
-# Copyright (c) 2018 Nokia Corporation and others.
+# Copyright (c) 2019 Nokia Corporation and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 ##############################################################################
 
 # Config files
+docker -v >/dev/null || {
+echo "Fenix needs docker to be installed..." 
+ver=`grep "UBUNTU_CODENAME" /etc/os-release | cut -d '=' -f 2`
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $ver stable"
+apt install apt-transport-https ca-certificates curl software-properties-common
+apt update
+apt-cache policy docker-ce
+apt-get install -y docker-ce docker-ce-cli containerd.io
+dpkg -r --force-depends golang-docker-credential-helpers
+}
+
+docker ps | grep fenix >/dev/null && {
+REMOTE=`docker exec -ti fenix git rev-parse origin/master`
+LOCAL=`docker exec -ti fenix git rev-parse @`
+if [ $LOCAL = $REMOTE ]; then
+    echo "Fenix start: Already running latest"
+    exit 0
+else
+    echo "Fenix container needs to be recreated..."
+    # Remove previous container
+    for img in `docker image list | grep "^fenix" | awk '{print $1}'`; do
+        for dock in `docker ps --all -f "ancestor=$img" | grep "$img" | awk '{print $1}'`; do
+            docker stop $dock; docker rm $dock;
+        done;
+        docker image rm $img;
+    done
+fi
+} || echo "Fenix container needs to be created..."
+
+cp /root/keystonercv3 .
+
+transport=`grep -m1 "^transport" /etc/nova/nova.conf`
+. keystonercv3
+
 echo "[DEFAULT]" > fenix.conf
-echo "[DEFAULT]" > fenix-api.conf
 echo "port = 12347" >> fenix.conf
-echo "port = 12347" >> fenix-api.conf
-grep -m1 "^transport" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf >> fenix.conf
-grep -m1 "^transport" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf >> fenix-api.conf
+echo $transport >> fenix.conf
+
 echo "[database]" >> fenix.conf
-MYSQLIP=`grep -m1 "^connection=mysql" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf | sed -e "s/.*@//;s/\/.*//"`
-echo "connection=mysql+pymysql://fenix:fenix@$MYSQLIP/fenix?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf" >> fenix.conf
+MYSQLIP=`grep -m1 "^connection" /etc/nova/nova.conf | sed -e "s/.*@//;s/\/.*//"`
+echo "connection = mysql+pymysql://fenix:fenix@$MYSQLIP/fenix" >> fenix.conf
+
+echo "[service_user]" >> fenix.conf
+echo "os_auth_url = $OS_AUTH_URL" >> fenix.conf
+echo "os_username = $OS_USERNAME" >> fenix.conf
+echo "os_password = $OS_PASSWORD" >> fenix.conf
+echo "os_user_domain_name = $OS_USER_DOMAIN_NAME" >> fenix.conf
+echo "os_project_name = $OS_PROJECT_NAME" >> fenix.conf
+echo "os_project_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix.conf
+
+echo "[DEFAULT]" > fenix-api.conf
+echo "port = 12347" >> fenix-api.conf
+echo $transport >> fenix-api.conf
+
+echo "[keystone_authtoken]" >> fenix-api.conf
+echo "auth_url = $OS_AUTH_URL" >> fenix-api.conf
+echo "auth_type = password" >> fenix-api.conf
+echo "project_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix-api.conf
+echo "project_name = $OS_PROJECT_NAME" >> fenix-api.conf
+echo "user_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix-api.conf
+echo "password = $OS_PASSWORD" >> fenix-api.conf
+echo "username = $OS_USERNAME" >> fenix-api.conf
+echo "cafile = /opt/stack/data/ca-bundle.pem" >> fenix-api.conf
 
 # Mysql pw
-MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.json | grep mysql | grep root_password | awk -F": " '{print $2}' | awk -F"\"" '{print $2}'`
+# MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.json | grep mysql | grep root_password | awk -F": " '{print $2}' | awk -F"\"" '{print $2}'`
+MYSQLPW=root
 
 # Fenix DB
 [ `mysql -uroot -p$MYSQLPW -e "SELECT host, user FROM mysql.user;" | grep fenix | wc -l` -eq 0 ] && {
@@ -31,17 +87,9 @@ MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.jso
 mysql -ufenix -pfenix -hlocalhost -e "DROP DATABASE IF EXISTS fenix;"
 mysql -ufenix -pfenix -hlocalhost -e "CREATE DATABASE fenix CHARACTER SET utf8;"
 
-# Remove previous container
-for img in `docker image list | grep "^fenix" | awk '{print $1}'`; do
-    for dock in `docker ps --all -f "ancestor=$img" | grep "$img" | awk '{print $1}'`; do
-        docker stop $dock; docker rm $dock;
-    done;
-    docker image rm $img;
-done
-
 # Build Fenix container and run it
 chmod 700 run
-docker build --build-arg OPENSTACK=master --build-arg BRANCH=master --network host /home/heat-admin -t fenix | tail -1
+docker build --build-arg OPENSTACK=master --build-arg BRANCH=master --network host $PWD -t fenix | tail -1
 docker run --network host -d --name fenix -p 12347:12347 -ti fenix
 if [ $? -eq 0 ]; then
     echo "Fenix start: OK"
diff --git a/doctor_tests/installer/mcp.py b/doctor_tests/installer/mcp.py
index 80e559e..65c8ed7 100644 (file)
--- a/doctor_tests/installer/mcp.py
+++ b/doctor_tests/installer/mcp.py
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2018 ZTE Corporation and others.
+# Copyright (c) 2019 ZTE Corporation and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -7,7 +7,10 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 from os.path import isfile
+import time
 
+from doctor_tests.common.constants import is_fenix
+from doctor_tests.common.utils import get_doctor_test_root_dir
 from doctor_tests.common.utils import SSHClient
 from doctor_tests.installer.base import BaseInstaller
 
@@ -15,6 +18,15 @@ from doctor_tests.installer.base import BaseInstaller
 class McpInstaller(BaseInstaller):
     node_user_name = 'ubuntu'
 
+    cm_set_script = 'set_config.py'
+    nc_set_compute_script = 'set_compute_config.py'
+    fe_set_script = 'set_fenix.sh'
+    cm_restore_script = 'restore_config.py'
+    nc_restore_compute_script = 'restore_compute_config.py'
+    ac_restart_script = 'restart_aodh.py'
+    ac_restore_script = 'restore_aodh.py'
+    python = 'python3'
+
     def __init__(self, conf, log):
         super(McpInstaller, self).__init__(conf, log)
         self.key_file = self.get_ssh_key_from_installer()
@@ -24,40 +36,48 @@ class McpInstaller(BaseInstaller):
                                 look_for_keys=True)
         self.controllers = list()
         self.controller_clients = list()
+        self.computes = list()
 
     def setup(self):
         self.log.info('Setup MCP installer start......')
-
-        self.controllers = self.get_controller_ips()
+        self.get_node_ips()
         self.create_flavor()
-        self.set_apply_patches()
+        if is_fenix(self.conf):
+            self.set_apply_patches()
         self.setup_stunnel()
 
     def cleanup(self):
-        self.restore_apply_patches()
+        if is_fenix(self.conf):
+            self.restore_apply_patches()
         for server in self.servers:
             server.terminate()
 
     def get_ssh_key_from_installer(self):
         self.log.info('Get SSH keys from MCP......')
 
-        # Assuming mcp.rsa is already mapped to functest container
-        # if not, only the test runs on jumphost can get the ssh_key
-        # default in path /var/lib/opnfv/mcp.rsa
+        # Default in path /var/lib/opnfv/mcp.rsa
         ssh_key = '/root/.ssh/id_rsa'
         mcp_key = '/var/lib/opnfv/mcp.rsa'
-        return ssh_key if isfile(ssh_key) else mcp_key
+        return mcp_key if isfile(mcp_key) else ssh_key
+
+    def _copy_overcloudrc_to_controllers(self):
+        for ip in self.controllers:
+            cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
+            self._run_cmd_remote(self.client, cmd)
+
+    def get_node_ips(self):
+        self.log.info('Get node ips from Mcp installer......')
+
+        command = 'sudo salt "*" --out yaml pillar.get _param:single_address'
+        node_details = self._run_cmd_remote(self.client, command)
 
-    def get_controller_ips(self):
-        self.log.info('Get controller ips from Mcp installer......')
+        self.controllers = [line.split()[1] for line in node_details
+                            if line.startswith("ctl")]
+        self.computes = [line.split()[1] for line in node_details
+                         if line.startswith("cmp")]
 
-        command = "sudo salt --out yaml 'ctl*' " \
-                  "pillar.get _param:openstack_control_address |" \
-                  "awk '{print $2}'"
-        controllers = self._run_cmd_remote(self.client, command)
-        self.log.info('Get controller_ips:%s from Mcp installer'
-                      % controllers)
-        return controllers
+        self.log.info('controller_ips:%s' % self.controllers)
+        self.log.info('compute_ips:%s' % self.computes)
 
     def get_host_ip_from_hostname(self, hostname):
         command = "sudo salt --out yaml '%s*' " \
@@ -68,6 +88,76 @@ class McpInstaller(BaseInstaller):
 
     def set_apply_patches(self):
         self.log.info('Set apply patches start......')
+        fenix_files = None
+
+        set_scripts = [self.cm_set_script]
+
+        restart_cmd = 'sudo systemctl restart' \
+                      ' ceilometer-agent-notification.service'
+
+        if self.conf.test_case != 'fault_management':
+            if is_fenix(self.conf):
+                set_scripts.append(self.fe_set_script)
+                testdir = get_doctor_test_root_dir()
+                fenix_files = ["Dockerfile", "run"]
+            restart_cmd += ' nova-scheduler.service'
+            set_scripts.append(self.nc_set_compute_script)
+
+        for node_ip in self.controllers:
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
+            if fenix_files is not None:
+                for fenix_file in fenix_files:
+                    src_file = '{0}/{1}/{2}'.format(testdir,
+                                                    'admin_tool/fenix',
+                                                    fenix_file)
+                    client.scp(src_file, fenix_file)
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    set_scripts,
+                                    python=self.python)
+        time.sleep(5)
+
+        self.log.info('Set apply patches start......')
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd = 'sudo systemctl restart nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(client,
+                                        restart_cmd,
+                                        [self.nc_set_compute_script],
+                                        python=self.python)
+            time.sleep(5)
 
     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
+
+        restore_scripts = [self.cm_restore_script]
+
+        restore_scripts.append(self.ac_restore_script)
+        restart_cmd = 'sudo systemctl restart' \
+                      ' ceilometer-agent-notification.service'
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd += ' nova-scheduler.service'
+            restore_scripts.append(self.nc_restore_compute_script)
+
+        for node_ip in self.controllers:
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    restore_scripts,
+                                    python=self.python)
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd = 'sudo systemctl restart nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(
+                    client, restart_cmd,
+                    [self.nc_restore_compute_script],
+                    python=self.python)
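
Note: controller and compute addresses now come from Salt pillar data instead of a controller-only query. A hedged sketch of the parsing done in get_node_ips(), assuming the YAML output yields one "<minion>: <ip>" line per node (hostnames and addresses below are made up):

# Parse `salt "*" --out yaml pillar.get _param:single_address` output
# into controller and compute IP lists, as get_node_ips() does above.
node_details = [
    "ctl01.mcp.local: 172.16.10.101",
    "cmp001.mcp.local: 172.16.10.105",
    "cmp002.mcp.local: 172.16.10.106",
]

controllers = [line.split()[1] for line in node_details
               if line.startswith("ctl")]
computes = [line.split()[1] for line in node_details
            if line.startswith("cmp")]

print(controllers)  # ['172.16.10.101']
print(computes)     # ['172.16.10.105', '172.16.10.106']
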
diff --git a/doctor_tests/main.py b/doctor_tests/main.py
index 351d5f1..cdb4af5 100644 (file)
--- a/doctor_tests/main.py
+++ b/doctor_tests/main.py
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
+# Copyright (c) 2019 ZTE Corporation and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -96,7 +96,7 @@ class DoctorTest(object):
             LOG.info('not enough compute nodes, skipping doctor '
                      'maintenance test')
             return
-        elif self.conf.installer.type != 'apex':
+        elif self.conf.installer.type not in ['apex', 'fuel']:
             LOG.info('not supported installer, skipping doctor '
                      'maintenance test')
             return
diff --git a/doctor_tests/scenario/maintenance.py b/doctor_tests/scenario/maintenance.py
index 7c2c17e..2e40529 100644 (file)
--- a/doctor_tests/scenario/maintenance.py
+++ b/doctor_tests/scenario/maintenance.py
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2018 Nokia Corporation and others.
+# Copyright (c) 2019 Nokia Corporation and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -28,6 +28,7 @@ class Maintenance(object):
     def __init__(self, trasport_url, conf, log):
         self.conf = conf
         self.log = log
+        self.admin_session = get_session()
         self.keystone = keystone_client(
             self.conf.keystone_version, get_session())
         self.nova = nova_client(conf.nova_version, get_session())
@@ -145,8 +146,11 @@ class Maintenance(object):
         headers = {
             'Content-Type': 'application/json',
             'Accept': 'application/json'}
-
+        if self.conf.admin_tool.type == 'fenix':
+            headers['X-Auth-Token'] = self.admin_session.get_token()
+        self.log.info('headers %s' % headers)
         retries = 12
+        ret = None
         while retries > 0:
             # let's start maintenance 20sec from now, so projects will have
             # time to ACK to it before that
@@ -175,6 +179,8 @@ class Maintenance(object):
                 time.sleep(10)
                 continue
             break
+        if not ret:
+            raise Exception("admin tool did not respond")
         if ret.status_code != 200:
             raise Exception(ret.text)
         return ret.json()['session_id']
@@ -191,6 +197,9 @@ class Maintenance(object):
             'Content-Type': 'application/json',
             'Accept': 'application/json'}
 
+        if self.conf.admin_tool.type == 'fenix':
+            headers['X-Auth-Token'] = self.admin_session.get_token()
+
         ret = requests.delete(url, data=None, headers=headers)
         if ret.status_code != 200:
             raise Exception(ret.text)
@@ -205,6 +214,10 @@ class Maintenance(object):
         headers = {
             'Content-Type': 'application/json',
             'Accept': 'application/json'}
+
+        if self.conf.admin_tool.type == 'fenix':
+            headers['X-Auth-Token'] = self.admin_session.get_token()
+
         ret = requests.get(url, data=None, headers=headers)
         if ret.status_code != 200:
             raise Exception(ret.text)
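
Note: with Fenix as the admin tool, every maintenance API request now carries a Keystone token. A minimal sketch of such an authenticated call, reusing the get_session() helper from doctor_tests.identity_auth; the endpoint URL below is illustrative, the real one is built from configuration:

import requests

from doctor_tests.identity_auth import get_session

# Token comes from the same kind of admin session used in maintenance.py.
session = get_session()
headers = {
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    'X-Auth-Token': session.get_token(),
}

url = 'http://127.0.0.1:12347/v1/maintenance'  # assumed Fenix endpoint
ret = requests.get(url, data=None, headers=headers)
if ret.status_code != 200:
    raise Exception(ret.text)
print(ret.json())
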
diff --git a/tox.ini b/tox.ini
index c6c185e..30feecf 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,12 +1,12 @@
 [tox]
 minversion = 2.3.1
-envlist = py34, pep8,docs,docs-linkcheck
+envlist = py36,pep8,docs,docs-linkcheck
 skipsdist = True
 
 [testenv]
 usedevelop = True
-install_command = pip install \
-    -chttps://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike \
+install_command = pip3 install \
+    -chttps://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/stein \
     {opts} {packages}
 setenv = VIRTUAL_ENV={envdir}
 deps = -r{toxinidir}/requirements.txt