DevStack support 34/69134/8
authorTomi Juvonen <tomi.juvonen@nokia.com>
Thu, 28 Nov 2019 10:31:51 +0000 (12:31 +0200)
committerTomi Juvonen <tomi.juvonen@nokia.com>
Wed, 8 Jan 2020 10:22:50 +0000 (12:22 +0200)
Support running Doctor testing in DevStack multi-node controller

JIRA: DOCTOR-136

Signed-off-by: Tomi Juvonen <tomi.juvonen@nokia.com>
Change-Id: I1569f3f77d889420b3b8f3c2724c10253e509c28

13 files changed:
doctor_tests/app_manager/sample.py
doctor_tests/common/utils.py
doctor_tests/image.py
doctor_tests/inspector/sample.py
doctor_tests/installer/__init__.py
doctor_tests/installer/base.py
doctor_tests/installer/common/set_compute_config.py
doctor_tests/installer/devstack.py [new file with mode: 0644]
doctor_tests/installer/local.py [deleted file]
doctor_tests/installer/mcp.py
doctor_tests/main.py
doctor_tests/scenario/maintenance.py
doctor_tests/user.py

index 94049aa..7ca35b9 100644 (file)
@@ -165,7 +165,7 @@ class AppManager(Thread):
             data = json.loads(request.data.decode('utf8'))
             try:
                 payload = self._alarm_traits_decoder(data)
-            except:
+            except Exception:
                 payload = ({t[0]: t[2] for t in
                            data['reason_data']['event']['traits']})
                 self.log.error('cannot parse alarm data: %s' % payload)
index 1a8840d..67ca4f4 100644 (file)
@@ -10,6 +10,7 @@ import json
 import os
 import paramiko
 import re
+import subprocess
 
 
 def load_json_file(full_path):
@@ -97,6 +98,27 @@ class SSHClient(object):
         ftp.close()
 
 
+class LocalSSH(object):
+
+    def __init__(self, log):
+        self.log = log
+        self.log.info('Init local ssh client')
+
+    def ssh(self, cmd):
+        ret = 0
+        output = "%s failed!!!" % cmd
+        try:
+            output = subprocess.check_output((cmd), shell=True,
+                                             universal_newlines=True)
+        except subprocess.CalledProcessError:
+            ret = 1
+        return ret, output
+
+    def scp(self, src_file, dst_file):
+        return subprocess.check_output("cp %s %s" % (src_file, dst_file),
+                                       shell=True)
+
+
 def run_async(func):
     from threading import Thread
     from functools import wraps
index 9961b22..50841ef 100644 (file)
@@ -7,7 +7,11 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import os
-import urllib.request
+try:
+    from urllib.request import urlopen
+except Exception:
+    from urllib2 import urlopen
+
 
 from oslo_config import cfg
 
@@ -46,11 +50,14 @@ class Image(object):
 
     def create(self):
         self.log.info('image create start......')
-
         images = {image.name: image for image in self.glance.images.list()}
+        if self.conf.image_name == 'cirros':
+            cirros = [image for image in images if 'cirros' in image]
+            if cirros:
+                self.conf.image_name = cirros[0]
         if self.conf.image_name not in images:
             if not os.path.exists(self.conf.image_filename):
-                resp = urllib.request.urlopen(self.conf.image_download_url)
+                resp = urlopen(self.conf.image_download_url)
                 with open(self.conf.image_filename, "wb") as file:
                     file.write(resp.read())
             self.image = \
index 70156b2..c44db95 100644 (file)
@@ -52,7 +52,7 @@ class SampleInspector(BaseInspector):
                                                  driver='messaging',
                                                  topics=['notifications'])
             self.notif = self.notif.prepare(publisher_id='sample')
-        except:
+        except Exception:
             self.notif = None
 
     def _init_novaclients(self):
@@ -135,7 +135,7 @@ class SampleInspector(BaseInspector):
     def maintenance(self, data):
         try:
             payload = self._alarm_traits_decoder(data)
-        except:
+        except Exception:
             payload = ({t[0]: t[2] for t in
                        data['reason_data']['event']['traits']})
             self.log.error('cannot parse alarm data: %s' % payload)
index 2b9ad83..00a0166 100644 (file)
@@ -13,8 +13,8 @@ from oslo_utils import importutils
 
 OPTS = [
     cfg.StrOpt('type',
-               default=os.environ.get('INSTALLER_TYPE', 'local'),
-               choices=['local', 'apex', 'daisy', 'fuel'],
+               default=os.environ.get('INSTALLER_TYPE', 'devstack'),
+               choices=['apex', 'daisy', 'fuel', 'devstack'],
                help='the type of installer',
                required=True),
     cfg.StrOpt('ip',
@@ -28,10 +28,10 @@ OPTS = [
 
 
 _installer_name_class_mapping = {
-    'local': 'doctor_tests.installer.local.LocalInstaller',
     'apex': 'doctor_tests.installer.apex.ApexInstaller',
     'daisy': 'doctor_tests.installer.daisy.DaisyInstaller',
-    'fuel': 'doctor_tests.installer.mcp.McpInstaller'
+    'fuel': 'doctor_tests.installer.mcp.McpInstaller',
+    'devstack': 'doctor_tests.installer.devstack.DevstackInstaller'
 }
 
 
index b227065..de4d2f2 100644 (file)
@@ -11,7 +11,6 @@ import getpass
 import grp
 import os
 import pwd
-import re
 import six
 import stat
 import subprocess
@@ -127,47 +126,9 @@ class BaseInstaller(object):
         os.chmod(ssh_key, stat.S_IREAD)
         return ssh_key
 
+    @abc.abstractmethod
     def get_transport_url(self):
-        client = utils.SSHClient(self.controllers[0], self.node_user_name,
-                                 key_filename=self.key_file)
-        if self.use_containers:
-            ncbase = "/var/lib/config-data/puppet-generated/nova"
-        else:
-            ncbase = ""
-        try:
-            cmd = 'sudo grep "^transport_url" %s/etc/nova/nova.conf' % ncbase
-            ret, url = client.ssh(cmd)
-            if ret:
-                raise Exception('Exec command to get transport from '
-                                'controller(%s) failed, '
-                                'ret=%s, output=%s'
-                                % (self.controllers[0], ret, url))
-            elif self.controllers[0] not in url:
-                # need to use ip instead of hostname
-                ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
-                       url[0].split("=", 1)[1]))
-        except:
-            cmd = 'grep -i "^rabbit" %s/etc/nova/nova.conf' % ncbase
-            ret, lines = client.ssh(cmd)
-            if ret:
-                raise Exception('Exec command to get transport from '
-                                'controller(%s) in Apex installer failed, '
-                                'ret=%s, output=%s'
-                                % (self.controllers[0], ret, url))
-            else:
-                for line in lines.split('\n'):
-                    if line.startswith("rabbit_userid"):
-                        rabbit_userid = line.split("=")
-                    if line.startswith("rabbit_port"):
-                        rabbit_port = line.split("=")
-                    if line.startswith("rabbit_password"):
-                        rabbit_password = line.split("=")
-                ret = "rabbit://%s:%s@%s:%s/?ssl=0" % (rabbit_userid,
-                                                       rabbit_password,
-                                                       self.controllers[0],
-                                                       rabbit_port)
-        self.log.debug('get_transport_url %s' % ret)
-        return ret
+        pass
 
     def _run_cmd_remote(self, client, command):
         self.log.info('Run command=%s in %s installer......'
@@ -199,14 +160,15 @@ class BaseInstaller(object):
     def _run_apply_patches(self, client, restart_cmd, script_names,
                            python='python3'):
         installer_dir = os.path.dirname(os.path.realpath(__file__))
-
         if isinstance(script_names, list):
             for script_name in script_names:
                 script_abs_path = '{0}/{1}/{2}'.format(installer_dir,
                                                        'common', script_name)
+                if self.conf.installer.type == "devstack":
+                    script_name = "/opt/stack/%s" % script_name
                 try:
                     client.scp(script_abs_path, script_name)
-                except:
+                except Exception:
                     client.scp(script_abs_path, script_name)
                 try:
                     if ".py" in script_name:
@@ -216,14 +178,14 @@ class BaseInstaller(object):
                                                                script_name)
                     ret, output = client.ssh(cmd)
                     self.log.info('Command %s output %s' % (cmd, output))
-                except:
+                except Exception:
                     ret, output = client.ssh(cmd)
-
+                    self.log.info('Command %s output %s' % (cmd, output))
                 if ret:
                     raise Exception('Do the command in remote'
                                     ' node failed, ret=%s, cmd=%s, output=%s'
                                     % (ret, cmd, output))
-            if 'nova' in restart_cmd:
+            if 'nova' in restart_cmd or 'devstack@n-' in restart_cmd:
                 # Make sure scheduler has proper cpu_allocation_ratio
                 time.sleep(5)
             client.ssh(restart_cmd)
index 76ac649..615f189 100644 (file)
@@ -26,9 +26,9 @@ def set_cpu_allocation_ratio():
     found_list = ([ca for ca in fcheck.readlines() if "cpu_allocation_ratio"
                   in ca])
     fcheck.close()
+    change = False
+    found = False
     if found_list and len(found_list):
-        change = False
-        found = False
         for car in found_list:
             if car.startswith('#'):
                 continue
diff --git a/doctor_tests/installer/devstack.py b/doctor_tests/installer/devstack.py
new file mode 100644 (file)
index 0000000..02f3601
--- /dev/null
@@ -0,0 +1,151 @@
+##############################################################################
+# Copyright (c) 2019 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import socket
+import time
+
+from doctor_tests.common.utils import SSHClient
+from doctor_tests.common.utils import LocalSSH
+from doctor_tests.identity_auth import get_session
+from doctor_tests.installer.base import BaseInstaller
+from doctor_tests.os_clients import nova_client
+
+
+class DevstackInstaller(BaseInstaller):
+    node_user_name = None
+    cm_set_script = 'set_config.py'
+    nc_set_compute_script = 'set_compute_config.py'
+    cm_restore_script = 'restore_config.py'
+    nc_restore_compute_script = 'restore_compute_config.py'
+    ac_restart_script = 'restart_aodh.py'
+    ac_restore_script = 'restore_aodh.py'
+    python = 'python'
+
+    def __init__(self, conf, log):
+        super(DevstackInstaller, self).__init__(conf, log)
+        # Run Doctor under users home. sudo hides other env param to be used
+        home, self.node_user_name = (iter(os.environ.get('VIRTUAL_ENV')
+                                     .split('/', 3)[1:3]))
+        # Migration needs to work so ssh should have proper key defined
+        self.key_file = '/%s/%s/.ssh/id_rsa' % (home, self.node_user_name)
+        self.log.info('ssh uses: %s and %s' % (self.node_user_name,
+                                               self.key_file))
+        self.controllers = ([ip for ip in
+                            socket.gethostbyname_ex(socket.gethostname())[2]
+                            if not ip.startswith('127.')] or
+                            [[(s.connect(('8.8.8.8', 53)),
+                             s.getsockname()[0], s.close())
+                             for s in [socket.socket(socket.AF_INET,
+                                       socket.SOCK_DGRAM)]][0][1]])
+        conf.admin_tool.ip = self.controllers[0]
+        self.computes = list()
+        self.nova = nova_client(conf.nova_version, get_session())
+
+    def setup(self):
+        self.log.info('Setup Devstack installer start......')
+        self._get_devstack_conf()
+        self.create_flavor()
+        self.set_apply_patches()
+
+    def cleanup(self):
+        self.restore_apply_patches()
+
+    def get_ssh_key_from_installer(self):
+        return self.key_file
+
+    def get_transport_url(self):
+        client = LocalSSH(self.log)
+        cmd = 'sudo grep -m1 "^transport_url" /etc/nova/nova.conf'
+        ret, url = client.ssh(cmd)
+        url = url.split("= ", 1)[1][:-1]
+        self.log.info('get_transport_url %s' % url)
+        return url
+
+    def get_host_ip_from_hostname(self, hostname):
+        return [hvisor.__getattr__('host_ip') for hvisor in self.hvisors
+                if hvisor.__getattr__('hypervisor_hostname') == hostname][0]
+
+    def _get_devstack_conf(self):
+        self.log.info('Get devstack config details for Devstack installer'
+                      '......')
+        self.hvisors = self.nova.hypervisors.list(detailed=True)
+        self.log.info('checking hypervisors.......')
+        self.computes = [hvisor.__getattr__('host_ip') for hvisor in
+                         self.hvisors]
+        self.use_containers = False
+        self.log.info('controller_ips:%s' % self.controllers)
+        self.log.info('compute_ips:%s' % self.computes)
+        self.log.info('use_containers:%s' % self.use_containers)
+
+    def _set_docker_restart_cmd(self, service):
+        # There can be multiple instances running so need to restart all
+        cmd = "for container in `sudo docker ps | grep "
+        cmd += service
+        cmd += " | awk '{print $1}'`; do sudo docker restart $container; \
+               done;"
+        return cmd
+
+    def set_apply_patches(self):
+        self.log.info('Set apply patches start......')
+
+        set_scripts = [self.cm_set_script]
+
+        restart_cmd = 'sudo systemctl restart' \
+                      ' devstack@ceilometer-anotification.service'
+
+        client = LocalSSH(self.log)
+        self._run_apply_patches(client,
+                                restart_cmd,
+                                set_scripts,
+                                python=self.python)
+        time.sleep(7)
+
+        self.log.info('Set apply patches start......')
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd = 'sudo systemctl restart' \
+                          ' devstack@n-cpu.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(client,
+                                        restart_cmd,
+                                        [self.nc_set_compute_script],
+                                        python=self.python)
+            time.sleep(7)
+
+    def restore_apply_patches(self):
+        self.log.info('restore apply patches start......')
+
+        restore_scripts = [self.cm_restore_script]
+
+        restart_cmd = 'sudo systemctl restart' \
+                      ' devstack@ceilometer-anotification.service'
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd += ' devstack@n-sch.service'
+            restore_scripts.append(self.nc_restore_compute_script)
+
+        client = LocalSSH(self.log)
+        self._run_apply_patches(client,
+                                restart_cmd,
+                                restore_scripts,
+                                python=self.python)
+
+        if self.conf.test_case != 'fault_management':
+
+            restart_cmd = 'sudo systemctl restart' \
+                          ' devstack@n-cpu.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(
+                    client, restart_cmd,
+                    [self.nc_restore_compute_script],
+                    python=self.python)
diff --git a/doctor_tests/installer/local.py b/doctor_tests/installer/local.py
deleted file mode 100644 (file)
index fee14f3..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import shutil
-import subprocess
-
-from doctor_tests.installer.base import BaseInstaller
-from doctor_tests.installer.common.vitrage import \
-    set_vitrage_host_down_template
-from doctor_tests.common.constants import Inspector
-from doctor_tests.common.utils import load_json_file
-from doctor_tests.common.utils import write_json_file
-
-
-class LocalInstaller(BaseInstaller):
-    node_user_name = 'root'
-
-    nova_policy_file = '/etc/nova/policy.json'
-    nova_policy_file_backup = '%s%s' % (nova_policy_file, '.bak')
-
-    def __init__(self, conf, log):
-        super(LocalInstaller, self).__init__(conf, log)
-        self.policy_modified = False
-        self.add_policy_file = False
-
-    def setup(self):
-        self.get_ssh_key_from_installer()
-        self.set_apply_patches()
-
-    def cleanup(self):
-        self.restore_apply_patches()
-
-    def get_ssh_key_from_installer(self):
-        self.log.info('Assuming SSH keys already exchanged with computer'
-                      'for local installer type')
-        return None
-
-    def get_host_ip_from_hostname(self, hostname):
-        self.log.info('Get host ip from host name in local installer......')
-
-        cmd = "getent hosts %s | awk '{ print $1 }'" % (hostname)
-        server = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
-        stdout, stderr = server.communicate()
-        host_ip = stdout.strip().decode("utf-8")
-
-        self.log.info('Get host_ip:%s from host_name:%s in local installer'
-                      % (host_ip, hostname))
-        return host_ip
-
-    def set_apply_patches(self):
-        self._set_nova_policy()
-        if self.conf.inspector.type == Inspector.VITRAGE:
-            set_vitrage_host_down_template()
-            os.system('sudo systemctl restart devstack@vitrage-graph.service')
-
-    def restore_apply_patches(self):
-        self._restore_nova_policy()
-
-    def _set_nova_policy(self):
-        host_status_policy = 'os_compute_api:servers:show:host_status'
-        host_status_rule = 'rule:admin_or_owner'
-        policy_data = {
-            'context_is_admin': 'role:admin',
-            'owner': 'user_id:%(user_id)s',
-            'admin_or_owner': 'rule:context_is_admin or rule:owner',
-            host_status_policy: host_status_rule
-        }
-
-        if os.path.isfile(self.nova_policy_file):
-            data = load_json_file(self.nova_policy_file)
-            if host_status_policy in data:
-                rule_origion = data[host_status_policy]
-                if host_status_rule == rule_origion:
-                    self.log.info('Do not need to modify nova policy.')
-                    self.policy_modified = False
-                else:
-                    # update the host_status_policy
-                    data[host_status_policy] = host_status_rule
-                    self.policy_modified = True
-            else:
-                # add the host_status_policy, if the admin_or_owner is not
-                # defined, add it also
-                for policy, rule in policy_data.items():
-                    if policy not in data:
-                        data[policy] = rule
-                self.policy_modified = True
-            if self.policy_modified:
-                self.log.info('Nova policy is Modified.')
-                shutil.copyfile(self.nova_policy_file,
-                                self.nova_policy_file_backup)
-        else:
-            # file does not exit, create a new one and add the policy
-            self.log.info('Nova policy file not exist. Creating a new one')
-            data = policy_data
-            self.add_policy_file = True
-
-        if self.policy_modified or self.add_policy_file:
-            write_json_file(self.nova_policy_file, data)
-            os.system('sudo systemctl restart devstack@n-api.service')
-
-    def _restore_nova_policy(self):
-        if self.policy_modified:
-            shutil.copyfile(self.nova_policy_file_backup,
-                            self.nova_policy_file)
-            os.remove(self.nova_policy_file_backup)
-        elif self.add_policy_file:
-            os.remove(self.nova_policy_file)
-
-        if self.add_policy_file or self.policy_modified:
-            os.system('sudo systemctl restart devstack@n-api.service')
-            self.add_policy_file = False
-            self.policy_modified = False
index 65c8ed7..f8f33c8 100644 (file)
@@ -7,6 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 from os.path import isfile
+import re
 import time
 
 from doctor_tests.common.constants import is_fenix
@@ -60,6 +61,45 @@ class McpInstaller(BaseInstaller):
         mcp_key = '/var/lib/opnfv/mcp.rsa'
         return mcp_key if isfile(mcp_key) else ssh_key
 
+    def get_transport_url(self):
+        client = SSHClient(self.controllers[0], self.node_user_name,
+                           key_filename=self.key_file)
+        try:
+            cmd = 'sudo grep -m1 "^transport_url" /etc/nova/nova.conf'
+            ret, url = client.ssh(cmd)
+
+            if ret:
+                raise Exception('Exec command to get transport from '
+                                'controller(%s) in MCP installer failed, '
+                                'ret=%s, output=%s'
+                                % (self.controllers[0], ret, url))
+            elif self.controllers[0] not in url:
+                # need to use ip instead of hostname
+                url = (re.sub("@.*:", "@%s:" % self.controllers[0],
+                       url[0].split("=", 1)[1]))
+        except Exception:
+            cmd = 'grep -i "^rabbit" /etc/nova/nova.conf'
+            ret, lines = client.ssh(cmd)
+            if ret:
+                raise Exception('Exec command to get transport from '
+                                'controller(%s) in MCP installer failed, '
+                                'ret=%s, output=%s'
+                                % (self.controllers[0], ret, url))
+            else:
+                for line in lines.split('\n'):
+                    if line.startswith("rabbit_userid"):
+                        rabbit_userid = line.split("=")
+                    if line.startswith("rabbit_port"):
+                        rabbit_port = line.split("=")
+                    if line.startswith("rabbit_password"):
+                        rabbit_password = line.split("=")
+                url = "rabbit://%s:%s@%s:%s/?ssl=0" % (rabbit_userid,
+                                                       rabbit_password,
+                                                       self.controllers[0],
+                                                       rabbit_port)
+        self.log.info('get_transport_url %s' % url)
+        return url
+
     def _copy_overcloudrc_to_controllers(self):
         for ip in self.controllers:
             cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
index cdb4af5..3dea89d 100644 (file)
@@ -43,7 +43,6 @@ class DoctorTest(object):
     def setup(self):
         # prepare the cloud env
         self.installer.setup()
-
         # preparing VM image...
         self.image.create()
 
@@ -51,39 +50,51 @@ class DoctorTest(object):
         self.user.create()
 
     def test_fault_management(self):
-        try:
-            LOG.info('doctor fault management test starting.......')
-            transport_url = self.installer.get_transport_url()
-            self.fault_management = \
-                FaultManagement(self.conf, self.installer, self.user, LOG,
-                                transport_url)
-
-            # prepare test env
-            self.fault_management.setup()
-
-            # wait for aodh alarms are updated in caches for event evaluator,
-            # sleep time should be larger than event_alarm_cache_ttl
-            # (default 60)
-            # (tojuvone) Fraser currently needs 120
-            time.sleep(120)
-
-            # injecting host failure...
-            # NOTE (umar) add INTERFACE_NAME logic to host injection
-            self.fault_management.start()
-            time.sleep(30)
-
-            # verify the test results
-            # NOTE (umar) copy remote monitor.log file when monitor=collectd
-            self.fault_management.check_host_status('down')
-            self.fault_management.check_notification_time()
-
-        except Exception as e:
-            LOG.error('doctor fault management test failed, '
-                      'Exception=%s' % e)
-            LOG.error(format_exc())
-            sys.exit(1)
-        finally:
-            self.fault_management.cleanup()
+        retry = 2
+        # Retry once if notified_time is None
+        while retry > 0:
+            try:
+                self.fault_management = None
+                LOG.info('doctor fault management test starting.......')
+                transport_url = self.installer.get_transport_url()
+                self.fault_management = \
+                    FaultManagement(self.conf, self.installer, self.user, LOG,
+                                    transport_url)
+
+                # prepare test env
+                self.fault_management.setup()
+
+                # wait for aodh alarms are updated in caches for event
+                # evaluator,
+                # sleep time should be larger than event_alarm_cache_ttl
+                # (default 60)
+                # (tojuvone) Fraser currently needs 120
+                time.sleep(120)
+
+                # injecting host failure...
+                # NOTE (umar) add INTERFACE_NAME logic to host injection
+                self.fault_management.start()
+                time.sleep(30)
+
+                # verify the test results
+                # NOTE (umar) copy remote monitor.log file when
+                # monitor=collectd
+                self.fault_management.check_host_status('down')
+                self.fault_management.check_notification_time()
+                retry = 0
+
+            except Exception as e:
+                LOG.error('doctor fault management test failed, '
+                          'Exception=%s' % e)
+                if 'notified_time=None' in str(e):
+                    retry -= 1
+                    LOG.info('doctor fault management retry')
+                    continue
+                LOG.error(format_exc())
+                sys.exit(1)
+            finally:
+                if self.fault_management is not None:
+                    self.fault_management.cleanup()
 
     def _amount_compute_nodes(self):
         services = self.nova.services.list(binary='nova-compute')
@@ -96,11 +107,12 @@ class DoctorTest(object):
             LOG.info('not enough compute nodes, skipping doctor '
                      'maintenance test')
             return
-        elif self.conf.installer.type not in ['apex', 'fuel']:
+        elif self.conf.installer.type not in ['apex', 'fuel', 'devstack']:
             LOG.info('not supported installer, skipping doctor '
                      'maintenance test')
             return
         try:
+            maintenance = None
             LOG.info('doctor maintenance test starting.......')
             trasport_url = self.installer.get_transport_url()
             maintenance = Maintenance(trasport_url, self.conf, LOG)
@@ -122,7 +134,8 @@ class DoctorTest(object):
             LOG.error(format_exc())
             sys.exit(1)
         finally:
-            maintenance.cleanup_maintenance()
+            if maintenance is not None:
+                maintenance.cleanup_maintenance()
 
     def run(self):
         """run doctor tests"""
index 2e40529..f5b9b51 100644 (file)
@@ -35,11 +35,16 @@ class Maintenance(object):
         auth = get_identity_auth(project=self.conf.doctor_project)
         self.neutron = neutron_client(get_session(auth=auth))
         self.stack = Stack(self.conf, self.log)
+        if self.conf.installer.type == "devstack":
+            self.endpoint_ip = trasport_url.split("@", 1)[1].split(":", 1)[0]
+        else:
+            self.endpoint_ip = self.conf.admin_tool.ip
+        self.endpoint = "http://%s:12347/" % self.endpoint_ip
         if self.conf.admin_tool.type == 'sample':
             self.admin_tool = get_admin_tool(trasport_url, self.conf, self.log)
-            self.endpoint = 'maintenance'
+            self.endpoint += 'maintenance'
         else:
-            self.endpoint = 'v1/maintenance'
+            self.endpoint += 'v1/maintenance'
         self.app_manager = get_app_manager(self.stack, self.conf, self.log)
         self.inspector = get_inspector(self.conf, self.log, trasport_url)
 
@@ -128,8 +133,9 @@ class Maintenance(object):
         else:
             # TBD Now we expect Fenix is running in self.conf.admin_tool.port
             pass
-        self.app_manager.start()
+        # Inspector before app_manager, as floating ip might come late
         self.inspector.start()
+        self.app_manager.start()
 
     def start_maintenance(self):
         self.log.info('start maintenance.......')
@@ -138,17 +144,13 @@ class Maintenance(object):
         for hvisor in hvisors:
             hostname = hvisor.__getattr__('hypervisor_hostname')
             maintenance_hosts.append(hostname)
-
-        url = ('http://%s:%s/%s' %
-               (self.conf.admin_tool.ip,
-                self.conf.admin_tool.port,
-                self.endpoint))
+        url = self.endpoint
         headers = {
             'Content-Type': 'application/json',
             'Accept': 'application/json'}
         if self.conf.admin_tool.type == 'fenix':
             headers['X-Auth-Token'] = self.admin_session.get_token()
-        self.log.info('headers %s' % headers)
+        self.log.info('url %s headers %s' % (url, headers))
         retries = 12
         ret = None
         while retries > 0:
@@ -170,7 +172,7 @@ class Maintenance(object):
             try:
                 ret = requests.post(url, data=json.dumps(data),
                                     headers=headers)
-            except:
+            except Exception:
                 if retries == 0:
                     raise Exception('admin tool did not respond in 120s')
                 else:
@@ -187,11 +189,8 @@ class Maintenance(object):
 
     def remove_maintenance_session(self, session_id):
         self.log.info('remove maintenance session %s.......' % session_id)
-        url = ('http://%s:%s/%s/%s' %
-               (self.conf.admin_tool.ip,
-                self.conf.admin_tool.port,
-                self.endpoint,
-                session_id))
+
+        url = ('%s/%s' % (self.endpoint, session_id))
 
         headers = {
             'Content-Type': 'application/json',
@@ -205,11 +204,8 @@ class Maintenance(object):
             raise Exception(ret.text)
 
     def get_maintenance_state(self, session_id):
-        url = ('http://%s:%s/%s/%s' %
-               (self.conf.admin_tool.ip,
-                self.conf.admin_tool.port,
-                self.endpoint,
-                session_id))
+
+        url = ('%s/%s' % (self.endpoint, session_id))
 
         headers = {
             'Content-Type': 'application/json',
index 29aa004..2cd9757 100644 (file)
@@ -129,7 +129,6 @@ class User(object):
 
     def _add_user_role_in_project(self, is_admin=False):
         """add test user with test role in test project"""
-
         project = self.projects.get(self.conf.doctor_project)
 
         user_name = 'admin' if is_admin else self.conf.doctor_user