Add setup and cleanup for maintenance test 35/50735/28
author     Tomi Juvonen <tomi.juvonen@nokia.com>
Wed, 17 Jan 2018 08:20:44 +0000 (10:20 +0200)
committer  Tomi Juvonen <tomi.juvonen@nokia.com>
Thu, 9 Aug 2018 09:08:49 +0000 (12:08 +0300)
JIRA: DOCTOR-106

Change-Id: Idb95e1391e8f9094f68e2c7bc79fd2d7e01af9de
Signed-off-by: Tomi Juvonen <tomi.juvonen@nokia.com>
14 files changed:
doctor_tests/installer/apex.py
doctor_tests/installer/base.py
doctor_tests/installer/common/restore_compute_config.py [new file with mode: 0644]
doctor_tests/installer/common/restore_config.py [moved from doctor_tests/installer/common/restore_ceilometer.py with 51% similarity]
doctor_tests/installer/common/set_ceilometer.py [deleted file]
doctor_tests/installer/common/set_compute_config.py [new file with mode: 0644]
doctor_tests/installer/common/set_config.py [new file with mode: 0644]
doctor_tests/main.py
doctor_tests/maintenance_hot_tpl.yaml [new file with mode: 0644]
doctor_tests/os_clients.py
doctor_tests/scenario/maintenance.py [new file with mode: 0644]
doctor_tests/stack.py [new file with mode: 0644]
doctor_tests/user.py
requirements.txt

index c851b2a..121767f 100644 (file)
@@ -6,14 +6,19 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+import re
+import time
+
 from doctor_tests.common.utils import SSHClient
 from doctor_tests.installer.base import BaseInstaller
 
 
 class ApexInstaller(BaseInstaller):
     node_user_name = 'heat-admin'
-    cm_set_script = 'set_ceilometer.py'
-    cm_restore_script = 'restore_ceilometer.py'
+    cm_set_script = 'set_config.py'
+    cm_set_compute_script = 'set_compute_config.py'
+    cm_restore_script = 'restore_config.py'
+    cm_restore_compute_script = 'restore_compute_config.py'
 
     def __init__(self, conf, log):
         super(ApexInstaller, self).__init__(conf, log)
@@ -23,13 +28,15 @@ class ApexInstaller(BaseInstaller):
                                 look_for_keys=True)
         self.key_file = None
         self.controllers = list()
+        self.computes = list()
         self.controller_clients = list()
+        self.compute_clients = list()
 
     def setup(self):
         self.log.info('Setup Apex installer start......')
 
         self.key_file = self.get_ssh_key_from_installer()
-        self.controllers = self.get_controller_ips()
+        self._get_and_set_ips()
         self.create_flavor()
         self.set_apply_patches()
         self.setup_stunnel()
@@ -43,16 +50,20 @@ class ApexInstaller(BaseInstaller):
         key_path = '/home/stack/.ssh/id_rsa'
         return self._get_ssh_key(self.client, key_path)
 
-    def get_controller_ips(self):
-        self.log.info('Get controller ips from Apex installer......')
-
-        command = "source stackrc; " \
-                  "nova list | grep ' overcloud-controller-[0-9] ' " \
-                  "| sed -e 's/^.*ctlplane=//' |awk '{print $1}'"
-        controllers = self._run_cmd_remote(self.client, command)
-        self.log.info('Get controller_ips:%s from Apex installer'
-                      % controllers)
-        return controllers
+    def _get_and_set_ips(self):
+        self.log.info('Get controller and compute ips from Apex installer'
+                      '......')
+
+        command = "source stackrc; nova list | grep ' overcloud-'"
+        raw_ips_list = self._run_cmd_remote(self.client, command)
+        for line in raw_ips_list:
+            ip = line.split('ctlplane=', 1)[1].split(" ", 1)[0]
+            if 'overcloud-controller-' in line:
+                self.controllers.append(ip)
+            elif 'overcloud-novacompute-' in line:
+                self.computes.append(ip)
+        self.log.info('controller_ips:%s' % self.controllers)
+        self.log.info('compute_ips:%s' % self.computes)
 
     def get_host_ip_from_hostname(self, hostname):
         self.log.info('Get host ip by hostname=%s from Apex installer......'
@@ -63,12 +74,31 @@ class ApexInstaller(BaseInstaller):
         host_ips = self._run_cmd_remote(self.client, command)
         return host_ips[0]
 
+    def get_transport_url(self):
+        client = SSHClient(self.controllers[0], self.node_user_name,
+                           key_filename=self.key_file)
+
+        command = 'sudo grep "^transport_url" /etc/nova/nova.conf'
+        ret, url = client.ssh(command)
+        if ret:
+            raise Exception('Failed to get transport_url from controller'
+                            '(%s), ret=%s, output=%s'
+                            % (self.controllers[0], ret, url))
+        # need to use ip instead of hostname
+        ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
+               url[0].split("=", 1)[1]))
+        self.log.debug('get_transport_url %s' % ret)
+        return ret
+
     def set_apply_patches(self):
         self.log.info('Set apply patches start......')
 
         restart_cm_cmd = 'sudo systemctl restart ' \
                          'openstack-ceilometer-notification.service'
 
+        if self.conf.test_case != 'fault_management':
+            restart_cm_cmd += ' openstack-nova-scheduler.service'
+
         for node_ip in self.controllers:
             client = SSHClient(node_ip, self.node_user_name,
                                key_filename=self.key_file)
@@ -77,13 +107,38 @@ class ApexInstaller(BaseInstaller):
                                     restart_cm_cmd,
                                     self.cm_set_script)
 
+        if self.conf.test_case != 'fault_management':
+            restart_cm_cmd = 'sudo systemctl restart ' \
+                             'openstack-nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self.compute_clients.append(client)
+                self._run_apply_patches(client,
+                                        restart_cm_cmd,
+                                        self.cm_set_compute_script)
+
+        if self.conf.test_case != 'fault_management':
+            time.sleep(10)
+
     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
 
         restart_cm_cmd = 'sudo systemctl restart ' \
                          'openstack-ceilometer-notification.service'
 
+        if self.conf.test_case != 'fault_management':
+            restart_cm_cmd += ' openstack-nova-scheduler.service'
+
         for client in self.controller_clients:
             self._run_apply_patches(client,
                                     restart_cm_cmd,
                                     self.cm_restore_script)
+
+        if self.conf.test_case != 'fault_management':
+            restart_cm_cmd = 'sudo systemctl restart ' \
+                             'openstack-nova-compute.service'
+            for client in self.compute_clients:
+                self._run_apply_patches(client,
+                                        restart_cm_cmd,
+                                        self.cm_restore_compute_script)
index f37b7f1..4eed3f2 100644 (file)
@@ -58,21 +58,33 @@ class BaseInstaller(object):
     def setup_stunnel(self):
         self.log.info('Setup ssh stunnel in %s installer......'
                       % self.conf.installer.type)
+        tunnels = [self.conf.consumer.port]
+        if self.conf.test_case == 'maintenance':
+            tunnel_uptime = 1200
+            tunnels += [self.conf.app_manager.port, self.conf.inspector.port]
+        elif self.conf.test_case == 'all':
+            tunnel_uptime = 1800
+            tunnels += [self.conf.app_manager.port, self.conf.inspector.port]
+        else:
+            tunnel_uptime = 600
 
         for node_ip in self.controllers:
-            cmd = ("ssh -o UserKnownHostsFile=/dev/null"
-                   " -o StrictHostKeyChecking=no"
-                   " -i %s %s@%s -R %s:localhost:%s"
-                   " sleep 600 > ssh_tunnel.%s"
-                   " 2>&1 < /dev/null "
-                   % (self.key_file,
-                      self.node_user_name,
-                      node_ip,
-                      self.conf.consumer.port,
-                      self.conf.consumer.port,
-                      node_ip))
-            server = subprocess.Popen('exec ' + cmd, shell=True)
-            self.servers.append(server)
+            for port in tunnels:
+                self.log.info('tunnel for port %s' % port)
+                cmd = ("ssh -o UserKnownHostsFile=/dev/null"
+                       " -o StrictHostKeyChecking=no"
+                       " -i %s %s@%s -R %s:localhost:%s"
+                       " sleep %s > ssh_tunnel.%s"
+                       " 2>&1 < /dev/null "
+                       % (self.key_file,
+                          self.node_user_name,
+                          node_ip,
+                          port,
+                          port,
+                          tunnel_uptime,
+                          node_ip))
+                server = subprocess.Popen('exec ' + cmd, shell=True)
+                self.servers.append(server)
 
     def _get_ssh_key(self, client, key_path):
         self.log.info('Get SSH keys from %s installer......'
diff --git a/doctor_tests/installer/common/restore_compute_config.py b/doctor_tests/installer/common/restore_compute_config.py
new file mode 100644 (file)
index 0000000..0971d12
--- /dev/null
@@ -0,0 +1,25 @@
+##############################################################################
+# Copyright (c) 2018 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import shutil
+
+
+def restore_cpu_allocation_ratio():
+    nova_file = '/etc/nova/nova.conf'
+    nova_file_bak = '/etc/nova/nova.bak'
+
+    if not os.path.isfile(nova_file_bak):
+        print('Bak_file:%s does not exist.' % nova_file_bak)
+    else:
+        print('restore: %s' % nova_file)
+        shutil.copyfile(nova_file_bak, nova_file)
+        os.remove(nova_file_bak)
+    return
+
+restore_cpu_allocation_ratio()
@@ -24,4 +24,32 @@ def restore_ep_config():
     return
 
 
+def restore_ed_config():
+
+    ed_file = '/etc/ceilometer/event_definitions.yaml'
+    ed_file_bak = '/etc/ceilometer/event_definitions.bak'
+
+    if not os.path.isfile(ed_file_bak):
+        print("Bak_file doesn't exist: %s." % ed_file_bak)
+    else:
+        print('restore: %s' % ed_file)
+        shutil.copyfile(ed_file_bak, ed_file)
+        os.remove(ed_file_bak)
+    return
+
+
+def restore_cpu_allocation_ratio():
+    nova_file = '/etc/nova/nova.conf'
+    nova_file_bak = '/etc/nova/nova.bak'
+
+    if not os.path.isfile(nova_file_bak):
+        print('Bak_file:%s does not exist.' % nova_file_bak)
+    else:
+        print('restore: %s' % nova_file)
+        shutil.copyfile(nova_file_bak, nova_file)
+        os.remove(nova_file_bak)
+    return
+
 restore_ep_config()
+restore_ed_config()
+restore_cpu_allocation_ratio()
diff --git a/doctor_tests/installer/common/set_ceilometer.py b/doctor_tests/installer/common/set_ceilometer.py
deleted file mode 100644 (file)
index 4050aae..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import shutil
-import yaml
-
-ep_file = '/etc/ceilometer/event_pipeline.yaml'
-ep_file_bak = '/etc/ceilometer/event_pipeline.yaml.bak'
-event_notifier_topic = 'notifier://?topic=alarm.all'
-
-
-def set_notifier_topic():
-    config_modified = False
-
-    if not os.path.isfile(ep_file):
-        raise Exception("File doesn't exist: %s." % ep_file)
-
-    with open(ep_file, 'r') as file:
-        config = yaml.safe_load(file)
-
-    sinks = config['sinks']
-    for sink in sinks:
-        if sink['name'] == 'event_sink':
-            publishers = sink['publishers']
-            if event_notifier_topic not in publishers:
-                print('Add event notifier in ceilometer')
-                publishers.append(event_notifier_topic)
-                config_modified = True
-            else:
-                print('NOTE: event notifier is configured'
-                      'in ceilometer as we needed')
-
-    if config_modified:
-        shutil.copyfile(ep_file, ep_file_bak)
-        with open(ep_file, 'w+') as file:
-            file.write(yaml.safe_dump(config))
-
-
-set_notifier_topic()
diff --git a/doctor_tests/installer/common/set_compute_config.py b/doctor_tests/installer/common/set_compute_config.py
new file mode 100644 (file)
index 0000000..07db1e1
--- /dev/null
@@ -0,0 +1,48 @@
+##############################################################################
+# Copyright (c) 2018 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import shutil
+
+
+def set_cpu_allocation_ratio():
+    nova_file = '/etc/nova/nova.conf'
+    nova_file_bak = '/etc/nova/nova.bak'
+
+    if not os.path.isfile(nova_file):
+        raise Exception("File doesn't exist: %s." % nova_file)
+    # TODO (tojuvone): Unfortunately ConfigParser did not produce working conf
+    fcheck = open(nova_file)
+    found_list = ([ca for ca in fcheck.readlines() if "cpu_allocation_ratio"
+                  in ca])
+    fcheck.close()
+    change = False
+    found = False
+    if found_list:
+        for car in found_list:
+            if car.startswith('#'):
+                continue
+            if car.startswith('cpu_allocation_ratio'):
+                found = True
+                if "1.0" not in car.split('=')[1]:
+                    change = True
+    if not found or change:
+        # need to add or change
+        shutil.copyfile(nova_file, nova_file_bak)
+        fin = open(nova_file_bak)
+        fout = open(nova_file, "wt")
+        for line in fin:
+            if change and line.startswith("cpu_allocation_ratio"):
+                line = "cpu_allocation_ratio=1.0"
+            if not found and line.startswith("[DEFAULT]"):
+                line += "cpu_allocation_ratio=1.0\n"
+            fout.write(line)
+        fin.close()
+        fout.close()
+
+set_cpu_allocation_ratio()
diff --git a/doctor_tests/installer/common/set_config.py b/doctor_tests/installer/common/set_config.py
new file mode 100644 (file)
index 0000000..4246524
--- /dev/null
@@ -0,0 +1,139 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import shutil
+import yaml
+
+ep_file = '/etc/ceilometer/event_pipeline.yaml'
+ep_file_bak = '/etc/ceilometer/event_pipeline.yaml.bak'
+event_notifier_topic = 'notifier://?topic=alarm.all'
+
+
+def set_notifier_topic():
+    config_modified = False
+
+    if not os.path.isfile(ep_file):
+        raise Exception("File doesn't exist: %s." % ep_file)
+
+    with open(ep_file, 'r') as file:
+        config = yaml.safe_load(file)
+
+    sinks = config['sinks']
+    for sink in sinks:
+        if sink['name'] == 'event_sink':
+            publishers = sink['publishers']
+            if event_notifier_topic not in publishers:
+                print('Add event notifier in ceilometer')
+                publishers.append(event_notifier_topic)
+                config_modified = True
+            else:
+                print('NOTE: event notifier is configured '
+                      'in ceilometer as needed')
+
+    if config_modified:
+        shutil.copyfile(ep_file, ep_file_bak)
+        with open(ep_file, 'w+') as file:
+            file.write(yaml.safe_dump(config))
+
+
+def set_maintenance_event_definitions():
+    ed_file = '/etc/ceilometer/event_definitions.yaml'
+    ed_file_bak = '/etc/ceilometer/event_definitions.bak'
+
+    if not os.path.isfile(ed_file):
+        raise Exception("File doesn't exist: %s." % ed_file)
+
+    with open(ed_file, 'r') as file:
+        config = yaml.safe_load(file)
+
+    et_list = [et['event_type'] for et in config]
+
+    if 'maintenance.scheduled' in et_list:
+        add_mscheduled = False
+        print('NOTE: maintenance.scheduled already configured')
+    else:
+        print('NOTE: add maintenance.scheduled to event_definitions.yaml')
+        add_mscheduled = True
+        mscheduled = {
+            'event_type': 'maintenance.scheduled',
+            'traits': {
+                'allowed_actions': {'fields': 'payload.allowed_actions'},
+                'instance_ids': {'fields': 'payload.instance_ids'},
+                'reply_url': {'fields': 'payload.reply_url'},
+                'actions_at': {'fields': 'payload.actions_at',
+                               'type': 'datetime'},
+                'state': {'fields': 'payload.state'},
+                'session_id': {'fields': 'payload.session_id'},
+                'project_id': {'fields': 'payload.project_id'},
+                'metadata': {'fields': 'payload.metadata'}
+            }
+        }
+        config.append(mscheduled)
+
+    if 'maintenance.host' in et_list:
+        add_mhost = False
+        print('NOTE: maintenance.host already configured')
+    else:
+        print('NOTE: add maintenance.host to event_definitions.yaml')
+        add_mhost = True
+        mhost = {
+            'event_type': 'maintenance.host',
+            'traits': {
+                'host': {'fields': 'payload.host'},
+                'project_id': {'fields': 'payload.project_id'},
+                'state': {'fields': 'payload.state'},
+                'session_id': {'fields': 'payload.session_id'}
+            }
+        }
+        config.append(mhost)
+
+    if add_mscheduled or add_mhost:
+        shutil.copyfile(ed_file, ed_file_bak)
+        with open(ed_file, 'w+') as file:
+            file.write(yaml.safe_dump(config))
+
+
+def set_cpu_allocation_ratio():
+    nova_file = '/etc/nova/nova.conf'
+    nova_file_bak = '/etc/nova/nova.bak'
+
+    if not os.path.isfile(nova_file):
+        raise Exception("File doesn't exist: %s." % nova_file)
+    # TODO (tojuvone): Unfortunately ConfigParser did not produce working conf
+    fcheck = open(nova_file)
+    found_list = ([ca for ca in fcheck.readlines() if "cpu_allocation_ratio"
+                  in ca])
+    fcheck.close()
+    change = False
+    found = False
+    if found_list:
+        for car in found_list:
+            if car.startswith('#'):
+                continue
+            if car.startswith('cpu_allocation_ratio'):
+                found = True
+                if "1.0" not in car.split('=')[1]:
+                    change = True
+    if not found or change:
+        # need to add or change
+        shutil.copyfile(nova_file, nova_file_bak)
+        fin = open(nova_file_bak)
+        fout = open(nova_file, "wt")
+        for line in fin:
+            if change and line.startswith("cpu_allocation_ratio"):
+                line = "cpu_allocation_ratio=1.0"
+            if not found and line.startswith("[DEFAULT]"):
+                line += "cpu_allocation_ratio=1.0\n"
+            fout.write(line)
+        fin.close()
+        fout.close()
+
+set_notifier_topic()
+set_maintenance_event_definitions()
+set_cpu_allocation_ratio()
index 79480ca..2a8abda 100644 (file)
@@ -10,6 +10,7 @@ import os
 from os.path import isfile, join
 import sys
 import time
+from traceback import format_exc
 
 from doctor_tests import config
 from doctor_tests.identity_auth import get_identity_auth
@@ -17,8 +18,9 @@ from doctor_tests.identity_auth import get_session
 from doctor_tests.image import Image
 from doctor_tests.installer import get_installer
 import doctor_tests.logger as doctor_log
-from doctor_tests.os_clients import nova_client
 from doctor_tests.scenario.fault_management import FaultManagement
+from doctor_tests.os_clients import nova_client
+from doctor_tests.scenario.maintenance import Maintenance
 from doctor_tests.user import User
 
 
@@ -92,20 +94,33 @@ class DoctorTest(object):
             LOG.info('not enough compute nodes, skipping doctor '
                      'maintenance test')
             return
+        elif self.conf.installer.type != 'apex':
+            LOG.info('not supported installer, skipping doctor '
+                     'maintenance test')
+            return
         try:
             LOG.info('doctor maintenance test starting.......')
-            # TODO (tojuvone) test setup and actual test
+
+            maintenance = Maintenance(self.conf, LOG)
+            maintenance.setup_maintenance(self.user)
+
+            # TODO (tojuvone) actual test
+
         except Exception as e:
             LOG.error('doctor maintenance test failed, Exception=%s' % e)
+            LOG.error(format_exc())
             sys.exit(1)
-        # TODO (tojuvone) finally: test case specific cleanup
+        finally:
+            maintenance.cleanup_maintenance()
 
     def run(self):
         """run doctor tests"""
         try:
             LOG.info('doctor test starting.......')
+
             # prepare common test env
             self.setup()
+
             if self.conf.test_case == 'all':
                 self.test_fault_management()
                 self.test_maintenance()
diff --git a/doctor_tests/maintenance_hot_tpl.yaml b/doctor_tests/maintenance_hot_tpl.yaml
new file mode 100644 (file)
index 0000000..e2e4702
--- /dev/null
@@ -0,0 +1,119 @@
+---
+heat_template_version: 2017-02-24
+description: Doctor Maintenance test case
+
+parameters:
+  ext_net:
+    type: string
+    default: external
+  flavor_vcpus:
+    type: number
+    default: 24
+  maint_image:
+    type: string
+    default: cirros
+  ha_instances:
+    type: number
+    default: 2
+  nonha_instances:
+    type: number
+    default: 4
+  app_manager_alarm_url:
+    type: string
+    default: http://0.0.0.0:12348/maintenance
+  inspector_alarm_url:
+    type: string
+    default: http://0.0.0.0:12345/maintenance
+
+
+resources:
+  int_net:
+    type: OS::Neutron::Net
+
+  int_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network_id: {get_resource: int_net}
+      cidr: "9.9.9.0/24"
+      dns_nameservers: ["8.8.8.8"]
+      ip_version: 4
+
+  int_router:
+    type: OS::Neutron::Router
+    properties:
+      external_gateway_info: {network: {get_param: ext_net}}
+
+  int_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: {get_resource: int_router}
+      subnet: {get_resource: int_subnet}
+
+  maint_instance_flavor:
+    type: OS::Nova::Flavor
+    properties:
+      name: doctor_maint_flavor
+      ram: 512
+      vcpus: {get_param: flavor_vcpus}
+      disk: 1
+
+  ha_app_svrgrp:
+    type: OS::Nova::ServerGroup
+    properties:
+      name: doctor_ha_app_group
+      policies: ['anti-affinity']
+
+  floating_ip:
+    type: OS::Nova::FloatingIP
+    properties:
+      pool: {get_param: ext_net}
+
+  multi_ha_instances:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: {get_param: ha_instances}
+      resource_def:
+        type: OS::Nova::Server
+        properties:
+          name: doctor_ha_app_%index%
+          flavor: {get_resource: maint_instance_flavor}
+          image: {get_param: maint_image}
+          networks:
+            - network: {get_resource: int_net}
+          scheduler_hints:
+            group: {get_resource: ha_app_svrgrp}
+
+  multi_nonha_instances:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: {get_param: nonha_instances}
+      resource_def:
+        type: OS::Nova::Server
+        properties:
+          name: doctor_nonha_app_%index%
+          flavor: {get_resource: maint_instance_flavor}
+          image: {get_param: maint_image}
+          networks:
+            - network: {get_resource: int_net}
+
+  association:
+    type: OS::Nova::FloatingIPAssociation
+    properties:
+      floating_ip: {get_resource: floating_ip}
+      server_id: {get_attr: [multi_ha_instances, resource.0]}
+
+  app_manager_alarm:
+    type: OS::Aodh::EventAlarm
+    properties:
+      alarm_actions:
+        - {get_param: app_manager_alarm_url}
+      event_type: "maintenance.scheduled"
+      repeat_actions: true
+
+  inspector_alarm:
+    type: OS::Aodh::EventAlarm
+    properties:
+      alarm_actions:
+        - {get_param: inspector_alarm_url}
+      event_type: "maintenance.host"
+      repeat_actions: true
index 640281d..7ab4e9b 100644 (file)
@@ -11,6 +11,7 @@ from oslo_config import cfg
 import aodhclient.client as aodhclient\r
 from congressclient.v1 import client as congressclient\r
 import glanceclient.client as glanceclient\r
+import heatclient.client as heatclient\r
 from keystoneclient import client as ks_client\r
 from neutronclient.v2_0 import client as neutronclient\r
 import novaclient.client as novaclient\r
@@ -23,6 +24,7 @@ OPTS = [
     cfg.StrOpt('aodh_version', default='2', help='aodh version'),\r
     cfg.StrOpt('vitrage_version', default='1', help='vitrage version'),\r
     cfg.StrOpt('keystone_version', default='v3', help='keystone version'),\r
+    cfg.StrOpt('heat_version', default='1', help='heat version'),\r
 ]\r
 \r
 \r
@@ -31,6 +33,11 @@ def glance_client(version, session):
                                session=session)\r
 \r
 \r
+def heat_client(version, session):\r
+    return heatclient.Client(version=version,\r
+                             session=session)\r
+\r
+\r
 def keystone_client(version, session):\r
     return ks_client.Client(version=version,\r
                             session=session)\r
diff --git a/doctor_tests/scenario/maintenance.py b/doctor_tests/scenario/maintenance.py
new file mode 100644 (file)
index 0000000..bb0e943
--- /dev/null
@@ -0,0 +1,103 @@
+##############################################################################
+# Copyright (c) 2018 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from doctor_tests.common.utils import get_doctor_test_root_dir
+from doctor_tests.identity_auth import get_identity_auth
+from doctor_tests.identity_auth import get_session
+from doctor_tests.os_clients import keystone_client
+from doctor_tests.os_clients import neutron_client
+from doctor_tests.os_clients import nova_client
+from doctor_tests.stack import Stack
+
+
+class Maintenance(object):
+
+    def __init__(self, conf, log):
+        self.conf = conf
+        self.log = log
+        self.keystone = keystone_client(
+            self.conf.keystone_version, get_session())
+        self.nova = nova_client(conf.nova_version, get_session())
+        auth = get_identity_auth(project=self.conf.doctor_project)
+        self.neutron = neutron_client(get_session(auth=auth))
+        self.stack = Stack(self.conf, self.log)
+
+    def get_external_network(self):
+        ext_net = None
+        networks = self.neutron.list_networks()['networks']
+        for network in networks:
+            if network['router:external']:
+                ext_net = network['name']
+                break
+        if ext_net is None:
+            raise Exception("externl network not defined")
+        return ext_net
+
+    def setup_maintenance(self, user):
+        # each hypervisor needs to have the same amount of vcpus and
+        # they all need to be free before the test
+        hvisors = self.nova.hypervisors.list(detailed=True)
+        prev_vcpus = 0
+        prev_hostname = ""
+        self.log.info('checking hypervisors.......')
+        for hvisor in hvisors:
+            vcpus = hvisor.__getattr__("vcpus")
+            vcpus_used = hvisor.__getattr__("vcpus_used")
+            hostname = hvisor.__getattr__("hypervisor_hostname")
+            if vcpus < 2:
+                raise Exception('not enough vcpus on %s' % hostname)
+            if vcpus_used > 0:
+                raise Exception('%d vcpus used on %s'
+                                % (vcpus_used, hostname))
+            if prev_vcpus != 0 and prev_vcpus != vcpus:
+                raise Exception('%d vcpus on %s does not match to'
+                                ' %d on %s'
+                                % (vcpus, hostname,
+                                   prev_vcpus, prev_hostname))
+            prev_vcpus = vcpus
+            prev_hostname = hostname
+
+        # maintenance flavor made so that 2 instances take whole node
+        flavor_vcpus = int(vcpus / 2)
+        compute_nodes = len(hvisors)
+        amount_actstdby_instances = 2
+        amount_noredundancy_instances = 2 * compute_nodes - 2
+        self.log.info('testing %d computes with %d vcpus each'
+                      % (compute_nodes, vcpus))
+        self.log.info('testing %d actstdby and %d noredundancy instances'
+                      % (amount_actstdby_instances,
+                         amount_noredundancy_instances))
+        max_instances = (amount_actstdby_instances +
+                         amount_noredundancy_instances)
+        max_cores = compute_nodes * vcpus
+
+        user.update_quota(max_instances, max_cores)
+
+        test_dir = get_doctor_test_root_dir()
+        template_file = '{0}/{1}'.format(test_dir, 'maintenance_hot_tpl.yaml')
+        files, template = self.stack.get_hot_tpl(template_file)
+
+        ext_net = self.get_external_network()
+
+        parameters = {'ext_net': ext_net,
+                      'flavor_vcpus': flavor_vcpus,
+                      'maint_image': self.conf.image_name,
+                      'nonha_instances': amount_noredundancy_instances,
+                      'ha_instances': amount_actstdby_instances}
+
+        self.log.info('creating maintenance stack.......')
+        self.log.info('parameters: %s' % parameters)
+
+        self.stack.create('doctor_test_maintenance',
+                          template,
+                          parameters=parameters,
+                          files=files)
+
+    def cleanup_maintenance(self):
+        self.log.info('stack delete start.......')
+        self.stack.delete()
diff --git a/doctor_tests/stack.py b/doctor_tests/stack.py
new file mode 100644 (file)
index 0000000..688c205
--- /dev/null
@@ -0,0 +1,106 @@
+##############################################################################
+# Copyright (c) 2018 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import time
+
+from heatclient.common.template_utils import get_template_contents
+from heatclient import exc as heat_exception
+
+from doctor_tests.identity_auth import get_identity_auth
+from doctor_tests.identity_auth import get_session
+from doctor_tests.os_clients import heat_client
+
+
+class Stack(object):
+
+    def __init__(self, conf, log):
+        self.conf = conf
+        self.log = log
+        auth = get_identity_auth(project=self.conf.doctor_project)
+        self.heat = heat_client(self.conf.heat_version,
+                                get_session(auth=auth))
+        self.stack_name = None
+        self.stack_id = None
+        self.template = None
+        self.parameters = {}
+        self.files = {}
+
+    # standard yaml.load will not work for a HOT template because the date
+    # in heat_template_version is not a string
+    def get_hot_tpl(self, template_file):
+        if not os.path.isfile(template_file):
+            raise Exception('File(%s) does not exist' % template_file)
+        return get_template_contents(template_file=template_file)
+
+    def _wait_stack_action_complete(self, action):
+        action_in_progress = '%s_IN_PROGRESS' % action
+        action_complete = '%s_COMPLETE' % action
+        action_failed = '%s_FAILED' % action
+
+        status = action_in_progress
+        stack_retries = 150
+        while status == action_in_progress and stack_retries > 0:
+            time.sleep(2)
+            try:
+                stack = self.heat.stacks.get(self.stack_name)
+            except heat_exception.HTTPNotFound:
+                if action == 'DELETE':
+                    # the stack may already be gone before we see the status
+                    status = action_complete
+                    break
+                else:
+                    raise Exception('unable to get stack')
+            status = stack.stack_status
+            stack_retries = stack_retries - 1
+        if stack_retries == 0 and status != action_complete:
+            raise Exception("stack %s not completed within 5min, status:"
+                            " %s" % (action, status))
+        elif status == action_complete:
+            self.log.info('stack %s %s' % (self.stack_name, status))
+        elif status == action_failed:
+            raise Exception("stack %s failed" % action)
+        else:
+            self.log.error('stack %s %s' % (self.stack_name, status))
+            raise Exception("stack %s unknown result" % action)
+
+    def wait_stack_delete(self):
+        self._wait_stack_action_complete('DELETE')
+
+    def wait_stack_create(self):
+        self._wait_stack_action_complete('CREATE')
+
+    def wait_stack_update(self):
+        self._wait_stack_action_complete('UPDATE')
+
+    def create(self, stack_name, template, parameters={}, files={}):
+        self.stack_name = stack_name
+        self.template = template
+        self.parameters = parameters
+        self.files = files
+        stack = self.heat.stacks.create(stack_name=self.stack_name,
+                                        files=files,
+                                        template=template,
+                                        parameters=parameters)
+        self.stack_id = stack['stack']['id']
+        self.wait_stack_create()
+
+    def update(self, stack_name, stack_id, template, parameters={}, files={}):
+        self.heat.stacks.update(stack_name=stack_name,
+                                stack_id=stack_id,
+                                files=files,
+                                template=template,
+                                parameters=parameters)
+        self.wait_stack_update()
+
+    def delete(self):
+        if self.stack_id is not None:
+            self.heat.stacks.delete(self.stack_name)
+            self.wait_stack_delete()
+        else:
+            self.log.info('no stack to delete')
index fee3e1f..29aa004 100644 (file)
@@ -8,12 +8,12 @@
 ##############################################################################
 import os
 
+from keystoneclient import exceptions as ks_exceptions
 from oslo_config import cfg
 
 from doctor_tests.identity_auth import get_session
 from doctor_tests.os_clients import keystone_client
 from doctor_tests.os_clients import nova_client
-from keystoneclient import exceptions as ks_exceptions
 
 
 OPTS = [
@@ -53,10 +53,11 @@ class User(object):
     def __init__(self, conf, log):
         self.conf = conf
         self.log = log
+        self.def_quota = None
+        self.restore_def_quota = False
         self.keystone = keystone_client(
             self.conf.keystone_version, get_session())
-        self.nova = \
-            nova_client(conf.nova_version, get_session())
+        self.nova = nova_client(conf.nova_version, get_session())
         self.users = {}
         self.projects = {}
         self.roles = {}
@@ -83,10 +84,9 @@ class User(object):
                              domain=self.conf.doctor_domain_id)}
         if self.conf.doctor_project not in self.projects:
             self.log.info('create project......')
-            test_project = \
-                self.keystone.projects.create(
-                    self.conf.doctor_project,
-                    self.conf.doctor_domain_id)
+            test_project = self.keystone.projects.create(
+                self.conf.doctor_project,
+                self.conf.doctor_domain_id)
             self.projects[test_project.name] = test_project
         else:
             self.log.info('project %s already created......'
@@ -151,6 +151,13 @@ class User(object):
             self.keystone.roles.grant(role, user=user, project=project)
             roles_for_user[role_name] = role
 
+    def _restore_default_quota(self):
+        if self.def_quota is not None and self.restore_def_quota:
+            self.log.info('restore default quota......')
+            self.nova.quota_classes.update('default',
+                                           instances=self.def_quota.instances,
+                                           cores=self.def_quota.cores)
+
     def delete(self):
         """delete the test user, project and role"""
         self.log.info('user delete start......')
@@ -159,6 +166,8 @@ class User(object):
         user = self.users.get(self.conf.doctor_user)
         role = self.roles.get(self.conf.doctor_role)
 
+        self._restore_default_quota()
+
         if project:
             if 'admin' in self.roles_for_admin:
                 self.keystone.roles.revoke(
@@ -177,23 +186,45 @@ class User(object):
             self.keystone.projects.delete(project)
         self.log.info('user delete end......')
 
-    def update_quota(self):
-        self.log.info('user quota update start......')
+    def update_quota(self, instances=None, cores=None):
+        self.log.info('quota update start......')
         project = self.projects.get(self.conf.doctor_project)
+
         user = self.users.get(self.conf.doctor_user)
 
+        if instances is not None:
+            quota_instances = instances
+        else:
+            quota_instances = self.conf.quota_instances
+        if cores is not None:
+            quota_cores = cores
+        else:
+            quota_cores = self.conf.quota_cores
+
         if project and user:
+            # default needs to be at least the same as with doctor_user
+            self.log.info('default quota update start......')
+
+            self.def_quota = self.nova.quota_classes.get('default')
+            if quota_instances > self.def_quota.instances:
+                self.restore_def_quota = True
+                self.nova.quota_classes.update('default',
+                                               instances=quota_instances)
+            if quota_cores > self.def_quota.cores:
+                self.restore_def_quota = True
+                self.nova.quota_classes.update('default',
+                                               cores=quota_cores)
+            self.log.info('user quota update start......')
             self.quota = self.nova.quotas.get(project.id,
                                               user_id=user.id)
-            if self.conf.quota_instances > self.quota.instances:
-                self.nova.quotas.update(
-                    project.id,
-                    instances=self.conf.quota_instances,
-                    user_id=user.id)
-            if self.conf.quota_cores > self.quota.cores:
+            if quota_instances > self.quota.instances:
+                self.nova.quotas.update(project.id,
+                                        instances=quota_instances,
+                                        user_id=user.id)
+            if quota_cores > self.quota.cores:
                 self.nova.quotas.update(project.id,
-                                        cores=self.conf.quota_cores,
+                                        cores=quota_cores,
                                         user_id=user.id)
-            self.log.info('user quota update end......')
         else:
             raise Exception('No project or role for update quota')
+        self.log.info('quota update end......')
index b60878f..4d2ffff 100644 (file)
@@ -16,4 +16,5 @@ python-congressclient<2000,>=1.9.0 # Apache-2.0
 python-glanceclient>=2.8.0 # Apache-2.0
 python-vitrageclient>=2.0.0 # Apache-2.0
 virtualenv>=14.0.6 # MIT
+python-heatclient>=1.8.2 # Apache-2.0
 flake8<2.6.0,>=2.5.4 # MIT