Support Fenix as admin tool
[doctor.git] / doctor_tests / installer / apex.py
index 31850a7..3ec2100 100644 (file)
@@ -6,10 +6,11 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import re
 import time
 
 from doctor_tests.common.constants import Inspector
+from doctor_tests.common.constants import is_fenix
+from doctor_tests.common.utils import get_doctor_test_root_dir
 from doctor_tests.common.utils import SSHClient
 from doctor_tests.installer.base import BaseInstaller
 
@@ -20,9 +21,13 @@ class ApexInstaller(BaseInstaller):
     cm_set_script = 'set_config.py'
     nc_set_compute_script = 'set_compute_config.py'
     cg_set_script = 'set_congress.py'
+    fe_set_script = 'set_fenix.sh'
     cm_restore_script = 'restore_config.py'
     nc_restore_compute_script = 'restore_compute_config.py'
     cg_restore_script = 'restore_congress.py'
+    ac_restart_script = 'restart_aodh.py'
+    ac_restore_script = 'restore_aodh.py'
+    python = 'python'
 
     def __init__(self, conf, log):
         super(ApexInstaller, self).__init__(conf, log)
@@ -33,13 +38,13 @@ class ApexInstaller(BaseInstaller):
         self.key_file = None
         self.controllers = list()
         self.computes = list()
-        self.controller_clients = list()
-        self.compute_clients = list()
 
     def setup(self):
         self.log.info('Setup Apex installer start......')
         self.key_file = self.get_ssh_key_from_installer()
-        self._get_and_set_ips()
+        self._get_overcloud_conf()
+        if is_fenix(self.conf):
+            self._copy_overcloudrc_to_controllers()
         self.create_flavor()
         self.set_apply_patches()
         self.setup_stunnel()
@@ -53,8 +58,13 @@ class ApexInstaller(BaseInstaller):
         key_path = '/home/stack/.ssh/id_rsa'
         return self._get_ssh_key(self.client, key_path)
 
-    def _get_and_set_ips(self):
-        self.log.info('Get controller and compute ips from Apex installer'
+    def _copy_overcloudrc_to_controllers(self):
+        for ip in self.controllers:
+            cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
+            self._run_cmd_remote(self.client, cmd)
+
+    def _get_overcloud_conf(self):
+        self.log.info('Get overcloud config details from Apex installer'
                       '......')
 
         command = "source stackrc; nova list | grep ' overcloud-'"
@@ -65,8 +75,11 @@ class ApexInstaller(BaseInstaller):
                 self.controllers.append(ip)
             elif 'overcloud-novacompute-' in line:
                 self.computes.append(ip)
+        command = "grep docker /home/stack/deploy_command"
+        self.use_containers = self._check_cmd_remote(self.client, command)
         self.log.info('controller_ips:%s' % self.controllers)
         self.log.info('compute_ips:%s' % self.computes)
+        self.log.info('use_containers:%s' % self.use_containers)
 
     def get_host_ip_from_hostname(self, hostname):
         self.log.info('Get host ip by hostname=%s from Apex installer......'
@@ -77,83 +90,121 @@ class ApexInstaller(BaseInstaller):
         host_ips = self._run_cmd_remote(self.client, command)
         return host_ips[0]
 
-    def get_transport_url(self):
-        client = SSHClient(self.controllers[0], self.node_user_name,
-                           key_filename=self.key_file)
-
-        command = 'sudo grep "^transport_url" /etc/nova/nova.conf'
-        ret, url = client.ssh(command)
-        if ret:
-            raise Exception('Exec command to get host ip from controller(%s)'
-                            'in Apex installer failed, ret=%s, output=%s'
-                            % (self.controllers[0], ret, url))
-        # need to use ip instead of hostname
-        ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
-               url[0].split("=", 1)[1]))
-        self.log.debug('get_transport_url %s' % ret)
-        return ret
+    def _set_docker_restart_cmd(self, service):
+        # There can be multiple instances running so need to restart all
+        cmd = "for container in `sudo docker ps | grep "
+        cmd += service
+        cmd += " | awk '{print $1}'`; do sudo docker restart $container; \
+               done;"
+        return cmd
 
     def set_apply_patches(self):
         self.log.info('Set apply patches start......')
-
-        restart_cmd = 'sudo systemctl restart' \
-                      ' openstack-ceilometer-notification.service'
+        fenix_files = None
 
         set_scripts = [self.cm_set_script]
 
+        if self.use_containers:
+            restart_cmd = (self._set_docker_restart_cmd(
+                           "ceilometer-notification"))
+            set_scripts.append(self.ac_restart_script)
+        else:
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-ceilometer-notification.service'
+
         if self.conf.test_case != 'fault_management':
-            restart_cmd += ' openstack-nova-scheduler.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+                if is_fenix(self.conf):
+                    set_scripts.append(self.fe_set_script)
+                    testdir = get_doctor_test_root_dir()
+                    fenix_files = ["Dockerfile", "run"]
+            else:
+                restart_cmd += ' openstack-nova-scheduler.service'
+            set_scripts.append(self.nc_set_compute_script)
 
         if self.conf.inspector.type == Inspector.CONGRESS:
-            restart_cmd += ' openstack-congress-server.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("congress-server")
+            else:
+                restart_cmd += ' openstack-congress-server.service'
             set_scripts.append(self.cg_set_script)
 
         for node_ip in self.controllers:
             client = SSHClient(node_ip, self.node_user_name,
                                key_filename=self.key_file)
-            self.controller_clients.append(client)
+            if fenix_files is not None:
+                for fenix_file in fenix_files:
+                    src_file = '{0}/{1}/{2}'.format(testdir,
+                                                    'admin_tool/fenix',
+                                                    fenix_file)
+                    client.scp(src_file, fenix_file)
             self._run_apply_patches(client,
                                     restart_cmd,
-                                    set_scripts)
+                                    set_scripts,
+                                    python=self.python)
+        time.sleep(5)
+
+        self.log.info('Set apply patches to compute nodes......')
 
         if self.conf.test_case != 'fault_management':
-            restart_cmd = 'sudo systemctl restart' \
-                          ' openstack-nova-compute.service'
+            if self.use_containers:
+                restart_cmd = self._set_docker_restart_cmd("nova")
+            else:
+                restart_cmd = 'sudo systemctl restart' \
+                              ' openstack-nova-compute.service'
             for node_ip in self.computes:
                 client = SSHClient(node_ip, self.node_user_name,
                                    key_filename=self.key_file)
-                self.compute_clients.append(client)
                 self._run_apply_patches(client,
                                         restart_cmd,
-                                        [self.nc_set_compute_script])
-
-        if self.conf.test_case != 'fault_management':
-            time.sleep(10)
+                                        [self.nc_set_compute_script],
+                                        python=self.python)
+            time.sleep(5)
 
     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
 
-        restart_cmd = 'sudo systemctl restart' \
-                      ' openstack-ceilometer-notification.service'
-
         restore_scripts = [self.cm_restore_script]
 
+        if self.use_containers:
+            restart_cmd = (self._set_docker_restart_cmd(
+                           "ceilometer-notification"))
+            restore_scripts.append(self.ac_restore_script)
+        else:
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-ceilometer-notification.service'
+
         if self.conf.test_case != 'fault_management':
-            restart_cmd += ' openstack-nova-scheduler.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+            else:
+                restart_cmd += ' openstack-nova-scheduler.service'
+            restore_scripts.append(self.nc_restore_compute_script)
 
         if self.conf.inspector.type == Inspector.CONGRESS:
-            restart_cmd += ' openstack-congress-server.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("congress-server")
+            else:
+                restart_cmd += ' openstack-congress-server.service'
             restore_scripts.append(self.cg_restore_script)
 
-        for client in self.controller_clients:
+        for node_ip in self.controllers:
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
             self._run_apply_patches(client,
                                     restart_cmd,
-                                    restore_scripts)
+                                    restore_scripts,
+                                    python=self.python)
 
         if self.conf.test_case != 'fault_management':
-            restart_cmd = 'sudo systemctl restart' \
-                          ' openstack-nova-compute.service'
-            for client in self.compute_clients:
-                self._run_apply_patches(client,
-                                        restart_cmd,
-                                        [self.nc_restore_compute_script])
+            if self.use_containers:
+                restart_cmd = self._set_docker_restart_cmd("nova-compute")
+            else:
+                restart_cmd = 'sudo systemctl restart' \
+                              ' openstack-nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(client, restart_cmd,
+                    [self.nc_restore_compute_script], python=self.python)