Fix the username used to log in to the undercloud in Apex
[doctor.git] / doctor_tests / installer / apex.py
index aaacb38..31850a7 100644 (file)
@@ -6,39 +6,41 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import getpass
-import grp
-import os
-import pwd
-import stat
-import subprocess
-
-from doctor_tests.common.utils import get_doctor_test_root_dir
+import re
+import time
+
+from doctor_tests.common.constants import Inspector
 from doctor_tests.common.utils import SSHClient
 from doctor_tests.installer.base import BaseInstaller
 
 
 class ApexInstaller(BaseInstaller):
     node_user_name = 'heat-admin'
-    cm_set_script = 'set_ceilometer.py'
-    cm_restore_script = 'restore_ceilometer.py'
+    installer_username = 'stack'
+    cm_set_script = 'set_config.py'
+    nc_set_compute_script = 'set_compute_config.py'
+    cg_set_script = 'set_congress.py'
+    cm_restore_script = 'restore_config.py'
+    nc_restore_compute_script = 'restore_compute_config.py'
+    cg_restore_script = 'restore_congress.py'
 
     def __init__(self, conf, log):
         super(ApexInstaller, self).__init__(conf, log)
         self.client = SSHClient(self.conf.installer.ip,
-                                self.conf.installer.username,
+                                self.installer_username,
+                                key_filename=self.conf.installer.key_file,
                                 look_for_keys=True)
         self.key_file = None
         self.controllers = list()
+        self.computes = list()
         self.controller_clients = list()
-        self.servers = list()
-        self.test_dir = get_doctor_test_root_dir()
+        self.compute_clients = list()
 
     def setup(self):
         self.log.info('Setup Apex installer start......')
-
-        self.get_ssh_key_from_installer()
-        self.get_controller_ips()
+        self.key_file = self.get_ssh_key_from_installer()
+        self._get_and_set_ips()
+        self.create_flavor()
         self.set_apply_patches()
         self.setup_stunnel()
 
@@ -48,97 +50,110 @@ class ApexInstaller(BaseInstaller):
             server.terminate()
 
     def get_ssh_key_from_installer(self):
-        self.log.info('Get SSH keys from Apex installer......')
-
-        if self.key_file is not None:
-            self.log.info('Already have SSH keys from Apex installer......')
-            return self.key_file
-
-        ssh_key = '{0}/{1}'.format(self.test_dir, 'instack_key')
-        self.client.scp('/home/stack/.ssh/id_rsa', ssh_key, method='get')
-        user = getpass.getuser()
-        uid = pwd.getpwnam(user).pw_uid
-        gid = grp.getgrnam(user).gr_gid
-        os.chown(ssh_key, uid, gid)
-        os.chmod(ssh_key, stat.S_IREAD)
-        self.key_file = ssh_key
-        return self.key_file
-
-    def get_controller_ips(self):
-        self.log.info('Get controller ips from Apex installer......')
-
-        command = "source stackrc; " \
-                  "nova list | grep ' overcloud-controller-[0-9] ' " \
-                  "| sed -e 's/^.*ctlplane=//' |awk '{print $1}'"
-        ret, controllers = self.client.ssh(command)
-        if ret:
-            raise Exception('Exec command to get controller ips'
-                            'in Apex installer failed, ret=%s, output=%s'
-                            % (ret, controllers))
-        self.log.info('Get controller_ips:%s from Apex installer'
-                      % controllers)
-        self.controllers = controllers
+        key_path = '/home/stack/.ssh/id_rsa'
+        return self._get_ssh_key(self.client, key_path)
+
+    def _get_and_set_ips(self):
+        self.log.info('Get controller and compute ips from Apex installer'
+                      '......')
+
+        command = "source stackrc; nova list | grep ' overcloud-'"
+        raw_ips_list = self._run_cmd_remote(self.client, command)
+        for line in raw_ips_list:
+            ip = line.split('ctlplane=', 1)[1].split(" ", 1)[0]
+            if 'overcloud-controller-' in line:
+                self.controllers.append(ip)
+            elif 'overcloud-novacompute-' in line:
+                self.computes.append(ip)
+        self.log.info('controller_ips:%s' % self.controllers)
+        self.log.info('compute_ips:%s' % self.computes)
 
     def get_host_ip_from_hostname(self, hostname):
-        self.log.info('Get host ip from host name in Apex installer......')
+        self.log.info('Get host ip by hostname=%s from Apex installer......'
+                      % hostname)
 
         hostname_in_undercloud = hostname.split('.')[0]
-
         command = "source stackrc; nova show %s | awk '/ ctlplane network /{print $5}'" % (hostname_in_undercloud)   # noqa
-        ret, host_ip = self.client.ssh(command)
+        host_ips = self._run_cmd_remote(self.client, command)
+        return host_ips[0]
+
+    def get_transport_url(self):
+        client = SSHClient(self.controllers[0], self.node_user_name,
+                           key_filename=self.key_file)
+
+        command = 'sudo grep "^transport_url" /etc/nova/nova.conf'
+        ret, url = client.ssh(command)
         if ret:
-            raise Exception('Exec command to get host ip from hostname(%s)'
+            raise Exception('Exec command to get transport url from controller(%s) '
                             'in Apex installer failed, ret=%s, output=%s'
-                            % (hostname, ret, host_ip))
-        self.log.info('Get host_ip:%s from host_name:%s in Apex installer'
-                      % (host_ip, hostname))
-        return host_ip[0]
-
-    def setup_stunnel(self):
-        self.log.info('Setup ssh stunnel in controller nodes'
-                      ' in Apex installer......')
-        for node_ip in self.controllers:
-            cmd = ("ssh -o UserKnownHostsFile=/dev/null"
-                   "-o StrictHostKeyChecking=no"
-                   "-i %s %s@%s -R %s:localhost:%s"
-                   "sleep 600 > ssh_tunnel.%s.log"
-                   "2>&1 < /dev/null &"
-                   % (self.key_file,
-                      self.node_user_name,
-                      node_ip,
-                      self.conf.consumer.port,
-                      self.conf.consumer.port,
-                      node_ip))
-            server = subprocess.Popen(cmd, shell=True)
-            self.servers.append(server)
-            server.communicate()
+                            % (self.controllers[0], ret, url))
+        # need to use ip instead of hostname
+        ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
+               url[0].split("=", 1)[1]))
+        self.log.debug('get_transport_url %s' % ret)
+        return ret
 
     def set_apply_patches(self):
         self.log.info('Set apply patches start......')
 
+        restart_cmd = 'sudo systemctl restart' \
+                      ' openstack-ceilometer-notification.service'
+
+        set_scripts = [self.cm_set_script]
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd += ' openstack-nova-scheduler.service'
+
+        if self.conf.inspector.type == Inspector.CONGRESS:
+            restart_cmd += ' openstack-congress-server.service'
+            set_scripts.append(self.cg_set_script)
+
         for node_ip in self.controllers:
             client = SSHClient(node_ip, self.node_user_name,
                                key_filename=self.key_file)
             self.controller_clients.append(client)
-            self._ceilometer_apply_patches(client, self.cm_set_script)
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    set_scripts)
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self.compute_clients.append(client)
+                self._run_apply_patches(client,
+                                        restart_cmd,
+                                        [self.nc_set_compute_script])
+
+        if self.conf.test_case != 'fault_management':
+            time.sleep(10)
 
     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
 
-        for client in self.controller_clients:
-            self._ceilometer_apply_patches(client, self.cm_restore_script)
+        restart_cmd = 'sudo systemctl restart' \
+                      ' openstack-ceilometer-notification.service'
 
-    def _ceilometer_apply_patches(self, ssh_client, script_name):
-        installer_dir = os.path.dirname(os.path.realpath(__file__))
-        script_abs_path = '{0}/{1}/{2}'.format(installer_dir,
-                                               'common', script_name)
+        restore_scripts = [self.cm_restore_script]
 
-        ssh_client.scp(script_abs_path, script_name)
-        cmd = 'sudo python %s' % script_name
-        ret, output = ssh_client.ssh(cmd)
-        if ret:
-            raise Exception('Do the ceilometer command in controller'
-                            ' node failed, ret=%s, cmd=%s, output=%s'
-                            % (ret, cmd, output))
-        ssh_client.ssh('sudo systemctl restart '
-                       'openstack-ceilometer-notification.service')
+        if self.conf.test_case != 'fault_management':
+            restart_cmd += ' openstack-nova-scheduler.service'
+
+        if self.conf.inspector.type == Inspector.CONGRESS:
+            restart_cmd += ' openstack-congress-server.service'
+            restore_scripts.append(self.cg_restore_script)
+
+        for client in self.controller_clients:
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    restore_scripts)
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-nova-compute.service'
+            for client in self.compute_clients:
+                self._run_apply_patches(client,
+                                        restart_cmd,
+                                        [self.nc_restore_compute_script])