X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=doctor_tests%2Finstaller%2Fapex.py;h=3ec2100cf485d1751439fac36398b344933e7aaa;hb=73605c5c34b97ab56306bfa9af0f5888f3c7e46d;hp=2a1ce94bc4b642ccb0ea3b1622266e65a3ae96b5;hpb=44d1e135eced7afe13b8772a610ae5cdae310b68;p=doctor.git

diff --git a/doctor_tests/installer/apex.py b/doctor_tests/installer/apex.py
index 2a1ce94b..3ec2100c 100644
--- a/doctor_tests/installer/apex.py
+++ b/doctor_tests/installer/apex.py
@@ -6,37 +6,46 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import getpass
-import grp
-import os
-import pwd
-import stat
-import subprocess
+import time
 
+from doctor_tests.common.constants import Inspector
+from doctor_tests.common.constants import is_fenix
+from doctor_tests.common.utils import get_doctor_test_root_dir
 from doctor_tests.common.utils import SSHClient
 from doctor_tests.installer.base import BaseInstaller
 
 
 class ApexInstaller(BaseInstaller):
     node_user_name = 'heat-admin'
-    cm_set_script = 'set_ceilometer.py'
-    cm_restore_script = 'restore_ceilometer.py'
+    installer_username = 'stack'
+    cm_set_script = 'set_config.py'
+    nc_set_compute_script = 'set_compute_config.py'
+    cg_set_script = 'set_congress.py'
+    fe_set_script = 'set_fenix.sh'
+    cm_restore_script = 'restore_config.py'
+    nc_restore_compute_script = 'restore_compute_config.py'
+    cg_restore_script = 'restore_congress.py'
+    ac_restart_script = 'restart_aodh.py'
+    ac_restore_script = 'restore_aodh.py'
+    python = 'python'
 
     def __init__(self, conf, log):
         super(ApexInstaller, self).__init__(conf, log)
         self.client = SSHClient(self.conf.installer.ip,
-                                self.conf.installer.username,
+                                self.installer_username,
+                                key_filename=self.conf.installer.key_file,
                                 look_for_keys=True)
         self.key_file = None
         self.controllers = list()
-        self.controller_clients = list()
-        self.servers = list()
+        self.computes = list()
 
     def setup(self):
         self.log.info('Setup Apex installer start......')
-
-        self.get_ssh_key_from_installer()
-        self.get_controller_ips()
+        self.key_file = self.get_ssh_key_from_installer()
+        self._get_overcloud_conf()
+        if is_fenix(self.conf):
+            self._copy_overcloudrc_to_controllers()
+        self.create_flavor()
         self.set_apply_patches()
         self.setup_stunnel()
 
@@ -46,81 +55,156 @@ class ApexInstaller(BaseInstaller):
             server.terminate()
 
     def get_ssh_key_from_installer(self):
-        self.log.info('Get SSH keys from Apex installer......')
-
-        if self.key_file is not None:
-            self.log.info('Already have SSH keys from Apex installer......')
-            return self.key_file
-
-        self.client.scp('/home/stack/.ssh/id_rsa', './instack_key', method='get')
-        user = getpass.getuser()
-        uid = pwd.getpwnam(user).pw_uid
-        gid = grp.getgrnam(user).gr_gid
-        os.chown('./instack_key', uid, gid)
-        os.chmod('./instack_key', stat.S_IREAD)
-        current_dir = os.curdir
-        self.key_file = '{0}/{1}'.format(current_dir, 'instack_key')
-        return self.key_file
-
-    def get_controller_ips(self):
-        self.log.info('Get controller ips from Apex installer......')
-
-        command = "source stackrc; " \
-                  "nova list | grep ' overcloud-controller-[0-9] ' " \
-                  "| sed -e 's/^.*ctlplane=//' |awk '{print $1}'"
-        ret, controllers = self.client.ssh(command)
-        if ret:
-            raise Exception('Exec command to get controller ips in Apex installer failed'
-                            'ret=%s, output=%s' % (ret, controllers))
-        self.log.info('Get controller_ips:%s from Apex installer' % controllers)
-        self.controllers = controllers
+        key_path = '/home/stack/.ssh/id_rsa'
+        return self._get_ssh_key(self.client, key_path)
+
+    def _copy_overcloudrc_to_controllers(self):
+        for ip in self.controllers:
+            cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
+            self._run_cmd_remote(self.client, cmd)
+
+    def _get_overcloud_conf(self):
+        self.log.info('Get overcloud config details from Apex installer'
+                      '......')
+
+        command = "source stackrc; nova list | grep ' overcloud-'"
+        raw_ips_list = self._run_cmd_remote(self.client, command)
+        for line in raw_ips_list:
+            ip = line.split('ctlplane=', 1)[1].split(" ", 1)[0]
+            if 'overcloud-controller-' in line:
+                self.controllers.append(ip)
+            elif 'overcloud-novacompute-' in line:
+                self.computes.append(ip)
+        command = "grep docker /home/stack/deploy_command"
+        self.use_containers = self._check_cmd_remote(self.client, command)
+        self.log.info('controller_ips:%s' % self.controllers)
+        self.log.info('compute_ips:%s' % self.computes)
+        self.log.info('use_containers:%s' % self.use_containers)
 
     def get_host_ip_from_hostname(self, hostname):
-        self.log.info('Get host ip from host name in Apex installer......')
+        self.log.info('Get host ip by hostname=%s from Apex installer......'
+                      % hostname)
 
         hostname_in_undercloud = hostname.split('.')[0]
+        command = "source stackrc; nova show %s | awk '/ ctlplane network /{print $5}'" % (hostname_in_undercloud)  # noqa
+        host_ips = self._run_cmd_remote(self.client, command)
+        return host_ips[0]
+
+    def _set_docker_restart_cmd(self, service):
+        # There can be multiple instances running so need to restart all
+        cmd = "for container in `sudo docker ps | grep "
+        cmd += service
+        cmd += " | awk '{print $1}'`; do sudo docker restart $container; \
+               done;"
+        return cmd
 
-        command = "source stackrc; nova show %s | awk '/ ctlplane network /{print $5}'" % (hostname_in_undercloud)
-        ret, host_ip = self.client.ssh(command)
-        if ret:
-            raise Exception('Exec command to get host ip from hostname(%s) in Apex installer failed'
-                            'ret=%s, output=%s' % (hostname, ret, host_ip))
-        self.log.info('Get host_ip:%s from host_name:%s in Apex installer' % (host_ip, hostname))
-        return host_ip[0]
+    def set_apply_patches(self):
+        self.log.info('Set apply patches start......')
+        fenix_files = None
+
+        set_scripts = [self.cm_set_script]
+
+        if self.use_containers:
+            restart_cmd = (self._set_docker_restart_cmd(
+                           "ceilometer-notification"))
+            set_scripts.append(self.ac_restart_script)
+        else:
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-ceilometer-notification.service'
+
+        if self.conf.test_case != 'fault_management':
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+                if is_fenix(self.conf):
+                    set_scripts.append(self.fe_set_script)
+                    testdir = get_doctor_test_root_dir()
+                    fenix_files = ["Dockerfile", "run"]
+            else:
+                restart_cmd += ' openstack-nova-scheduler.service'
+            set_scripts.append(self.nc_set_compute_script)
+
+        if self.conf.inspector.type == Inspector.CONGRESS:
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("congress-server")
+            else:
+                restart_cmd += ' openstack-congress-server.service'
+            set_scripts.append(self.cg_set_script)
 
-    def setup_stunnel(self):
-        self.log.info('Setup ssh stunnel in controller nodes in Apex installer......')
         for node_ip in self.controllers:
-            cmd = "sudo ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s %s@%s -R %s:localhost:%s sleep 600 > ssh_tunnel.%s.log 2>&1 < /dev/null &" \
-                  % (self.key_file, self.node_user_name, node_ip,
-                     self.conf.consumer.port, self.conf.consumer.port, node_ip)
-            server = subprocess.Popen(cmd, shell=True)
-            self.servers.append(server)
-            server.communicate()
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
+            if fenix_files is not None:
+                for fenix_file in fenix_files:
+                    src_file = '{0}/{1}/{2}'.format(testdir,
                                                    'admin_tool/fenix',
+                                                    fenix_file)
+                    client.scp(src_file, fenix_file)
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    set_scripts,
+                                    python=self.python)
+        time.sleep(5)
 
-    def set_apply_patches(self):
         self.log.info('Set apply patches start......')
 
-        for node_ip in self.controllers:
-            client = SSHClient(node_ip, self.node_user_name, key_filename=self.key_file)
-            self.controller_clients.append(client)
-            self._ceilometer_apply_patches(client, self.cm_set_script)
+        if self.conf.test_case != 'fault_management':
+            if self.use_containers:
+                restart_cmd = self._set_docker_restart_cmd("nova")
+            else:
+                restart_cmd = 'sudo systemctl restart' \
+                              ' openstack-nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(client,
+                                        restart_cmd,
+                                        [self.nc_set_compute_script],
+                                        python=self.python)
+            time.sleep(5)
 
     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
-        for client in self.controller_clients:
-            self._ceilometer_apply_patches(client, self.cm_restore_script)
-
-    def _ceilometer_apply_patches(self, ssh_client, script_name):
-        installer_dir = os.path.dirname(os.path.realpath(__file__))
-        script_abs_path = '{0}/{1}/{2}'.format(installer_dir, 'common', script_name)
-
-        ssh_client.scp(script_abs_path, script_name)
-        cmd = 'sudo python %s' % script_name
-        ret, output = ssh_client.ssh(cmd)
-        if ret:
-            raise Exception('Do the ceilometer command in controller node failed....'
-                            'ret=%s, cmd=%s, output=%s' % (ret, cmd, output))
-        ssh_client.ssh('sudo systemctl restart openstack-ceilometer-notification.service')
+
+        restore_scripts = [self.cm_restore_script]
+
+        if self.use_containers:
+            restart_cmd = (self._set_docker_restart_cmd(
+                           "ceilometer-notification"))
+            restore_scripts.append(self.ac_restore_script)
+        else:
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-ceilometer-notification.service'
+
+        if self.conf.test_case != 'fault_management':
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+            else:
+                restart_cmd += ' openstack-nova-scheduler.service'
+            restore_scripts.append(self.nc_restore_compute_script)
+
+        if self.conf.inspector.type == Inspector.CONGRESS:
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("congress-server")
+            else:
+                restart_cmd += ' openstack-congress-server.service'
+            restore_scripts.append(self.cg_restore_script)
 
+        for node_ip in self.controllers:
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    restore_scripts,
+                                    python=self.python)
+
+        if self.conf.test_case != 'fault_management':
+            if self.use_containers:
+                restart_cmd = self._set_docker_restart_cmd("nova-compute")
+            else:
+                restart_cmd = 'sudo systemctl restart' \
+                              ' openstack-nova-compute.service'
+            for node_ip in self.computes:
+                self._run_apply_patches(
                    client, restart_cmd,
+                    [self.nc_restore_compute_script],
+                    python=self.python)
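
A note for reviewers: the new _get_overcloud_conf() replaces the old sed/awk pipeline with plain Python parsing of `nova list` output, and it now collects compute nodes as well as controllers. The snippet below is a minimal standalone sketch of that parsing step only; the sample rows and addresses are made up for illustration and are not taken from a real deployment.

# Sketch of the ctlplane-IP parsing done in _get_overcloud_conf().
# The rows below are hypothetical 'nova list' output, illustration only.
sample_rows = [
    "| overcloud-controller-0  | ACTIVE | ctlplane=192.0.2.10 |",
    "| overcloud-novacompute-0 | ACTIVE | ctlplane=192.0.2.20 |",
]

controllers, computes = [], []
for line in sample_rows:
    # Everything between 'ctlplane=' and the next space is the node IP.
    ip = line.split('ctlplane=', 1)[1].split(" ", 1)[0]
    if 'overcloud-controller-' in line:
        controllers.append(ip)
    elif 'overcloud-novacompute-' in line:
        computes.append(ip)

print(controllers)  # ['192.0.2.10']
print(computes)     # ['192.0.2.20']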
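
The other piece worth calling out is how restart_cmd is assembled: on containerized deployments every docker container matching a service name is restarted via a shell for-loop (one loop per service, concatenated into a single command string), while baremetal deployments simply append unit names to one `sudo systemctl restart`. Below is a small self-contained sketch of that composition; build_restart_cmd() and its inputs are illustrative helpers for this sketch, not part of the installer class.

# Sketch only: mirrors the restart-command composition added by this patch.
def set_docker_restart_cmd(service):
    # Restart every docker container whose name matches 'service';
    # more than one instance may be running.
    cmd = "for container in `sudo docker ps | grep "
    cmd += service
    cmd += " | awk '{print $1}'`; do sudo docker restart $container; done;"
    return cmd

def build_restart_cmd(use_containers, services):
    # 'use_containers' and 'services' are example inputs for this sketch.
    if use_containers:
        return "".join(set_docker_restart_cmd(s) for s in services)
    # Baremetal: systemctl accepts several units in one invocation.
    return ('sudo systemctl restart ' +
            ' '.join('openstack-%s.service' % s for s in services))

print(build_restart_cmd(True, ["ceilometer-notification", "nova-scheduler"]))
print(build_restart_cmd(False, ["ceilometer-notification", "nova-scheduler"]))

restore_apply_patches() composes its command the same way, so the restore path restarts exactly the services it reconfigures.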