Commit: "Support Fenix as admin tool"
File: [doctor.git] / doctor_tests / installer / apex.py
1 ##############################################################################
2 # Copyright (c) 2017 ZTE Corporation and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9 import time
10
11 from doctor_tests.common.constants import Inspector
12 from doctor_tests.common.constants import is_fenix
13 from doctor_tests.common.utils import get_doctor_test_root_dir
14 from doctor_tests.common.utils import SSHClient
15 from doctor_tests.installer.base import BaseInstaller
16
17
class ApexInstaller(BaseInstaller):
    """Installer adapter for Apex (TripleO-based) OPNFV deployments.

    Connects to the undercloud ("installer") host over SSH, discovers the
    overcloud controller/compute nodes, pushes the doctor patch scripts to
    them and applies them for the selected test case.  ``cleanup()`` rolls
    everything back and terminates the test servers.
    """

    # SSH login on overcloud nodes
    node_user_name = 'heat-admin'
    # SSH login on the undercloud host
    installer_username = 'stack'
    # Scripts run on the nodes to apply the test configuration ...
    cm_set_script = 'set_config.py'
    nc_set_compute_script = 'set_compute_config.py'
    cg_set_script = 'set_congress.py'
    fe_set_script = 'set_fenix.sh'
    # ... and their counterparts that roll it back afterwards.
    cm_restore_script = 'restore_config.py'
    nc_restore_compute_script = 'restore_compute_config.py'
    cg_restore_script = 'restore_congress.py'
    ac_restart_script = 'restart_aodh.py'
    ac_restore_script = 'restore_aodh.py'
    # Interpreter used to run the *.py patch scripts on the nodes.
    python = 'python'

    def __init__(self, conf, log):
        """Open an SSH session to the undercloud host.

        :param conf: doctor test configuration object
        :param log: logger instance
        """
        super(ApexInstaller, self).__init__(conf, log)
        self.client = SSHClient(self.conf.installer.ip,
                                self.installer_username,
                                key_filename=self.conf.installer.key_file,
                                look_for_keys=True)
        self.key_file = None       # overcloud SSH key, fetched in setup()
        self.controllers = list()  # controller node ctlplane IPs
        self.computes = list()     # compute node ctlplane IPs

    def setup(self):
        """Prepare the overcloud for the doctor tests."""
        self.log.info('Setup Apex installer start......')
        self.key_file = self.get_ssh_key_from_installer()
        self._get_overcloud_conf()
        if is_fenix(self.conf):
            # Fenix (admin tool) needs the admin credentials file on the
            # controllers.
            self._copy_overcloudrc_to_controllers()
        self.create_flavor()
        self.set_apply_patches()
        self.setup_stunnel()

    def cleanup(self):
        """Undo all applied patches and delete servers created by the test."""
        self.restore_apply_patches()
        for server in self.servers:
            server.terminate()

    def get_ssh_key_from_installer(self):
        """Fetch the overcloud nodes' private SSH key from the undercloud."""
        key_path = '/home/stack/.ssh/id_rsa'
        return self._get_ssh_key(self.client, key_path)

    def _copy_overcloudrc_to_controllers(self):
        """scp the overcloudrc credentials file to every controller node."""
        for ip in self.controllers:
            cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
            self._run_cmd_remote(self.client, cmd)

    def _get_overcloud_conf(self):
        """Discover controller/compute IPs and whether services run in
        docker containers."""
        self.log.info('Get overcloud config details from Apex installer'
                      '......')

        command = "source stackrc; nova list | grep ' overcloud-'"
        raw_ips_list = self._run_cmd_remote(self.client, command)
        for line in raw_ips_list:
            # Each line contains e.g. "... ctlplane=192.0.2.10 ..."
            ip = line.split('ctlplane=', 1)[1].split(" ", 1)[0]
            if 'overcloud-controller-' in line:
                self.controllers.append(ip)
            elif 'overcloud-novacompute-' in line:
                self.computes.append(ip)
        # Containerized deployment iff the deploy command mentions docker.
        command = "grep docker /home/stack/deploy_command"
        self.use_containers = self._check_cmd_remote(self.client, command)
        self.log.info('controller_ips:%s' % self.controllers)
        self.log.info('compute_ips:%s' % self.computes)
        self.log.info('use_containers:%s' % self.use_containers)

    def get_host_ip_from_hostname(self, hostname):
        """Resolve an overcloud hostname to its ctlplane IP via nova.

        :param hostname: node hostname (FQDN accepted; only the short
            name is known to the undercloud's nova)
        :return: the node's ctlplane IP address as a string
        """
        self.log.info('Get host ip by hostname=%s from Apex installer......'
                      % hostname)

        hostname_in_undercloud = hostname.split('.')[0]
        command = "source stackrc; nova show %s | awk '/ ctlplane network /{print $5}'" % (hostname_in_undercloud)   # noqa
        host_ips = self._run_cmd_remote(self.client, command)
        return host_ips[0]

    def _set_docker_restart_cmd(self, service):
        """Build a shell snippet restarting every docker container whose
        `docker ps` line matches *service*."""
        # There can be multiple instances running so need to restart all
        cmd = "for container in `sudo docker ps | grep "
        cmd += service
        cmd += " | awk '{print $1}'`; do sudo docker restart $container; \
               done;"
        return cmd

    def set_apply_patches(self):
        """Apply the test-case patch scripts on controllers and computes.

        Builds the list of scripts and the matching service-restart command
        for the current test case and inspector, then runs them on every
        controller and (for non-fault_management cases) every compute.
        """
        self.log.info('Set apply patches start......')
        fenix_files = None

        set_scripts = [self.cm_set_script]

        if self.use_containers:
            restart_cmd = (self._set_docker_restart_cmd(
                           "ceilometer-notification"))
            set_scripts.append(self.ac_restart_script)
        else:
            restart_cmd = 'sudo systemctl restart' \
                          ' openstack-ceilometer-notification.service'

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                # NOTE(review): grep pattern "nova" is broader than the
                # "nova-compute" used in restore_apply_patches(); it restarts
                # every nova-* container — confirm whether that is intended.
                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
                if is_fenix(self.conf):
                    set_scripts.append(self.fe_set_script)
                    testdir = get_doctor_test_root_dir()
                    # Files needed to build/run the Fenix container on the
                    # controllers.
                    fenix_files = ["Dockerfile", "run"]
            else:
                restart_cmd += ' openstack-nova-scheduler.service'
            set_scripts.append(self.nc_set_compute_script)

        if self.conf.inspector.type == Inspector.CONGRESS:
            if self.use_containers:
                restart_cmd += self._set_docker_restart_cmd("congress-server")
            else:
                restart_cmd += ' openstack-congress-server.service'
            set_scripts.append(self.cg_set_script)

        for node_ip in self.controllers:
            client = SSHClient(node_ip, self.node_user_name,
                               key_filename=self.key_file)
            if fenix_files is not None:
                for fenix_file in fenix_files:
                    src_file = '{0}/{1}/{2}'.format(testdir,
                                                    'admin_tool/fenix',
                                                    fenix_file)
                    client.scp(src_file, fenix_file)
            self._run_apply_patches(client,
                                    restart_cmd,
                                    set_scripts,
                                    python=self.python)
        # Give the restarted services a moment to come back up.
        time.sleep(5)

        # Fixed copy-paste: this phase patches the compute nodes.
        self.log.info('Set apply patches for computes start......')

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                restart_cmd = self._set_docker_restart_cmd("nova")
            else:
                restart_cmd = 'sudo systemctl restart' \
                              ' openstack-nova-compute.service'
            for node_ip in self.computes:
                client = SSHClient(node_ip, self.node_user_name,
                                   key_filename=self.key_file)
                self._run_apply_patches(client,
                                        restart_cmd,
                                        [self.nc_set_compute_script],
                                        python=self.python)
            time.sleep(5)

    def restore_apply_patches(self):
        """Roll back every patch applied by set_apply_patches()."""
        self.log.info('restore apply patches start......')

        restore_scripts = [self.cm_restore_script]

        if self.use_containers:
            restart_cmd = (self._set_docker_restart_cmd(
                           "ceilometer-notification"))
            restore_scripts.append(self.ac_restore_script)
        else:
            restart_cmd = 'sudo systemctl restart' \
                          ' openstack-ceilometer-notification.service'

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
            else:
                restart_cmd += ' openstack-nova-scheduler.service'
            restore_scripts.append(self.nc_restore_compute_script)

        if self.conf.inspector.type == Inspector.CONGRESS:
            if self.use_containers:
                restart_cmd += self._set_docker_restart_cmd("congress-server")
            else:
                restart_cmd += ' openstack-congress-server.service'
            restore_scripts.append(self.cg_restore_script)

        for node_ip in self.controllers:
            client = SSHClient(node_ip, self.node_user_name,
                               key_filename=self.key_file)
            self._run_apply_patches(client,
                                    restart_cmd,
                                    restore_scripts,
                                    python=self.python)

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                restart_cmd = self._set_docker_restart_cmd("nova-compute")
            else:
                restart_cmd = 'sudo systemctl restart' \
                              ' openstack-nova-compute.service'
            for node_ip in self.computes:
                # Bug fix: open a fresh SSH session to each compute node.
                # The original reused the last controller's client here, so
                # the computes were never actually restored (and this loop
                # crashed with UnboundLocalError when no controllers exist).
                client = SSHClient(node_ip, self.node_user_name,
                                   key_filename=self.key_file)
                self._run_apply_patches(
                    client, restart_cmd,
                    [self.nc_restore_compute_script],
                    python=self.python)