Support Apex with services in containers
[doctor.git] doctor_tests/installer/apex.py
##############################################################################
# Copyright (c) 2017 ZTE Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import re
import time

from doctor_tests.common.constants import Inspector
from doctor_tests.common.utils import SSHClient
from doctor_tests.installer.base import BaseInstaller


class ApexInstaller(BaseInstaller):
    node_user_name = 'heat-admin'
    installer_username = 'stack'
    # Helper scripts pushed to the overcloud nodes; each "set"/"restart"
    # script is paired with a "restore" script that undoes the change
    # during cleanup.
    cm_set_script = 'set_config.py'
    nc_set_compute_script = 'set_compute_config.py'
    cg_set_script = 'set_congress.py'
    cm_restore_script = 'restore_config.py'
    nc_restore_compute_script = 'restore_compute_config.py'
    cg_restore_script = 'restore_congress.py'
    ac_restart_script = 'restart_aodh.py'
    ac_restore_script = 'restore_aodh.py'
    python = 'python'

    def __init__(self, conf, log):
        super(ApexInstaller, self).__init__(conf, log)
        # SSH session to the undercloud (installer) node.
        self.client = SSHClient(self.conf.installer.ip,
                                self.installer_username,
                                key_filename=self.conf.installer.key_file,
                                look_for_keys=True)
        self.key_file = None
        self.controllers = list()
        self.computes = list()
        self.controller_clients = list()
        self.compute_clients = list()

    def setup(self):
        self.log.info('Setup Apex installer start......')
        self.key_file = self.get_ssh_key_from_installer()
        self._get_overcloud_conf()
        self.create_flavor()
        self.set_apply_patches()
        self.setup_stunnel()

    def cleanup(self):
        self.restore_apply_patches()
        for server in self.servers:
            server.terminate()

    def get_ssh_key_from_installer(self):
        key_path = '/home/stack/.ssh/id_rsa'
        return self._get_ssh_key(self.client, key_path)

    def _get_overcloud_conf(self):
        self.log.info('Get overcloud config details from Apex installer'
                      '......')

        command = "source stackrc; nova list | grep ' overcloud-'"
        raw_ips_list = self._run_cmd_remote(self.client, command)
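        # Illustrative `nova list` row this parsing assumes (values
        # invented for the example):
        #   | ... | overcloud-controller-0 | ACTIVE | ... | ctlplane=192.0.2.10 |
        # split('ctlplane=', 1)[1] leaves '192.0.2.10 |', and the second
        # split keeps the bare address.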
        for line in raw_ips_list:
            ip = line.split('ctlplane=', 1)[1].split(" ", 1)[0]
            if 'overcloud-controller-' in line:
                self.controllers.append(ip)
            elif 'overcloud-novacompute-' in line:
                self.computes.append(ip)
        # A containerized deployment is detected from the deploy command
        # Apex saves on the undercloud.
        command = "grep docker /home/stack/deploy_command"
        self.use_containers = self._check_cmd_remote(self.client, command)
        self.log.info('controller_ips:%s' % self.controllers)
        self.log.info('compute_ips:%s' % self.computes)
        self.log.info('use_containers:%s' % self.use_containers)

    def get_host_ip_from_hostname(self, hostname):
        self.log.info('Get host ip by hostname=%s from Apex installer......'
                      % hostname)

        hostname_in_undercloud = hostname.split('.')[0]
        command = ("source stackrc; nova show %s |"
                   " awk '/ ctlplane network /{print $5}'"
                   % hostname_in_undercloud)
        host_ips = self._run_cmd_remote(self.client, command)
        return host_ips[0]

    def get_transport_url(self):
        client = SSHClient(self.controllers[0], self.node_user_name,
                           key_filename=self.key_file)
        if self.use_containers:
            # Containerized services read their config from the
            # puppet-generated tree on the host.
            ncbase = "/var/lib/config-data/puppet-generated/nova"
        else:
            ncbase = ""
        command = 'sudo grep "^transport_url" %s/etc/nova/nova.conf' % ncbase

        ret, url = client.ssh(command)
        if ret:
            raise Exception('Exec command to get transport_url from '
                            'controller (%s) in Apex installer failed, '
                            'ret=%s, output=%s'
                            % (self.controllers[0], ret, url))
        # Need to use the IP instead of the hostname in the URL.
        ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
               url[0].split("=", 1)[1]))
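        # Illustrative rewrite (values invented): a matched line such as
        #   transport_url=rabbit://guest:secret@overcloud-controller-0:5672/?ssl=0
        # is returned as
        #   rabbit://guest:secret@192.0.2.10:5672/?ssl=0
        # so the URL works for hosts that cannot resolve overcloud names.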
        self.log.debug('get_transport_url %s' % ret)
        return ret

    def _set_docker_restart_cmd(self, service):
        # There can be multiple instances running, so restart all of them.
        cmd = ("for container in `sudo docker ps | grep %s"
               " | awk '{print $1}'`;"
               " do sudo docker restart $container; done; " % service)
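        # For example, service="nova-compute" yields the shell fragment
        #   for container in `sudo docker ps | grep nova-compute \
        #       | awk '{print $1}'`; do sudo docker restart $container; done;
        # and fragments can be concatenated to bounce several services in
        # one SSH call.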
        return cmd

    def set_apply_patches(self):
        self.log.info('Set apply patches start......')

        set_scripts = [self.cm_set_script]

        if self.use_containers:
            restart_cmd = (self._set_docker_restart_cmd(
                           "ceilometer-notification"))
            set_scripts.append(self.ac_restart_script)
        else:
            restart_cmd = 'sudo systemctl restart' \
                          ' openstack-ceilometer-notification.service'

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
            else:
                restart_cmd += ' openstack-nova-scheduler.service'
            set_scripts.append(self.nc_set_compute_script)

        if self.conf.inspector.type == Inspector.CONGRESS:
            if self.use_containers:
                restart_cmd += self._set_docker_restart_cmd("congress-server")
            else:
                restart_cmd += ' openstack-congress-server.service'
            set_scripts.append(self.cg_set_script)

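        # At this point restart_cmd covers every patched controller service;
        # without containers it composes to, e.g. (illustrative, Congress
        # inspector and a non-fault_management case):
        #   sudo systemctl restart openstack-ceilometer-notification.service \
        #       openstack-nova-scheduler.service openstack-congress-server.service
        # since systemctl can restart several units in one invocation.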
        for node_ip in self.controllers:
            client = SSHClient(node_ip, self.node_user_name,
                               key_filename=self.key_file)
            self.controller_clients.append(client)
            self._run_apply_patches(client,
                                    restart_cmd,
                                    set_scripts,
                                    python=self.python)

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                restart_cmd = self._set_docker_restart_cmd("nova-compute")
            else:
                restart_cmd = 'sudo systemctl restart' \
                              ' openstack-nova-compute.service'
            for node_ip in self.computes:
                client = SSHClient(node_ip, self.node_user_name,
                                   key_filename=self.key_file)
                self.compute_clients.append(client)
                self._run_apply_patches(client,
                                        restart_cmd,
                                        [self.nc_set_compute_script],
                                        python=self.python)
            # Give the restarted compute services a moment to settle.
            time.sleep(10)

    def restore_apply_patches(self):
        self.log.info('Restore apply patches start......')

        restore_scripts = [self.cm_restore_script]

        if self.use_containers:
            restart_cmd = (self._set_docker_restart_cmd(
                           "ceilometer-notification"))
            restore_scripts.append(self.ac_restore_script)
        else:
            restart_cmd = 'sudo systemctl restart' \
                          ' openstack-ceilometer-notification.service'

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
            else:
                restart_cmd += ' openstack-nova-scheduler.service'
            restore_scripts.append(self.nc_restore_compute_script)

        if self.conf.inspector.type == Inspector.CONGRESS:
            if self.use_containers:
                restart_cmd += self._set_docker_restart_cmd("congress-server")
            else:
                restart_cmd += ' openstack-congress-server.service'
            restore_scripts.append(self.cg_restore_script)

        for client in self.controller_clients:
            self._run_apply_patches(client,
                                    restart_cmd,
                                    restore_scripts,
                                    python=self.python)

        if self.conf.test_case != 'fault_management':
            if self.use_containers:
                restart_cmd = self._set_docker_restart_cmd("nova-compute")
            else:
                restart_cmd = 'sudo systemctl restart' \
                              ' openstack-nova-compute.service'
            for client in self.compute_clients:
                self._run_apply_patches(client,
                                        restart_cmd,
                                        [self.nc_restore_compute_script],
                                        python=self.python)
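
# Minimal usage sketch (assumed context: `conf` and `log` come from the
# doctor test framework and are not constructed here):
#
#     installer = ApexInstaller(conf, log)
#     installer.setup()      # fetch SSH key, detect containers, apply patches
#     ...                    # run the Doctor test case
#     installer.cleanup()    # restore patched configs, terminate servers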