Fixes undercloud install failure with setting hostname
[apex.git] / apex / deploy.py
1 #!/usr/bin/env python
2
3 ##############################################################################
4 # Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
5 #
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
11
12 import argparse
13 import git
14 import json
15 import logging
16 import os
17 import platform
18 import pprint
19 import shutil
20 import sys
21 import tempfile
22 import yaml
23
24 import apex.virtual.configure_vm as vm_lib
25 import apex.virtual.utils as virt_utils
26 import apex.builders.common_builder as c_builder
27 import apex.builders.overcloud_builder as oc_builder
28 import apex.builders.undercloud_builder as uc_builder
29 from apex import DeploySettings
30 from apex import Inventory
31 from apex import NetworkEnvironment
32 from apex import NetworkSettings
33 from apex.deployment.snapshot import SnapshotDeployment
34 from apex.common import utils
35 from apex.common import constants
36 from apex.common import parsers
37 from apex.common.exceptions import ApexDeployException
38 from apex.deployment.tripleo import ApexDeployment
39 from apex.network import jumphost
40 from apex.network import network_data
41 from apex.undercloud import undercloud as uc_lib
42 from apex.overcloud import config as oc_cfg
43 from apex.overcloud import deploy as oc_deploy
44
# Scratch directory for artifacts generated during a deployment run
# (inventory, env files, modified disk images); a fresh unique directory
# is created once at import time.
APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
# Overcloud disk image name used for OpenDaylight-based scenarios.
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
# Kernel/ramdisk artifacts staged into the undercloud (/home/stack) for
# node provisioning and introspection.
UC_DISK_FILES = [
    'overcloud-full.vmlinuz',
    'overcloud-full.initrd',
    'ironic-python-agent.initramfs',
    'ironic-python-agent.kernel'
]
53
54
def validate_cross_settings(deploy_settings, net_settings, inventory):
    """
    Used to validate compatibility across settings files.

    :param deploy_settings: parsed settings for deployment
    :param net_settings: parsed settings for network
    :param inventory: parsed inventory file (currently unused; reserved for
        future cross-checks)
    :return: None
    :raises ApexDeployException: when the settings are incompatible
    """

    # A non-OVS dataplane (fdio/DPDK) requires a dedicated tenant NIC.
    if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
            not in net_settings.enabled_network_list:
        # NOTE: implicit string concatenation previously dropped the space
        # between "requires" and "a", garbling the error message.
        raise ApexDeployException("Setting a DPDK based dataplane requires "
                                  "a dedicated NIC for tenant network")

    if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
        if deploy_settings['deploy_options']['dataplane'] != 'fdio':
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dataplane is set to fdio")
        if deploy_settings['deploy_options'].get('dvr') is True:
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dvr is not enabled")

    # TODO(trozet): add more checks here like RAM for ODL, etc
    # check if odl_vpp_netvirt is true and vpp is set
    # Check if fdio and nosdn:
    # tenant_nic_mapping_controller_members" ==
    # "$tenant_nic_mapping_compute_members
82
83
def build_vms(inventory, network_settings,
              template_dir='/usr/share/opnfv-apex'):
    """
    Creates VMs and configures vbmc and host
    :param inventory: parsed inventory describing the nodes to create
    :param network_settings: parsed network settings (provides the enabled
        network list used as the VMs' baremetal interfaces)
    :param template_dir: directory holding the libvirt domain templates
    :return: None
    """

    enabled_networks = network_settings.enabled_network_list
    for node_id, node in enumerate(inventory['nodes']):
        vm_name = 'baremetal{}'.format(node_id)
        disk_path = os.path.join(constants.LIBVIRT_VOLUME_PATH,
                                 '{}.qcow2'.format(vm_name))
        # TODO(trozet): add error checking
        vm_lib.create_vm(
            vm_name, disk_path,
            baremetal_interfaces=enabled_networks,
            memory=node['memory'], cpus=node['cpu'],
            macs=node['mac'],
            template_dir=template_dir)
        # Register the VM's virtual BMC port on the host.
        virt_utils.host_setup({vm_name: node['pm_port']})
105
106
def create_deploy_parser():
    """
    Builds the argument parser for the deploy command.

    :return: configured argparse.ArgumentParser instance
    """
    # (flags, options) pairs registered below; each entry mirrors one
    # add_argument call so every flag keeps its original behavior.
    arg_specs = [
        (('--debug',),
         dict(action='store_true', default=False,
              help="Turn on debug messages")),
        (('-l', '--log-file'),
         dict(default='./apex_deploy.log', dest='log_file',
              help="Log file to log to")),
        (('-d', '--deploy-settings'),
         dict(dest='deploy_settings_file', required=True,
              help='File which contains Apex deploy settings')),
        (('-n', '--network-settings'),
         dict(dest='network_settings_file', required=False,
              help='File which contains Apex network settings')),
        (('-i', '--inventory-file'),
         dict(dest='inventory_file', default=None,
              help='Inventory file which contains POD definition')),
        (('-e', '--environment-file'),
         dict(dest='env_file', default='opnfv-environment.yaml',
              help='Provide alternate base env file located in deploy_dir')),
        (('-v', '--virtual'),
         dict(action='store_true', default=False, dest='virtual',
              help='Enable virtual deployment')),
        (('--interactive',),
         dict(action='store_true', default=False,
              help='Enable interactive deployment mode which requires user '
                   'to confirm steps of deployment')),
        (('--virtual-computes',),
         dict(dest='virt_compute_nodes', default=1, type=int,
              help='Number of Virtual Compute nodes to create and use during '
                   'deployment (defaults to 1 for noha and 2 for ha)')),
        (('--virtual-cpus',),
         dict(dest='virt_cpus', default=4, type=int,
              help='Number of CPUs to use per Overcloud VM in a virtual '
                   'deployment (defaults to 4)')),
        (('--virtual-default-ram',),
         dict(dest='virt_default_ram', default=8, type=int,
              help='Amount of default RAM to use per Overcloud VM in GB '
                   '(defaults to 8).')),
        (('--virtual-compute-ram',),
         dict(dest='virt_compute_ram', default=None, type=int,
              help='Amount of RAM to use per Overcloud Compute VM in GB '
                   '(defaults to 8). Overrides --virtual-default-ram arg '
                   'for computes')),
        (('--deploy-dir',),
         dict(default='/usr/share/opnfv-apex',
              help='Directory to deploy from which contains base config '
                   'files for deployment')),
        (('--image-dir',),
         dict(default='/var/opt/opnfv/images',
              help='Directory which contains base disk images for '
                   'deployment')),
        (('--lib-dir',),
         dict(default='/usr/share/opnfv-apex',
              help='Directory path for apex ansible and third party libs')),
        (('-s', '--snapshot'),
         dict(action='store_true', default=False,
              help='Use snapshots for deployment')),
        (('--snap-cache',),
         dict(dest='snap_cache',
              default="{}/snap_cache".format(os.path.expanduser('~')),
              help='Local directory to cache snapshot artifacts. Defaults '
                   'to $HOME/snap_cache')),
        (('--upstream',),
         dict(action='store_true', default=True,
              help='Force deployment to use upstream artifacts. This option '
                   'is now deprecated and only upstream deployments are '
                   'supported.')),
        (('--no-fetch',),
         dict(action='store_true', default=False,
              help='Ignore fetching latest upstream and use what is in '
                   'cache')),
        (('-p', '--patches'),
         dict(default='/etc/opnfv-apex/common-patches.yaml',
              dest='patches_file',
              help='File to include for common patches which apply to all '
                   'deployment scenarios')),
    ]
    deploy_parser = argparse.ArgumentParser()
    for flags, options in arg_specs:
        deploy_parser.add_argument(*flags, **options)
    return deploy_parser
205
206
def validate_deploy_args(args):
    """
    Validates arguments for deploy
    :param args: parsed argparse namespace from create_deploy_parser
    :return: None
    :raises ApexDeployException: when the inventory or settings files are
        missing or inconsistent with the chosen deployment type
    """

    logging.debug('Validating arguments for deployment')
    if args.snapshot:
        # NOTE: implicit string concatenation previously dropped the space
        # between "applicable" and "to", garbling this debug message.
        logging.debug('Skipping inventory validation as it is not applicable '
                      'to snapshot deployments')
    elif args.virtual and args.inventory_file is not None:
        logging.error("Virtual enabled but inventory file also given")
        raise ApexDeployException('You should not specify an inventory file '
                                  'with virtual deployments')
    elif args.virtual:
        # Virtual deployments generate their own inventory later in main().
        args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                           'inventory-virt.yaml')
    elif not os.path.isfile(args.inventory_file):
        logging.error("Specified inventory file does not exist: {}".format(
            args.inventory_file))
        raise ApexDeployException('Specified inventory file does not exist')

    for settings_file in (args.deploy_settings_file,
                          args.network_settings_file):
        # Network settings are not required for snapshot deployments.
        if settings_file == args.network_settings_file and args.snapshot:
            continue
        if not os.path.isfile(settings_file):
            logging.error("Specified settings file does not "
                          "exist: {}".format(settings_file))
            raise ApexDeployException('Specified settings file does not '
                                      'exist: {}'.format(settings_file))
240
241 def main():
242     parser = create_deploy_parser()
243     args = parser.parse_args(sys.argv[1:])
244     # FIXME (trozet): this is only needed as a workaround for CI.  Remove
245     # when CI is changed
246     if os.getenv('IMAGES', False):
247         args.image_dir = os.getenv('IMAGES')
248     if args.debug:
249         log_level = logging.DEBUG
250     else:
251         log_level = logging.INFO
252     os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
253     formatter = '%(asctime)s %(levelname)s: %(message)s'
254     logging.basicConfig(filename=args.log_file,
255                         format=formatter,
256                         datefmt='%m/%d/%Y %I:%M:%S %p',
257                         level=log_level)
258     console = logging.StreamHandler()
259     console.setLevel(log_level)
260     console.setFormatter(logging.Formatter(formatter))
261     logging.getLogger('').addHandler(console)
262     utils.install_ansible()
263     validate_deploy_args(args)
264     # Parse all settings
265     deploy_settings = DeploySettings(args.deploy_settings_file)
266     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
267         deploy_settings)))
268
269     if not args.snapshot:
270         net_settings = NetworkSettings(args.network_settings_file)
271         logging.info("Network settings are:\n {}".format(pprint.pformat(
272             net_settings)))
273         os_version = deploy_settings['deploy_options']['os_version']
274         net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
275         net_env = NetworkEnvironment(net_settings, net_env_file,
276                                      os_version=os_version)
277         net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
278         utils.dump_yaml(dict(net_env), net_env_target)
279
280         # get global deploy params
281         ha_enabled = deploy_settings['global_params']['ha_enabled']
282         introspect = deploy_settings['global_params'].get('introspect', True)
283         net_list = net_settings.enabled_network_list
284         if args.virtual:
285             if args.virt_compute_ram is None:
286                 compute_ram = args.virt_default_ram
287             else:
288                 compute_ram = args.virt_compute_ram
289             if (deploy_settings['deploy_options']['sdn_controller'] ==
290                     'opendaylight' and args.virt_default_ram < 12):
291                 control_ram = 12
292                 logging.warning('RAM per controller is too low.  OpenDaylight '
293                                 'requires at least 12GB per controller.')
294                 logging.info('Increasing RAM per controller to 12GB')
295             elif args.virt_default_ram < 10:
296                 if platform.machine() == 'aarch64':
297                     control_ram = 16
298                     logging.warning('RAM per controller is too low for '
299                                     'aarch64 ')
300                     logging.info('Increasing RAM per controller to 16GB')
301                 else:
302                     control_ram = 10
303                     logging.warning('RAM per controller is too low.  nosdn '
304                                     'requires at least 10GB per controller.')
305                     logging.info('Increasing RAM per controller to 10GB')
306             else:
307                 control_ram = args.virt_default_ram
308             if platform.machine() == 'aarch64' and args.virt_cpus < 16:
309                 vcpus = 16
310                 logging.warning('aarch64 requires at least 16 vCPUS per '
311                                 'target VM. Increasing to 16.')
312             else:
313                 vcpus = args.virt_cpus
314             if ha_enabled and args.virt_compute_nodes < 2:
315                 logging.debug(
316                     'HA enabled, bumping number of compute nodes to 2')
317                 args.virt_compute_nodes = 2
318             virt_utils.generate_inventory(args.inventory_file, ha_enabled,
319                                           num_computes=args.virt_compute_nodes,
320                                           controller_ram=control_ram * 1024,
321                                           compute_ram=compute_ram * 1024,
322                                           vcpus=vcpus
323                                           )
324         inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
325         logging.info("Inventory is:\n {}".format(pprint.pformat(
326             inventory)))
327
328         validate_cross_settings(deploy_settings, net_settings, inventory)
329     else:
330         # only one network with snapshots
331         net_list = [constants.ADMIN_NETWORK]
332
333     ds_opts = deploy_settings['deploy_options']
334     ansible_args = {
335         'virsh_enabled_networks': net_list,
336         'snapshot': args.snapshot
337     }
338     utils.run_ansible(ansible_args,
339                       os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
340                                    'deploy_dependencies.yml'))
341     all_in_one = not bool(args.virt_compute_nodes)
342     if args.snapshot:
343         # Start snapshot Deployment
344         logging.info('Executing Snapshot Deployment...')
345         SnapshotDeployment(deploy_settings=deploy_settings,
346                            snap_cache_dir=args.snap_cache,
347                            fetch=not args.no_fetch,
348                            all_in_one=all_in_one)
349     else:
350         # Start Standard TripleO Deployment
351         deployment = ApexDeployment(deploy_settings, args.patches_file,
352                                     args.deploy_settings_file)
353         # TODO (trozet): add logic back from:
354         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
355         uc_external = False
356         if 'external' in net_settings.enabled_network_list:
357             uc_external = True
358         if args.virtual:
359             # create all overcloud VMs
360             build_vms(inventory, net_settings, args.deploy_dir)
361         else:
362             # Attach interfaces to jumphost for baremetal deployment
363             jump_networks = ['admin']
364             if uc_external:
365                 jump_networks.append('external')
366             for network in jump_networks:
367                 if network == 'external':
368                     # TODO(trozet): enable vlan secondary external networks
369                     iface = net_settings['networks'][network][0][
370                         'installer_vm']['members'][0]
371                 else:
372                     iface = net_settings['networks'][network]['installer_vm'][
373                         'members'][0]
374                 bridge = "br-{}".format(network)
375                 jumphost.attach_interface_to_ovs(bridge, iface, network)
376         instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
377         with open(instackenv_json, 'w') as fh:
378             json.dump(inventory, fh)
379
380         # Create and configure undercloud
381         if args.debug:
382             root_pw = constants.DEBUG_OVERCLOUD_PW
383         else:
384             root_pw = None
385
386         if not args.upstream:
387             logging.warning("Using upstream is now required for Apex. "
388                             "Forcing upstream to true")
389         if os_version == 'master':
390             branch = 'master'
391         else:
392             branch = "stable/{}".format(os_version)
393
394         logging.info("Deploying with upstream artifacts for OpenStack "
395                      "{}".format(os_version))
396         args.image_dir = os.path.join(args.image_dir, os_version)
397         upstream_url = constants.UPSTREAM_RDO.replace(
398             constants.DEFAULT_OS_VERSION, os_version)
399
400         upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
401         if platform.machine() == 'aarch64':
402             upstream_targets.append('undercloud.qcow2')
403         utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
404                                         upstream_targets,
405                                         fetch=not args.no_fetch)
406         # Copy ironic files and overcloud ramdisk and kernel into temp dir
407         # to be copied by ansible into undercloud /home/stack
408         # Note the overcloud disk does not need to be copied here as it will
409         # be modified and copied later
410         for tmp_file in UC_DISK_FILES:
411             shutil.copyfile(os.path.join(args.image_dir, tmp_file),
412                             os.path.join(APEX_TEMP_DIR, tmp_file))
413         if platform.machine() == 'aarch64':
414             sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
415         else:
416             sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
417         # copy undercloud so we don't taint upstream fetch
418         uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
419         uc_fetch_img = sdn_image
420         shutil.copyfile(uc_fetch_img, uc_image)
421         # prep undercloud with required packages
422         if platform.machine() != 'aarch64':
423             uc_builder.update_repos(image=uc_image,
424                                     branch=branch.replace('stable/', ''))
425         uc_builder.add_upstream_packages(uc_image)
426         uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
427         # add patches from upstream to undercloud and overcloud
428         logging.info('Adding patches to undercloud')
429         patches = deployment.determine_patches()
430         c_builder.add_upstream_patches(patches['undercloud'], uc_image,
431                                        APEX_TEMP_DIR, branch)
432
433         # Create/Start Undercloud VM
434         undercloud = uc_lib.Undercloud(args.image_dir,
435                                        args.deploy_dir,
436                                        root_pw=root_pw,
437                                        external_network=uc_external,
438                                        image_name=os.path.basename(uc_image),
439                                        os_version=os_version)
440         undercloud.start()
441         undercloud_admin_ip = net_settings['networks'][
442             constants.ADMIN_NETWORK]['installer_vm']['ip']
443
444         if ds_opts['containers']:
445             tag = constants.DOCKER_TAG
446         else:
447             tag = None
448
449         # Generate nic templates
450         for role in 'compute', 'controller':
451             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
452                                        args.deploy_dir, APEX_TEMP_DIR)
453         # Prepare/Upload docker images
454         docker_env = 'containers-prepare-parameter.yaml'
455         shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
456                         os.path.join(APEX_TEMP_DIR, docker_env))
457         # Upload extra ansible.cfg
458         if platform.machine() == 'aarch64':
459             ansible_env = 'ansible.cfg'
460             shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
461                             os.path.join(APEX_TEMP_DIR, ansible_env))
462
463         c_builder.prepare_container_images(
464             os.path.join(APEX_TEMP_DIR, docker_env),
465             branch=branch.replace('stable/', ''),
466             neutron_driver=c_builder.get_neutron_driver(ds_opts)
467         )
468         # Install Undercloud
469         undercloud.configure(net_settings, deploy_settings,
470                              os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
471                                           'configure_undercloud.yml'),
472                              APEX_TEMP_DIR, virtual_oc=args.virtual)
473
474         # Prepare overcloud-full.qcow2
475         logging.info("Preparing Overcloud for deployment...")
476         if os_version != 'ocata':
477             net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
478             net_data = network_data.create_network_data(net_settings,
479                                                         net_data_file)
480         else:
481             net_data = False
482
483         shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
484                         os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
485
486         # TODO(trozet): Either fix opnfv env or default to use upstream env
487         if args.env_file == 'opnfv-environment.yaml':
488             # Override the env_file if it is defaulted to opnfv
489             # opnfv env file will not work with upstream
490             args.env_file = 'upstream-environment.yaml'
491         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
492         oc_deploy.prep_env(deploy_settings, net_settings, inventory,
493                            opnfv_env, net_env_target, APEX_TEMP_DIR)
494         if not args.virtual:
495             oc_deploy.LOOP_DEVICE_SIZE = "50G"
496         if platform.machine() == 'aarch64':
497             oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
498         else:
499             oc_image = sdn_image
500         patched_containers = oc_deploy.prep_image(
501             deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
502             root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
503
504         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
505                                     APEX_TEMP_DIR, args.virtual,
506                                     os.path.basename(opnfv_env),
507                                     net_data=net_data)
508         # Prepare undercloud with containers
509         docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
510                                        'prepare_overcloud_containers.yml')
511         if ds_opts['containers']:
512             logging.info("Preparing Undercloud with Docker containers")
513             sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
514             sdn_env_files = str()
515             for sdn_file in sdn_env:
516                 sdn_env_files += " -e {}".format(sdn_file)
517             if patched_containers:
518                 oc_builder.archive_docker_patches(APEX_TEMP_DIR)
519             container_vars = dict()
520             container_vars['apex_temp_dir'] = APEX_TEMP_DIR
521             container_vars['patched_docker_services'] = list(
522                 patched_containers)
523             container_vars['container_tag'] = constants.DOCKER_TAG
524             container_vars['stackrc'] = 'source /home/stack/stackrc'
525             container_vars['sdn'] = ds_opts['sdn_controller']
526             container_vars['undercloud_ip'] = undercloud_admin_ip
527             container_vars['os_version'] = os_version
528             container_vars['aarch64'] = platform.machine() == 'aarch64'
529             container_vars['sdn_env_file'] = sdn_env_files
530             container_vars['container_client'] = utils.find_container_client(
531                 os_version)
532             try:
533                 utils.run_ansible(container_vars, docker_playbook,
534                                   host=undercloud.ip, user='stack',
535                                   tmp_dir=APEX_TEMP_DIR)
536                 logging.info("Container preparation complete")
537             except Exception:
538                 logging.error("Unable to complete container prep on "
539                               "Undercloud")
540                 for tmp_file in UC_DISK_FILES:
541                     os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
542                 os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
543                 raise
544
545         deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
546                                        'deploy_overcloud.yml')
547         virt_env = 'virtual-environment.yaml'
548         bm_env = 'baremetal-environment.yaml'
549         k8s_env = 'kubernetes-environment.yaml'
550         for p_env in virt_env, bm_env, k8s_env:
551             shutil.copyfile(os.path.join(args.deploy_dir, p_env),
552                             os.path.join(APEX_TEMP_DIR, p_env))
553
554         # Start Overcloud Deployment
555         logging.info("Executing Overcloud Deployment...")
556         deploy_vars = dict()
557         deploy_vars['virtual'] = args.virtual
558         deploy_vars['debug'] = args.debug
559         deploy_vars['aarch64'] = platform.machine() == 'aarch64'
560         deploy_vars['introspect'] = not (args.virtual or
561                                          deploy_vars['aarch64'] or
562                                          not introspect)
563         deploy_vars['dns_server_args'] = ''
564         deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
565         deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
566         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
567         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
568         deploy_vars['undercloud_ip'] = undercloud_admin_ip
569         deploy_vars['ha_enabled'] = ha_enabled
570         deploy_vars['os_version'] = os_version
571         deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
572         deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
573         deploy_vars['vim'] = ds_opts['vim']
574         deploy_vars['container_client'] = utils.find_container_client(
575             os_version)
576         for dns_server in net_settings['dns_servers']:
577             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
578                 dns_server)
579         try:
580             utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
581                               user='stack', tmp_dir=APEX_TEMP_DIR)
582             logging.info("Overcloud deployment complete")
583         except Exception:
584             logging.error("Deployment Failed.  Please check deploy log as "
585                           "well as mistral logs in "
586                           "{}".format(os.path.join(APEX_TEMP_DIR,
587                                                    'mistral_logs.tar.gz')))
588             raise
589         finally:
590             os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
591             for tmp_file in UC_DISK_FILES:
592                 os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
593
        # Post install
        logging.info("Executing post deploy configuration")
        jumphost.configure_bridges(net_settings)
        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
        # Mapping of overcloud node name -> IP, parsed from the nova output
        # file that the deploy playbook left in APEX_TEMP_DIR.
        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
            nova_output)
        # Non-interactive SSH options reused by the post-deploy playbooks
        # (skip host-key checks, suppress known_hosts noise).
        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
                                     'GlobalKnownHostsFile=/dev/null -o ' \
                                     'UserKnownHostsFile=/dev/null -o ' \
                                     'LogLevel=error'
        deploy_vars['external_network_cmds'] = \
            oc_deploy.external_network_cmds(net_settings, deploy_settings)
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['gluon'] = ds_opts['gluon']
        deploy_vars['sdn'] = ds_opts['sdn_controller']
        # Optional feature flags default to False when absent from the
        # deploy settings.
        for dep_option in 'yardstick', 'dovetail', 'vsperf':
            if dep_option in ds_opts:
                deploy_vars[dep_option] = ds_opts[dep_option]
            else:
                deploy_vars[dep_option] = False
        deploy_vars['dataplane'] = ds_opts['dataplane']
        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
        # Congress needs datasource commands generated from the overcloudrc
        # credentials file fetched during deployment.
        if ds_opts['congress']:
            deploy_vars['congress_datasources'] = \
                oc_deploy.create_congress_cmds(overcloudrc)
            deploy_vars['congress'] = True
        else:
            deploy_vars['congress'] = False
        deploy_vars['calipso'] = ds_opts.get('calipso', False)
        deploy_vars['calipso_ip'] = undercloud_admin_ip
        # overcloudrc.v3 removed and set as default in queens and later
        if os_version == 'pike':
            deploy_vars['overcloudrc_files'] = ['overcloudrc',
                                                'overcloudrc.v3']
        else:
            deploy_vars['overcloudrc_files'] = ['overcloudrc']
630
        post_undercloud = os.path.join(args.lib_dir,
                                       constants.ANSIBLE_PATH,
                                       'post_deploy_undercloud.yml')
        logging.info("Executing post deploy configuration undercloud "
                     "playbook")
        # Run the undercloud post-deploy playbook; any failure is logged and
        # re-raised to abort the deployment.
        try:
            utils.run_ansible(deploy_vars, post_undercloud,
                              host=undercloud.ip, user='stack',
                              tmp_dir=APEX_TEMP_DIR)
            logging.info("Post Deploy Undercloud Configuration Complete")
        except Exception:
            logging.error("Post Deploy Undercloud Configuration failed.  "
                          "Please check log")
            raise
645
        # Deploy kubernetes if enabled
        # (TODO)zshi move handling of kubernetes deployment
        # to its own deployment class
        if deploy_vars['vim'] == 'k8s':
            # clone kubespray repo
            git.Repo.clone_from(constants.KUBESPRAY_URL,
                                os.path.join(APEX_TEMP_DIR, 'kubespray'))
            # Seed the 'apex' inventory directory from kubespray's bundled
            # sample inventory layout.
            shutil.copytree(
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'sample'),
                os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
                             'apex'))
            # Skeleton kubespray inventory; filled in below so controller
            # nodes become kube-master + etcd and compute nodes kube-node.
            k8s_node_inventory = {
                'all':
                    {'hosts': {},
                     'children': {
                         'k8s-cluster': {
                             'children': {
                                 'kube-master': {
                                     'hosts': {}
                                 },
                                 'kube-node': {
                                     'hosts': {}
                                 }
                             }
                         },
                         'etcd': {
                             'hosts': {}
                         }
                    }
                    }
            }
            # Node role assignment is keyed off the substrings 'controller' /
            # 'compute' in the nova node names parsed earlier.
            for node, ip in deploy_vars['overcloud_nodes'].items():
                k8s_node_inventory['all']['hosts'][node] = {
                    'ansible_become': True,
                    'ansible_ssh_host': ip,
                    'ansible_become_user': 'root',
                    'ip': ip
                }
                if 'controller' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-master']['hosts'][node] = None
                    k8s_node_inventory['all']['children']['etcd'][
                        'hosts'][node] = None
                elif 'compute' in node:
                    k8s_node_inventory['all']['children']['k8s-cluster'][
                        'children']['kube-node']['hosts'][node] = None

            kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
            # Persist the generated inventory as block-style YAML for
            # kubespray to consume.
            with open(os.path.join(kubespray_dir, 'inventory', 'apex',
                                   'apex.yaml'), 'w') as invfile:
                yaml.dump(k8s_node_inventory, invfile,
                          default_flow_style=False)
            k8s_deploy_vars = {}
            # Add kubespray ansible control variables in k8s_deploy_vars,
            # example: 'kube_network_plugin': 'flannel'
            k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
            k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
                                               'apex', 'apex.yaml')

            k8s_remove_pkgs = os.path.join(args.lib_dir,
                                           constants.ANSIBLE_PATH,
                                           'k8s_remove_pkgs.yml')
            # First strip any docker packages TripleO installed on the
            # overcloud nodes so kubespray can manage the runtime itself.
            try:
                logging.debug("Removing any existing overcloud docker "
                              "packages")
                utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Remove Existing Docker Related "
                             "Packages Complete")
            except Exception:
                logging.error("k8s Deploy Remove Existing Docker Related "
                              "Packages failed. Please check log")
                raise

            # Run kubespray's cluster playbook against the generated
            # inventory to bring up kubernetes across the overcloud nodes.
            try:
                utils.run_ansible(k8s_deploy_vars, k8s_deploy,
                                  host=k8s_deploy_inv_file,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("k8s Deploy Overcloud Configuration Complete")
            except Exception:
                logging.error("k8s Deploy Overcloud Configuration failed."
                              "Please check log")
                raise
731
        # Post deploy overcloud node configuration
        # TODO(trozet): just parse all ds_opts as deploy vars one time
        deploy_vars['sfc'] = ds_opts['sfc']
        deploy_vars['vpn'] = ds_opts['vpn']
        # .get() here: these options may be absent, in which case the
        # playbook receives None.
        deploy_vars['l2gw'] = ds_opts.get('l2gw')
        deploy_vars['sriov'] = ds_opts.get('sriov')
        deploy_vars['tacker'] = ds_opts.get('tacker')
        deploy_vars['all_in_one'] = all_in_one
        # TODO(trozet): need to set container client to docker until OOO
        # migrates OC to podman. Remove this later.
        deploy_vars['container_client'] = 'docker'
        # TODO(trozet): pull all logs and store in tmp dir in overcloud
        # playbook
        post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
                                      'post_deploy_overcloud.yml')
        # Run per overcloud node
        for node, ip in deploy_vars['overcloud_nodes'].items():
            logging.info("Executing Post deploy overcloud playbook on "
                         "node {}".format(node))
            try:
                utils.run_ansible(deploy_vars, post_overcloud, host=ip,
                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
                logging.info("Post Deploy Overcloud Configuration Complete "
                             "for node {}".format(node))
            except Exception:
                logging.error("Post Deploy Overcloud Configuration failed "
                              "for node {}. Please check log".format(node))
                raise
        logging.info("Apex deployment complete")
        logging.info("Undercloud IP: {}, please connect by doing "
                     "'opnfv-util undercloud'".format(undercloud.ip))
        # TODO(trozet): add logging here showing controller VIP and horizon url
763         # TODO(trozet): add logging here showing controller VIP and horizon url
764
765
# Script entry point: run the full Apex deployment when invoked directly
# (main() is defined earlier in this module).
if __name__ == '__main__':
    main()