return "/usr/lib/python2.7/site-packages/"
-def project_to_docker_image(project):
+def project_to_docker_image(project, docker_url):
"""
Translates OpenStack project to OOO services that are containerized
:param project: name of OpenStack project
# based on project
hub_output = utils.open_webpage(
- urllib.parse.urljoin(con.DOCKERHUB_OOO, '?page_size=1024'), timeout=10)
+ urllib.parse.urljoin(docker_url,
+ '?page_size=1024'), timeout=10)
try:
results = json.loads(hub_output.decode())['results']
except Exception as e:
return docker_images
-def is_patch_promoted(change, branch, docker_image=None):
+def is_patch_promoted(change, branch, docker_url, docker_image=None):
"""
Checks to see if a patch that is in merged exists in either the docker
container or the promoted tripleo images
return True
else:
# must be a docker patch, check docker tag modified time
- docker_url = con.DOCKERHUB_OOO.replace('tripleomaster',
- "tripleo{}".format(branch))
+ docker_url = docker_url.replace('tripleomaster',
+ "tripleo{}".format(branch))
url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG)
docker_url = urllib.parse.urljoin(docker_url, url_path)
logging.debug("docker url is: {}".format(docker_url))
# and move the patch into the containers directory. We also assume
# this builder call is for overcloud, because we do not support
# undercloud containers
+ if platform.machine() == 'aarch64':
+ docker_url = con.DOCKERHUB_AARCH64
+ else:
+ docker_url = con.DOCKERHUB_OOO
if docker_tag and 'python' in project_path:
# Projects map to multiple THT services, need to check which
# are supported
- ooo_docker_services = project_to_docker_image(patch['project'])
+ ooo_docker_services = project_to_docker_image(patch['project'],
+ docker_url)
docker_img = ooo_docker_services[0]
else:
ooo_docker_services = []
patch['change-id'])
patch_promoted = is_patch_promoted(change,
branch.replace('stable/', ''),
+ docker_url,
docker_img)
if patch_diff and not patch_promoted:
p_set['neutron_driver'] = neutron_driver
p_set['namespace'] = "docker.io/tripleo{}".format(branch)
if platform.machine() == 'aarch64':
- p_set['ceph_tag'] = 'master-fafda7d-luminous-centos-7-aarch64'
+ p_set['namespace'] = "docker.io/armbandapex"
+ p_set['ceph_tag'] = 'v3.1.0-stable-3.1-luminous-centos-7-aarch64'
except KeyError:
logging.error("Invalid prep file format: {}".format(prep_file))
}
DOCKERHUB_OOO = 'https://registry.hub.docker.com/v2/repositories' \
'/tripleomaster/'
+DOCKERHUB_AARCH64 = 'https://registry.hub.docker.com/v2/repositories' \
+ '/armbandapex/'
KUBESPRAY_URL = 'https://github.com/kubernetes-incubator/kubespray.git'
OPNFV_ARTIFACTS = 'http://storage.googleapis.com/artifacts.opnfv.org'
CUSTOM_OVS = '{}/apex/random/openvswitch-2.9.0-9.el7fdn.x86_64.' \
'requires at least 12GB per controller.')
logging.info('Increasing RAM per controller to 12GB')
elif args.virt_default_ram < 10:
- control_ram = 10
- logging.warning('RAM per controller is too low. nosdn '
- 'requires at least 10GB per controller.')
- logging.info('Increasing RAM per controller to 10GB')
+ if platform.machine() == 'aarch64':
+ control_ram = 16
+ logging.warning('RAM per controller is too low for '
+ 'aarch64 ')
+ logging.info('Increasing RAM per controller to 16GB')
+ else:
+ control_ram = 10
+ logging.warning('RAM per controller is too low. nosdn '
+ 'requires at least 10GB per controller.')
+ logging.info('Increasing RAM per controller to 10GB')
else:
control_ram = args.virt_default_ram
+ if platform.machine() == 'aarch64' and args.virt_cpus < 16:
+ vcpus = 16
+ logging.warning('aarch64 requires at least 16 vCPUS per '
+ 'target VM. Increasing to 16.')
+ else:
+ vcpus = args.virt_cpus
if ha_enabled and args.virt_compute_nodes < 2:
logging.debug(
'HA enabled, bumping number of compute nodes to 2')
num_computes=args.virt_compute_nodes,
controller_ram=control_ram * 1024,
compute_ram=compute_ram * 1024,
- vcpus=args.virt_cpus
+ vcpus=vcpus
)
inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
logging.info("Inventory is:\n {}".format(pprint.pformat(
docker_env = 'containers-prepare-parameter.yaml'
shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
os.path.join(APEX_TEMP_DIR, docker_env))
+ # Upload extra ansible.cfg
+ if platform.machine() == 'aarch64':
+ ansible_env = 'ansible.cfg'
+ shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
+ os.path.join(APEX_TEMP_DIR, ansible_env))
+
c_builder.prepare_container_images(
os.path.join(APEX_TEMP_DIR, docker_env),
branch=branch.replace('stable/', ''),
if net_data:
cmd += ' --networks-file network_data.yaml'
libvirt_type = 'kvm'
- if virtual:
+ if virtual and (platform.machine() != 'aarch64'):
with open('/sys/module/kvm_intel/parameters/nested') as f:
nested_kvm = f.read().strip()
if nested_kvm != 'Y':
libvirt_type = 'qemu'
+ elif virtual and (platform.machine() == 'aarch64'):
+ libvirt_type = 'qemu'
cmd += ' --libvirt-type {}'.format(libvirt_type)
+ if platform.machine() == 'aarch64':
+ cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
logging.info("Deploy command set: {}".format(cmd))
with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
'status': 'MERGED'}
self.assertTrue(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
def test_is_patch_promoted_docker(self):
dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
dummy_image = 'centos-binary-opendaylight'
self.assertTrue(c_builder.is_patch_promoted(dummy_change,
'master',
+ con.DOCKERHUB_OOO,
docker_image=dummy_image))
def test_patch_not_promoted(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
'status': 'MERGED'}
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
def test_patch_not_promoted_docker(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
dummy_image = 'centos-binary-opendaylight'
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
'master',
+ con.DOCKERHUB_OOO,
docker_image=dummy_image))
def test_patch_not_promoted_and_not_merged(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
'status': 'BLAH'}
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
@patch('builtins.open', mock_open())
@patch('apex.builders.common_builder.is_patch_promoted')
'/dummytmp/dummyrepo.tar')
def test_project_to_docker_image(self):
- found_services = c_builder.project_to_docker_image(project='nova')
+ found_services = c_builder.project_to_docker_image('nova',
+ con.DOCKERHUB_OOO)
assert 'nova-api' in found_services
@patch('apex.common.utils.open_webpage')
mock_open_web.return_value = b'{"blah": "blah"}'
self.assertRaises(exceptions.ApexCommonBuilderException,
c_builder.project_to_docker_image,
- 'nova')
+ 'nova',
+ con.DOCKERHUB_OOO)
def test_get_neutron_driver(self):
ds_opts = {'dataplane': 'fdio',
if self.external_net:
networks.append('external')
console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0'
- root = 'vda' if platform.machine() == 'aarch64' else 'sda'
+ root = 'vda2' if platform.machine() == 'aarch64' else 'sda'
self.vm = vm_lib.create_vm(name='undercloud',
image=self.volume,
# give 10 seconds to come up
time.sleep(10)
# set IP
- for x in range(5):
+ for x in range(10):
if self._set_ip():
logging.info("Undercloud started. IP Address: {}".format(
self.ip))
with open(os.path.join(template_dir, 'domain.xml'), 'r') as f:
source_template = f.read()
imagefile = os.path.realpath(image)
+
+ if arch == 'aarch64' and diskbus == 'sata':
+ diskbus = 'virtio'
+
memory = int(memory) * 1024
params = {
'name': name,
'user_interface': '',
}
- # assign virtio as default for aarch64
- if arch == 'aarch64' and diskbus == 'sata':
- diskbus = 'virtio'
# Configure the bus type for the target disk device
params['diskbus'] = diskbus
nicparams = {
"""
params['user_interface'] = """
<controller type='virtio-serial' index='0'>
- <address type='virtio-mmio'/>
+ <address type='pci'/>
</controller>
<serial type='pty'>
<target port='0'/>
--- /dev/null
+[defaults]
+retry_files_enabled = False
+forks = 25
+timeout = 60
+gather_timeout = 30
+
+[ssh_connection]
+ssh_args = -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ControlPersist=30m -o ServerAliveInterval=5 -o ServerAliveCountMax=5
+retries = 8
+pipelining = True
+
node_output=$(undercloud_connect "stack" "source stackrc; nova list")
node=$(echo "$1" | sed -E 's/([a-zA-Z]+)([0-9]+)/\1-\2/')
- node_ip=$(echo "$node_output" | grep "$node" | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+ node_ip=$(echo "$node_output" | grep "$node " | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
if [ "$node_ip" == "" ]; then
echo -e "Unable to find IP for ${node} in \n${node_output}"
--- /dev/null
+disk_images:
+ -
+ imagename: overcloud-full-rootfs
+ arch: aarch64
+ type: qcow2
+ distro: centos7
+ elements:
+ - baremetal
+ - dhcp-all-interfaces
+ - cloud-init
+ - openvswitch
+ - overcloud-agent
+ - overcloud-full
+ - overcloud-controller
+ - overcloud-compute
+ - overcloud-ceph-storage
+ - puppet-modules
+ - enable-serial-console
+ - stable-interface-names
+ - selinux-permissive
+ - grub2
+ - growroot
+ - devuser
+ - element-manifest
+ - dynamic-login
+ - iptables
+ - enable-packages-install
+ - pip-and-virtualenv-override
+ - dracut-regenerate
+ - remove-machine-id
+ - remove-resolvconf
+ packages:
+ - openstack-utils
+ - python-tripleoclient
+ - python-tripleoclient-heat-installer
+ - python-psutil
+ - python-debtcollector
+ - plotnetcfg
+ - sos
+ - yum-plugin-priorities
+ - ntp
+ - jq
+ - openstack-heat-agents
+ - device-mapper-multipath
+ - os-net-config
+ - grub2-efi-aa64
+ - grub2-efi-aa64-modules
+ options:
+ - " --no-tmpfs"
+ environment:
+ DIB_PYTHON_VERSION: '2'
+ DIB_DEV_USER_USERNAME: 'stack'
+ DIB_DEV_USER_PASSWORD: 'stack'
+ DIB_DEV_USER_PWDLESS_SUDO: 'Yes'
--- /dev/null
+disk_images:
+ -
+ imagename: undercloud-full
+ arch: aarch64
+ type: qcow2
+ distro: centos7
+ elements:
+ - vm
+ - block-device-efi
+ - baremetal
+ - dhcp-all-interfaces
+ - disable-selinux
+ - cloud-init-nocloud
+ - openvswitch
+ - overcloud-agent
+ - overcloud-full
+ - overcloud-controller
+ - overcloud-compute
+ - overcloud-ceph-storage
+ - puppet-modules
+ - enable-serial-console
+ - stable-interface-names
+ - grub2
+ - bootloader
+ - devuser
+ - element-manifest
+ - dynamic-login
+ - iptables
+ - enable-packages-install
+ - pip-and-virtualenv-override
+ - dracut-regenerate
+ - remove-machine-id
+ - remove-resolvconf
+ packages:
+ - openstack-utils
+ - python-tripleoclient
+ - python-tripleoclient-heat-installer
+ - python-psutil
+ - python-debtcollector
+ - plotnetcfg
+ - sos
+ - yum-plugin-priorities
+ - ntp
+ - jq
+ - openstack-heat-agents
+ - device-mapper-multipath
+ - os-net-config
+ options:
+ - " --no-tmpfs"
+ environment:
+ DIB_PYTHON_VERSION: '2'
+ DIB_DEV_USER_USERNAME: 'stack'
+ DIB_DEV_USER_PASSWORD: 'stack'
+ DIB_DEV_USER_PWDLESS_SUDO: 'Yes'
+ -
+ imagename: ironic-python-agent
+ # This is bogus, but there's no initrd type in diskimage-builder
+ arch: aarch64
+ type: qcow2
+ distro: centos7
+
+ # So we just override the extension instead
+ imageext: initramfs
+ elements:
+ - ironic-agent
+ - ironic-agent-multipath
+ - dynamic-login
+ - devuser
+ - disable-selinux
+ - element-manifest
+ - network-gateway
+ - enable-packages-install
+ - pip-and-virtualenv-override
+ packages:
+ - util-linux
+ - grub2-efi-aa64
+ - grub2-efi-aa64-module
+ - python-hardware-detect
+ - yum-plugin-priorities
+ - iscsi-initiator-utils
+ options:
+ - " --no-tmpfs"
+ environment:
+ DIB_PYTHON_VERSION: '2'
+ DIB_DEV_USER_USERNAME: 'stack'
+ DIB_DEV_USER_PASSWORD: 'stack'
+ DIB_DEV_USER_PWDLESS_SUDO: 'Yes'
--- /dev/null
+==================================================================================
+APEX on AARCH64
+==================================================================================
+
+This document describes the changes needed to deploy OPNFV-APEX on aarch64
+ * General considerations
+ * Creating undercloud and overcloud images using DIB
+ * Creating Kolla containers
+
+General considerations
+--------------------------
+
+OPNFV - APEX relies on artifacts created by the OOO project.
+
+Those artifacts are:
+
+1. Openstack packages, found in delorean_.
+
+ .. _delorean: https://trunk.rdoproject.org/
+
+2. UC and OC images created by RDO and found in images_.
+
+ .. _images: https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo-internal/
+
+3. The containerized version of the openstack services found in docker.io_.
+
+ .. _docker.io: https://hub.docker.com/r/tripleomaster/
+
+All the above artifacts are x86_64 only and as a result cannot be used by APEX on aarch64.
+The user therefore needs to create the images locally before attempting to deploy.
+The only supported scenario is 'os-nosdn-rocky-ha'.
+
+Other than the aarch64 disk images and containers, there is no other special configuration
+required for aarch64. The only requirement is for the nodes to be identified as aarch64 nodes
+in the inventory files.
+
+For example :
+
+.. code-block:: yaml
+
+ node1:
+ mac_address: "68:05:CA:68:08:CA"
+ ipmi_ip: 10.10.10.10
+ ipmi_user: user
+ ipmi_pass: pass
+ pm_type: "pxe_ipmitool"
+ cpus: 1
+ memory: 128000
+ disk: 480
+ disk_device: sda
+ arch: "aarch64"
+ capabilities: "profile:control"
+
+
+Creating undercloud and overcloud images using DIB
+--------------------------------------------------
+In order to create that image DIB_ must be used. DIB can either be built from source or installed via yum.
+
+.. _DIB: https://github.com/openstack/diskimage-builder
+
+It is important to use a fairly late version of DIB to support UEFI systems. The version currently on epel does NOT have support for UEFI. The version on delorean (15.01) works just fine. DIB uses a YAML file from the user which describes what the
+image should look like. The original yaml from RDO is here_:
+
+
+.. _here: https://github.com/openstack/tripleo-common/blob/master/image-yaml/overcloud-images.yaml
+
+The equivalent yaml files for aarch64 are included in the apex repo in the "apex/contrib/aarch64" folder.
+The UC and OC images are very similar in terms of packages. The major difference is the partition table in EFI, so for the undercloud it has to be provided as an environment variable.
+
+.. code-block:: bash
+
+ export DIB_BLOCK_DEVICE_CONFIG="
+
+ - local_loop:
+ name: image0
+
+ - partitioning:
+ base: image0
+ label: gpt
+ partitions:
+ - name: ESP
+ type: 'EF00'
+ size: 64MiB
+ mkfs:
+ type: vfat
+ mount:
+ mount_point: /boot/efi
+ fstab:
+ options: "defaults"
+ fsck-passno: 1
+ - name: root
+ type: '8300'
+ size: 50GiB
+ mkfs:
+ type: ext4
+ mount:
+ mount_point: /
+ fstab:
+ options: "defaults"
+ fsck-passno: 1
+ "
+
+ export DIB_YUM_REPO_CONF+="/etc/yum.repos.d/delorean-deps-rocky.repo /etc/yum.repos.d/delorean-rocky.repo /etc/yum.repos.d
+ /epel.repo "
+ openstack --debug overcloud image build --config-file undercloud_full.yaml --output-directory ./
+
+
+The overcloud is built in a similar way.
+
+.. code-block:: bash
+
+ export DIB_YUM_REPO_CONF+="/etc/yum.repos.d/delorean-deps-rocky.repo /etc/yum.repos.d/delorean-rocky.repo /etc/yum.repos.d
+ /epel.repo "
+ openstack --debug overcloud image build --config-file overcloud_full_rootfs.yaml --output-directory ./
+
+
+
+Apex container deployment
+-------------------------
+Similarly the containers provided by OOO are for x86 only. Containers for apex on aarch64 for the Rocky release can
+be found in armbandapex_.
+
+.. _armbandapex: https://registry.hub.docker.com/v2/repositories/armbandapex/
+
+A user who wishes to rebuild the containers can easily do so by using Kolla. An example kolla.conf and the command to build the containers are given below.
+
+
+.. code-block:: ini
+
+ [DEFAULT]
+
+ base=centos
+ type=binary
+ namespace="private docker.io repository"
+ tag=current-tripleo-rdo
+ rpm_setup_config=ceph.repo,epel.repo,delorean-deps.repo,delorean.repo
+ push=True
+
+
+
+.. code-block:: bash
+
+ openstack overcloud container image build --config-file /usr/share/tripleo-common/container-images/overcloud_containers.yaml
+ --kolla-config-file /etc/kolla/kolla-build.conf
+
+
- ironic_conductor
- ironic_inspector
become: yes
- # will need to modify the below to patch the container
- - lineinfile:
- path: /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
- regexp: '_link_ip_address_pxe_configs'
- line: ' _link_mac_pxe_configs(task)'
- when: aarch64
- name: configure external network vlan ifcfg
template:
src: external_vlan_ifcfg.yml.j2
when:
- external_network.vlan == "native"
- external_network.enabled
- - not aarch64
- name: bring up eth2
shell: ip link set up dev eth2
when:
- external_network.vlan == "native"
- external_network.enabled
- - not aarch64
- become: yes
- - name: assign IP to native eth0 if aarch64
- shell: ip a a {{ external_network.ip }}/{{ external_network.prefix }} dev eth0
become: yes
- when:
- - external_network.vlan == "native"
- - external_network.enabled
- - aarch64
- name: bring up eth0 if aarch64
shell: ip link set up dev eth0
when:
- baremetal-environment.yaml
- kubernetes-environment.yaml
- "{{ apex_env_file }}"
+ - name: Copy ansible.cfg data to undercloud in aarch64
+ copy:
+ src: "{{ apex_temp_dir }}/ansible.cfg"
+ dest: "/home/stack/ansible.cfg"
+ owner: stack
+ group: stack
+ mode: 0644
+ when: aarch64
- name: Copy network data to undercloud
copy:
src: "{{ apex_temp_dir }}/network_data.yaml"
allow_downgrade: yes
name: ceph-ansible-3.1.6
become: yes
+ - name: Re-enable ceph config for aarch64
+ replace:
+ path: "/usr/share/ceph-ansible/roles/ceph-client/tasks/create_users_keys.yml"
+ regexp: "x86_64"
+ replace: "aarch64"
+ backup: yes
+ when: aarch64
- name: Configure DNS server for ctlplane network
shell: "{{ stackrc }} && openstack subnet set ctlplane-subnet {{ dns_server_args }}"
- block:
tasks:
- name: aarch64 configuration
block:
- - shell: yum -y reinstall grub2-efi shim
- copy:
src: /boot/efi/EFI/centos/grubaa64.efi
- dest: /tftpboot/grubaa64.efi
+ dest: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/grubaa64.efi
remote_src: yes
- file:
- path: /tftpboot/EFI/centos
+ path: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/EFI/centos
state: directory
mode: 0755
- copy:
set timeout=5
set hidden_timeout_quiet=false
menuentry "local" {
- configfile (hd0,gpt3)/boot/grub2/grub.cfg
+ configfile /var/lib/ironic/tftpboot/$net_default_mac.conf
}
- dest: /tftpboot/EFI/centos/grub.cfg
+ dest: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/EFI/centos/grub.cfg
mode: 0644
- - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi'
- - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template \$pybasedir/drivers/modules/pxe_grub_config.template'
-
- - systemd:
- name: openstack-ironic-conductor
- state: restarted
- enabled: yes
- - replace:
- path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
- regexp: 'linuxefi'
- replace: 'linux'
- - replace:
- path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
- regexp: 'initrdefi'
- replace: 'initrd'
+ - shell: 'sudo crudini --set /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf pxe pxe_bootfile_name_by_arch aarch64:grubaa64.efi'
+ - shell: 'sudo crudini --set /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf pxe pxe_config_template_by_arch aarch64:\$pybasedir/drivers/modules/pxe_grub_config.template'
+ - shell: 'docker exec -u root ironic_conductor sed -i "s/initrdefi/initrd/g" /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template'
+ - shell: 'docker exec -u root ironic_conductor sed -i "s/linuxefi/linux/g" /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template'
- lineinfile:
- path: /tftpboot/map-file
+ path: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/map-file
insertafter: EOF
state: present
line: ''
- - shell: "echo 'r ^/EFI/centos/grub.cfg-(.*) /tftpboot/pxelinux.cfg/\\1' | sudo tee --append /tftpboot/map-file"
- - shell: "echo 'r ^/EFI/centos/grub.cfg /tftpboot/EFI/centos/grub.cfg' | sudo tee --append /tftpboot/map-file"
+ - shell: "echo 'r ^/EFI/centos/grub.cfg-(.*) /var/lib/ironic/tftpboot/pxelinux.cfg/\\1' | sudo tee --append /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/map-file"
+ - shell: "echo 'r ^/EFI/centos/grub.cfg /var/lib/ironic/tftpboot/EFI/centos/grub.cfg' | sudo tee --append /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/map-file"
+ - shell: "docker restart {{ item }}"
+ with_items:
+ - ironic_conductor
+ - ironic_pxe_tftp
- systemd:
name: xinetd
state: restarted