From 6769ae9bcbf61dc079c42ee5ebfafcaaf9fd708a Mon Sep 17 00:00:00 2001
From: Markos Chandras
Date: Wed, 3 May 2017 19:36:23 +0100
Subject: [PATCH] prototypes: bifrost: Add keystone roles

The os_client_config Ansible module used by all the ironic-* roles
depends on the keystone roles, so we need to make use of them even
though they do not affect the end result. This fixes the following
OPNFV CI failure, which is caused by not having a clouds.yaml file
present for the os_client_config Ansible module to consume:

fatal: [controller00]: FAILED! => {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}

An exception occurred during task execution. The full traceback is:
Traceback (most recent call last):
  File "/tmp/ansible_ApkCUQ/ansible_module_os_client_config.py", line 75, in <module>
    main()
  File "/tmp/ansible_ApkCUQ/ansible_module_os_client_config.py", line 63, in main
    for cloud in config.get_all_clouds():
  File "/usr/lib/python2.7/site-packages/os_client_config/config.py", line 798, in get_all_clouds
    cloud, region_name=region['name']))
  File "/usr/lib/python2.7/site-packages/os_client_config/config.py", line 1071, in get_one_cloud
    auth_plugin = loader.load_from_options(**config['auth'])
  File "/usr/lib/python2.7/site-packages/keystoneauth1/loading/base.py", line 162, in load_from_options
    raise exceptions.MissingRequiredOptions(missing_required)
keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions: Auth plugin requires parameters which were not given: auth_url

fatal: [opnfv]: FAILED! => {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}

Moreover, we clean up the ~/.config/openstack directory, which may
contain bifrost artifacts such as a clouds.yaml file. Finally, we use
'sudo -H' because the keystone roles need the HOME variable to be set
properly.
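
For reference, the clouds.yaml that the bifrost-keystone-client-config
role generates under ~/.config/openstack/ is expected to look roughly
like the sketch below. The credentials, region and endpoint shown here
are purely illustrative; the real values come from the ironic and
keystone role defaults referenced in the playbook:

clouds:
  bifrost:
    auth:
      username: admin
      password: secret
      project_name: baremetal
      auth_url: http://keystone.example.org:5000/v3
    region_name: RegionOne
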
Change-Id: I45b08bd33dd8ea9505fe10eb4b2b10956b3b683c
---
 jjb/xci/bifrost-verify.sh                       |  4 ++--
 prototypes/bifrost/playbooks/opnfv-virtual.yaml | 15 +++++++++++++++
 prototypes/bifrost/scripts/bifrost-provision.sh |  4 ++--
 prototypes/bifrost/scripts/destroy-env.sh       |  2 ++
 4 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index f596d7527..29af7ca3b 100755
--- a/jjb/xci/bifrost-verify.sh
+++ b/jjb/xci/bifrost-verify.sh
@@ -113,11 +113,11 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
 
 # cleanup remnants of previous deployment
 cd /opt/bifrost
-sudo -E ./scripts/destroy-env.sh
+sudo -H -E ./scripts/destroy-env.sh
 
 # provision 3 VMs; xcimaster, controller, and compute
 cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+sudo -H -E ./scripts/bifrost-provision.sh
 
 # list the provisioned VMs
 cd /opt/bifrost
diff --git a/prototypes/bifrost/playbooks/opnfv-virtual.yaml b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
index 310eca864..699c96698 100644
--- a/prototypes/bifrost/playbooks/opnfv-virtual.yaml
+++ b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
@@ -35,6 +35,7 @@
   become: yes
   gather_facts: yes
   roles:
+    - role: bifrost-keystone-install
     - role: bifrost-ironic-install
       cleaning: false
       testing: true
@@ -57,6 +58,20 @@
       dib_elements: "vm enable-serial-console simple-init devuser growroot {{ extra_dib_elements }}"
       dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
       when: create_image_via_dib | bool == true and transform_boot_image | bool == false
+    - role: bifrost-keystone-client-config
+      # NOTE(hwoarang): This should be ansible_env.SUDO_USER like in the
+      # upstream playbook. However, we run ansible as root (ie with sudo)
+      # so clouds.yaml will be placed in the user's home directory (see
+      # the bifrost-keystone-client-config role) and then ansible will look
+      # for one in /root and fail. As such we hardcode the user to be 'root'.
+      user: "root"
+      clouds:
+        bifrost:
+          config_username: "{{ ironic.keystone.default_username }}"
+          config_password: "{{ ironic.keystone.default_password }}"
+          config_project_name: "baremetal"
+          config_region_name: "{{ keystone.bootstrap.region_name }}"
+          config_auth_url: "{{ keystone.bootstrap.public_url }}"
   environment:
     http_proxy: "{{ lookup('env','http_proxy') }}"
     https_proxy: "{{ lookup('env','https_proxy') }}"
diff --git a/prototypes/bifrost/scripts/bifrost-provision.sh b/prototypes/bifrost/scripts/bifrost-provision.sh
index d3b28ee10..056196254 100755
--- a/prototypes/bifrost/scripts/bifrost-provision.sh
+++ b/prototypes/bifrost/scripts/bifrost-provision.sh
@@ -70,7 +70,6 @@ export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
 # Source Ansible
 set +x +o nounset
 $SCRIPT_HOME/env-setup.sh
-source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup
 ANSIBLE=$(which ansible-playbook)
 set -x -o nounset
 
@@ -121,7 +120,8 @@ ${ANSIBLE} ${ANSIBLE_VERBOSITY} \
   -e create_ipa_image=${CREATE_IPA_IMAGE} \
   -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
   -e ipv4_gateway=192.168.122.1 \
-  -e wait_timeout=${PROVISION_WAIT_TIMEOUT}
+  -e wait_timeout=${PROVISION_WAIT_TIMEOUT} \
+  -e enable_keystone=false
 EXITCODE=$?
 
 if [ $EXITCODE != 0 ]; then
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
index d570f10ad..c75e814b7 100755
--- a/prototypes/bifrost/scripts/destroy-env.sh
+++ b/prototypes/bifrost/scripts/destroy-env.sh
@@ -16,6 +16,8 @@ fi
 
 # Start fresh
 rm -rf /opt/stack
+# HOME is normally set by sudo -H
+rm -rf ${HOME}/.config/openstack
 
 # Delete all libvirt VMs and hosts from vbmc (look for a port number)
 for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
--
2.16.6
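
A quick way to confirm the result on a provisioned host is to repeat
what the failing os_client_config module does (see the traceback in the
commit message) and list the configured clouds. This is only a sketch
and assumes the os_client_config Python package is installed
system-wide, which the traceback suggests it is on the CI hosts:

sudo -H python -c 'import os_client_config; print([c.name for c in os_client_config.OpenStackConfig().get_all_clouds()])'

With clouds.yaml in place this should print a cloud named 'bifrost'
instead of raising MissingRequiredOptions.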