# clean up remnants of previous deployment
cd /opt/bifrost
-sudo -E ./scripts/destroy-env.sh
+sudo -H -E ./scripts/destroy-env.sh
# provision 3 VMs: xcimaster, controller, and compute
cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+sudo -H -E ./scripts/bifrost-provision.sh
# list the provisioned VMs
cd /opt/bifrost
sudo -H -E virsh list
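The -H added to these sudo calls is what the keystone bits below rely on: with the usual sudoers env_reset defaults, sudo -E preserves the caller's HOME, so anything written under ~/.config/openstack would end up in the invoking user's home, whereas sudo -H -E resets HOME to /root (where ansible later looks for clouds.yaml) while still keeping the exported proxy variables. A quick illustrative check, not part of the change itself:

sudo -E bash -c 'echo $HOME'     # prints the invoking user's home
sudo -H -E bash -c 'echo $HOME'  # prints /root

The playbook that installs Ironic then gains the keystone install role and a client-config role: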
  become: yes
  gather_facts: yes
  roles:
+    - role: bifrost-keystone-install
    - role: bifrost-ironic-install
      cleaning: false
      testing: true
      dib_elements: "vm enable-serial-console simple-init devuser growroot {{ extra_dib_elements }}"
      dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
      when: create_image_via_dib | bool == true and transform_boot_image | bool == false
+    - role: bifrost-keystone-client-config
+      # NOTE(hwoarang): This should be ansible_env.SUDO_USER like in the
+      # upstream playbook. However, we run ansible as root (ie with sudo)
+      # so clouds.yaml will be placed in the user's home directory (see
+      # the bifrost-keystone-client-config role) and then ansible will look
+      # for one in /root and fail. As such we hardcode the user to be 'root'.
+      user: "root"
+      clouds:
+        bifrost:
+          config_username: "{{ ironic.keystone.default_username }}"
+          config_password: "{{ ironic.keystone.default_password }}"
+          config_project_name: "baremetal"
+          config_region_name: "{{ keystone.bootstrap.region_name }}"
+          config_auth_url: "{{ keystone.bootstrap.public_url }}"
  environment:
    http_proxy: "{{ lookup('env','http_proxy') }}"
    https_proxy: "{{ lookup('env','https_proxy') }}"
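For reference, the bifrost-keystone-client-config role takes the clouds/config_* values above and writes a clouds.yaml into the configured user's home, i.e. /root/.config/openstack/clouds.yaml here. A rough sketch of the result, assuming the role emits the usual os-client-config layout, with purely illustrative values in place of the templated variables:

# /root/.config/openstack/clouds.yaml -- illustrative sketch only
clouds:
  bifrost:
    auth_type: password
    auth:
      auth_url: http://192.168.122.2:5000/v3   # keystone.bootstrap.public_url
      username: admin                          # ironic.keystone.default_username
      password: password                       # ironic.keystone.default_password
      project_name: baremetal
    region_name: RegionOne                     # keystone.bootstrap.region_name

On the script side, the provisioning script's Ansible setup and playbook invocation follow, with the new enable_keystone toggle added: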
# Source Ansible
set +x +o nounset
$SCRIPT_HOME/env-setup.sh
-source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup
ANSIBLE=$(which ansible-playbook)
set -x -o nounset
    -e create_ipa_image=${CREATE_IPA_IMAGE} \
    -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
    -e ipv4_gateway=192.168.122.1 \
-    -e wait_timeout=${PROVISION_WAIT_TIMEOUT}
+    -e wait_timeout=${PROVISION_WAIT_TIMEOUT} \
+    -e enable_keystone=false
EXITCODE=$?
if [ $EXITCODE != 0 ]; then
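Once the run above completes successfully and keystone is actually enabled (enable_keystone=true rather than the false passed here), the clouds.yaml sketched earlier is what clients use to reach the now-authenticated Ironic API. A hypothetical manual check, assuming python-openstackclient and the ironic OSC plugin are installed:

export OS_CLOUD=bifrost
openstack baremetal node list

Finally, the cleanup script also removes the generated client configuration so a fresh run starts clean: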
# Start fresh
rm -rf /opt/stack
+# HOME is normally set by sudo -H
+rm -rf ${HOME}/.config/openstack
# Delete all libvirt VMs and hosts from vbmc (look for a port number)
for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do