# settings for virtual deployment
DEPLOY_CMD="${DEPLOY_CMD} -v"
if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
- DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 14 --virtual-compute-ram 8"
+ DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
fi
if [[ "$JOB_NAME" == *csit* ]]; then
DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
--- /dev/null
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# log info to console
+echo "Starting the Apex iso verify."
+echo "--------------------------------------------------------"
+echo
+
+if ! rpm -q virt-install > /dev/null; then
+ sudo yum -y install virt-install
+fi
+
+# define a clean function
+rm_apex_iso_verify () {
+if sudo virsh list --all | grep apex-iso-verify | grep running; then
+ sudo virsh destroy apex-iso-verify
+fi
+if sudo virsh list --all | grep apex-iso-verify; then
+ sudo virsh undefine apex-iso-verify
+fi
+}
+
+# Make sure a pre-existing iso-verify isn't there
+rm_apex_iso_verify
+
+# run an install from the iso
+# This streams a serial console to tcp port 3737 on localhost
+sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
+ --accelerate -v --noautoconsole --nographics \
+ --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
+ -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+ --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
+ --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+ --serial tcp,host=:3737,protocol=raw
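+# (to watch the install by hand, something like 'nc 127.0.0.1 3737' should
+# attach to the same raw serial stream; the Python below does it with a timeout)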
+
+# Attach to tcp port 3737 and echo the output to stdout
+# watch for a 5 min time out, a power off message or a tcp disconnect
+# errexit would kill the script before the return code is captured, so it is
+# disabled around the verify run and restored afterwards
+set +o errexit
+python << EOP
+#!/usr/bin/env python
+
+import sys
+import socket
+from time import sleep
+from time import time
+
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 3737
+BUFFER_SIZE = 1024
+
+try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((TCP_IP, TCP_PORT))
+except Exception, e:
+ print "Failed to connect to the iso-verofy vm's serial console"
+ print "this probably means that the VM failed to start"
+ raise e
+
+activity = time()
+data = s.recv(BUFFER_SIZE)
+last_data = data
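+# the inactivity timer resets whenever new console output arrives; if the
+# same buffer repeats for 5 minutes the install is assumed to be hung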
+while time() - activity < 300:
+ try:
+ if data != last_data:
+ activity = time()
+ last_data = data
+ data = s.recv(BUFFER_SIZE)
+ sys.stdout.write(data)
+ if 'Powering off' in data:
+ break
+ sleep(.5)
+ except socket.error, e:
+ # for now assuming that the connection was closed
+ # which is good, means the vm finished installing
+ # printing the error output just in case we need to debug
+ print "VM console connection lost: %s" % msg
+ break
+s.close()
+
+if time() - activity > 300:
+ print "failing due to console inactivity"
+ sys.exit(1)
+else:
+ print "Success!"
+EOP
+
+# save the python return code for after cleanup
+python_rc=$?
+set -o errexit
+
+# clean up
+rm_apex_iso_verify
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+
+# Exit with the RC of the Python job
+exit $python_rc
set -o pipefail
# log info to console
-echo "Uploading the Apex artifact. This could take some time..."
+echo "Uploading the Apex $1 artifact. This could take some time..."
echo "--------------------------------------------------------"
echo
[ -d releng ] && rm -rf releng
git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
#this is where we import the signing key
-if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
source $WORKSPACE/releng/utils/gpg_import_key.sh
fi
uploadsnap
elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
echo "Signing Key avaliable"
- signiso
- uploadiso
- signrpm
- uploadrpm
+ if [ "$1" == 'iso' ]; then
+ signiso
+ uploadiso
+ fi
+ if [ "$1" == 'rpm' ]; then
+ signrpm
+ uploadrpm
+ fi
else
- uploadiso
- uploadrpm
+ if [ "$1" == 'iso' ]; then uploadiso; fi
+ if [ "$1" == 'rpm' ]; then uploadrpm; fi
fi
echo
echo "--------------------------------------------------------"
echo "Done!"
-echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"
+if [ "$1" == 'iso' ]; then echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"; fi
+if [ "$1" == 'rpm' ]; then echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"; fi
git-revision: false
same-node: true
block: true
- - 'apex-upload-artifact'
+ - 'apex-upload-rpm-artifact'
+ - trigger-builds:
+ - project: 'apex-iso-verify-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ OPNFV_CLEAN=yes
+ git-revision: false
+ block: true
+ same-node: true
+ - 'apex-upload-iso-artifact'
- job-template:
name: 'apex-deploy-virtual-{scenario}-{stream}'
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - 'apex-upload-snapshot-artifact'
# FDIO promote
- job-template:
same-node: true
- shell:
!include-raw-escape: ./apex-snapshot-create.sh
- - shell:
- !include-raw-escape: ./apex-upload-artifact.sh
+ - 'apex-upload-snapshot-artifact'
- job-template:
name: 'apex-gs-clean-{stream}'
!include-raw: ./apex-workspace-cleanup.sh
- builder:
- name: 'apex-upload-artifact'
+ name: 'apex-iso-verify'
builders:
- shell:
- !include-raw: ./apex-upload-artifact.sh
+ !include-raw: ./apex-iso-verify.sh
+
+- builder:
+ name: 'apex-upload-snapshot-artifact'
+ builders:
+ - shell: ./apex-upload-artifact.sh snapshot
+
+- builder:
+ name: 'apex-upload-iso-artifact'
+ builders:
+ - shell: ./apex-upload-artifact.sh iso
+
+- builder:
+ name: 'apex-upload-rpm-artifact'
+ builders:
+ - shell: ./apex-upload-artifact.sh rpm
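+# (usage sketch: job templates can now pick the artifact type per step, e.g.
+#  builders: ['apex-upload-iso-artifact'] right after the ISO build)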
- builder:
name: 'apex-gs-cleanup'
- branch-compare-type: 'ANT'
branch-pattern: '**/{branch}'
builders:
- - bottlenecks-hello
- #- bottlenecks-unit-tests
+ #- bottlenecks-hello
+ - bottlenecks-unit-tests
- job-template:
name: 'bottlenecks-merge-{stream}'
# install python packages
easy_install -U setuptools
easy_install -U pip
- pip install -r requirements.txt
+ pip install -r $WORKSPACE/requirements/verify.txt
# unit tests
- /bin/bash $WORKSPACE/tests.sh
+ /bin/bash $WORKSPACE/verify.sh
deactivate
#!/bin/bash
set -o errexit
- echo "hello"
+ echo -e "Wellcome to Bottlenecks! \nMerge event is planning to support more functions! "
#----------------------------------
# BRANCH ANCHORS
#----------------------------------
- colorado: &colorado
- stream: colorado
+ danube: &danube
+ stream: danube
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
disabled: false
pod:
- baremetal:
slave-label: compass-baremetal
- os-version: 'trusty'
- <<: *colorado
+ os-version: 'xenial'
+ <<: *danube
#-----------------------------------
# scenarios
#-----------------------------------
scenario:
- 'os-nosdn-nofeature-ha':
- disabled: false
+ disabled: true
auto-trigger-name: 'compass-{scenario}-{pod}-weekly-{stream}-trigger'
jobs:
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- - trigger-builds:
- - project: 'dovetail-compass-{pod}-proposed_tests-weekly-{stream}'
- current-parameters: false
- predefined-parameters:
- DEPLOY_SCENARIO={scenario}
- block: true
- same-node: true
- block-thresholds:
- build-step-failure-threshold: 'never'
- failure-threshold: 'never'
- unstable-threshold: 'FAILURE'
- job-template:
name: 'compass-deploy-{pod}-weekly-{stream}'
- choice:
name: COMPASS_OPENSTACK_VERSION
choices:
- - 'mitaka'
+ - 'newton'
########################
# trigger macros
########################
- trigger:
- name: 'compass-os-nosdn-nofeature-ha-baremetal-weekly-colorado-trigger'
+ name: 'compass-os-nosdn-nofeature-ha-baremetal-weekly-danube-trigger'
triggers:
- timed: 'H H * * 0'
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
description: "URL to Google Storage."
- string:
name: PPA_REPO
- default: "http://205.177.226.237:9999{ppa-pathname}"
+ default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
- string:
name: PPA_CACHE
default: "$WORKSPACE/work/repo/"
-v of_port:6653"
robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot"
- docker run -ti -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
+ docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
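+ # -t would allocate a pseudo-TTY, which breaks when the job has no terminal
+ # attached (as under Jenkins); -i alone keeps stdin open for the robot run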
- builder:
name: cperf-cleanup
echo "This is diasy4nfv basic job!"
echo "--------------------------------------------------------"
-sudo rm -rf /home/jenkins-ci/opnfv/slave_root/workspace/daisy4nfv-verify-build-master/*
#--------------------------------
testsuite:
- 'debug'
- - 'proposed_tests'
- 'compliance_set'
jobs:
dovetail-branch: '{stream}'
gs-pathname: ''
docker-tag: 'latest'
- colorado: &colorado
- stream: colorado
+ danube: &danube
+ stream: danube
branch: 'stable/{stream}'
dovetail-branch: master
gs-pathname: '/{stream}'
pod:
# - baremetal:
# slave-label: apex-baremetal
-# sut: apex
-# <<: *colorado
+# SUT: apex
+# <<: *danube
- baremetal:
slave-label: compass-baremetal
- sut: compass
- <<: *colorado
+ SUT: compass
+ <<: *danube
# - baremetal:
# slave-label: fuel-baremetal
-# sut: fuel
-# <<: *master
+# SUT: fuel
+# <<: *danube
# - baremetal:
# slave-label: joid-baremetal
-# sut: joid
-# <<: *colorado
+# SUT: joid
+# <<: *danube
testsuite:
- 'debug'
- - 'proposed_tests'
- 'compliance_set'
loop:
- 'weekly':
- job-timeout: 60
+ job-timeout: 180
jobs:
- - 'dovetail-{sut}-{pod}-{testsuite}-{loop}-{stream}'
+ - 'dovetail-{SUT}-{pod}-{testsuite}-{loop}-{stream}'
################################
# job template
################################
- job-template:
- name: 'dovetail-{sut}-{pod}-{testsuite}-{loop}-{stream}'
+ name: 'dovetail-{SUT}-{pod}-{testsuite}-{loop}-{stream}'
- disabled: false
+ disabled: true
concurrent: true
- project-parameter:
project: '{project}'
branch: '{dovetail-branch}'
- - '{sut}-defaults'
+ - '{SUT}-defaults'
- '{slave-label}-defaults'
- string:
name: DEPLOY_SCENARIO
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: false
+ disabled: true
#####################################
# patch verification phases
#####################################
while [[ $try_to_rm -lt 6 ]]; do
gsutil -m rm -r $BIFROST_GS_URL && _exitcode=$? && break
_exitcode=$?
- echo "gsutil rm failed! Trying again... (attempt #$i)"
+ echo "gsutil rm failed! Trying again... (attempt #$try_to_rm)"
let try_to_rm+=1
# Give it some time...
sleep 10
- string:
name: ANSIBLE_VERBOSITY
default: ''
+ - string:
+ name: XCI_LOOP
+ default: 'periodic'
wrappers:
- fix-workspace-permissions
- string:
name: ANSIBLE_VERBOSITY
default: '-vvvv'
+ - string:
+ name: XCI_LOOP
+ default: 'verify'
scm:
- git:
- string:
name: ANSIBLE_VERBOSITY
default: ''
+ - string:
+ name: XCI_LOOP
+ default: 'periodic'
wrappers:
- fix-workspace-permissions
- label:
name: SLAVE_LABEL
default: '{slave-label}'
+ - string:
+ name: XCI_LOOP
+ default: 'daily'
triggers:
- '{auto-trigger-name}'
predefined-parameters: |
DEPLOY_SCENARIO=$DEPLOY_SCENARIO
XCI_FLAVOR=$XCI_FLAVOR
+ XCI_LOOP=$XCI_LOOP
same-node: true
block: true
- trigger-builds:
predefined-parameters: |
DEPLOY_SCENARIO=$DEPLOY_SCENARIO
XCI_FLAVOR=$XCI_FLAVOR
+ XCI_LOOP=$XCI_LOOP
same-node: true
block: true
block-thresholds:
- string:
name: ANSIBLE_VERBOSITY
default: ''
+ - string:
+ name: XCI_LOOP
+ default: 'daily'
builders:
- description-setter:
# for daily jobs, we want to use working versions
# for periodic jobs, we will use whatever is set in the job, probably master
-if [[ "$JOB_NAME" =~ "daily" ]]; then
+if [[ "$XCI_LOOP" == "daily" ]]; then
# source pinned-vars to get releng version
source ./config/pinned-versions
# checkout the version
git checkout -q $OPNFV_RELENG_VERSION
echo "Info: Using $OPNFV_RELENG_VERSION"
-elif [[ "$JOB_NAME" =~ "periodic" ]]; then
+elif [[ "$XCI_LOOP" == "periodic" ]]; then
echo "Info: Using $OPNFV_RELENG_VERSION"
fi
# to take this into account while deploying anyways
# clone openstack-ansible
# stable/ocata already use pinned versions so this is only valid for master
-if [[ "$JOB_NAME" =~ "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
+if [[ "$XCI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
cd $WORKSPACE
# get the url to openstack-ansible git
source ./config/env-vars
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
+import json
import netaddr
import re
fields = lines[i].strip().encode().rsplit('\t')
host_id = fields[0].strip().encode()
name = 'host{0}'.format(host_id)
- node_roles = fields[1].strip().encode().lower()
+ node_roles_str = fields[1].strip().encode().lower()
+ node_roles_list = json.loads(node_roles_str)
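+ # fields[1] is expected to hold a JSON list of role names, e.g. '["controller", "odl"]'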
node_roles = [manager.Role.ODL if x == 'odl'
- else x for x in node_roles]
+ else x for x in node_roles_list]
roles = [x for x in [manager.Role.CONTROLLER,
manager.Role.COMPUTE,
manager.Role.ODL,
remote_user: root
tasks:
- name: make nfs dir
- file: "dest=/images mode=777 state=directory"
+ file: "dest=/images mode=0777 state=directory"
- name: configure service
shell: "echo 'nfs 2049/tcp' >> /etc/services && echo 'nfs 2049/udp' >> /etc/services"
- name: configure NFS
export CLEAN_DIB_IMAGES=false
export OPNFV_HOST_IP=192.168.122.2
export XCI_FLAVOR_ANSIBLE_FILE_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR
+export XCI_LOOP=${XCI_LOOP:-daily}
export JOB_NAME=${JOB_NAME:-false}
# TODO: this currently matches to bifrost ansible version
# there is perhaps better way to do this
#-------------------------------------------------------------------------------
# use releng from master until the development work with the sandbox is complete
export OPNFV_RELENG_VERSION="master"
-# HEAD of "master" as of 28.03.2017
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"2600d546ed7116f5aad81972b0987a269f3c45b4"}
-# HEAD of "master" as of 26.03.2017
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"baba7b317a5898cd73b4a11c4ce364c7e2d3d77f"}
+# HEAD of "master" as of 04.04.2017
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"6109f824e5510e794dbf1968c3859e8b6356d280"}
+# HEAD of "master" as of 04.04.2017
+export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"d9e1330c7ff9d72a604b6b4f3af765f66a01b30e"}
--- /dev/null
+#########################
+OPNFV XCI Developer Guide
+#########################
+
+This document describes the internals of XCI and how the pieces are put
+together to support different flavors and different distros in the future.
+
+This document is for anyone who wants to
+
+- do hands-on development with XCI, such as new features for XCI itself or
+  bugfixes
+- integrate new features
+- know what is going on behind the scenes
+
+It also gives guidance on how to develop for the sandbox.
+
+If you are looking for the User's Guide, please check README.rst in the root
+of the xci folder or take a look at the
+`Wiki <https://wiki.opnfv.org/display/INF/OpenStack>`_.
+
+===================================
+Components of XCI Developer Sandbox
+===================================
+
+TBD
+
+=============
+Detailed Flow
+=============
+
+TBD
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+# these versions are extracted based on the osa commit d9e1330c7ff9d72a604b6b4f3af765f66a01b30e on 04.04.2017
+# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=d9e1330c7ff9d72a604b6b4f3af765f66a01b30e
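+# (to regenerate these pins against a newer osa commit, a sketch:
+#  git -C openstack-ansible show <commit>:ansible-role-requirements.yml)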
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: master
+ version: 364fc9fcd8ff652546c13d9c20ac808bc0e35f66
- name: pip_install
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: master
+ version: 793ae4d01397bd91ebe18e9670e8e27d1ae91960
- name: galera_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: master
+ version: c093c13e01826da545bf9a0259e0be441bc1b5e1
- name: galera_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: master
+ version: fd0a6b104a32badbe7e7594e2c829261a53bfb11
- name: ceph_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: master
+ version: 9149bfa8e3c4284b656834ba7765ea3aa48bec2e
- name: haproxy_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: master
+ version: 32415ab81c61083ac5a83b65274703e4a5470e5e
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
- version: master
+ version: 4f7c8eb16e3cbd8c8748f126c1eea73db5c8efe9
- name: lxc_container_create
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: master
+ version: 097da38126d90cfca36cdc3955aaf658a00db599
- name: lxc_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: master
+ version: 2931d0c87a1c592ad7f1f2f83cdcf468e8dea932
- name: memcached_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: master
+ version: 58e17aa13ebe7b0aa5da7c00afc75d6716d2720d
- name: openstack-ansible-security
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-security
- version: master
+ version: 9d745ec4fe8ac3e6d6cbb2412abe5196a9d2dad7
- name: openstack_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: master
+ version: 2076dfddf418b1bdd64d3782346823902aa996bc
- name: os_keystone
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: master
+ version: cee7a02143a1826479e6444c6fb5f1c2b6074ab7
- name: openstack_openrc
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: master
+ version: fb98ad8d7bfe7fba0c964cb061313f1b8767c4b0
- name: os_aodh
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: master
+ version: 9dcacb8fd6feef02e485f99c83535707ae67876b
- name: os_barbican
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: master
+ version: bb3f39cb2f3c31c6980aa65c8953ff6293b992c0
- name: os_ceilometer
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: master
+ version: 178ad8245fa019f0610c628c58c377997b011e8a
- name: os_cinder
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: master
+ version: 1321fd39d8f55d1dc3baf91b4194469b349d7dc4
- name: os_glance
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: master
+ version: f39ef212bfa2edff8334bfb632cc463001c77c11
- name: os_gnocchi
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: master
+ version: 318bd76e5e72402e8ff5b372b469c27a9395341b
- name: os_heat
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: master
+ version: 07d59ddb757b2d2557fba52ac537803e646e65b4
- name: os_horizon
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: master
+ version: 69ef49c4f7a42f082f4bcff824d13f57145e2b83
- name: os_ironic
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: master
+ version: 57e8a0eaaa2159f33e64a1b037180383196919d1
- name: os_magnum
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: master
+ version: 8329c257dff25686827bd1cc904506d76ad1d12f
- name: os_trove
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: master
+ version: b948402c76d6188caa7be376098354cdb850d638
- name: os_neutron
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
- version: master
+ version: 2a92a4e1857e7457683aefd87ee5a4e751fc701a
- name: os_nova
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: master
+ version: 511963b7921ec7c2db24e8ee1d71a940b0aafae4
- name: os_rally
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: master
+ version: 96153c5b3285d11d00611a03135c9d8f267e0f52
- name: os_sahara
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: master
+ version: 012d3f3530f878e5143d58380f94d1f514baad04
- name: os_swift
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: master
+ version: d62d6a23ac0b01d0320dbcb6c710dfd5f3cecfdf
- name: os_tempest
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: master
+ version: 9d2bfb09d1ebbc9102329b0d42de33aa321e57b1
- name: plugins
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: master
+ version: 3d2e23bb7e1d6775789d7f65ce8a878a7ee1d3c7
- name: rabbitmq_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: master
+ version: 9b0ce64fe235705e237bc4b476ecc0ad602d67a8
- name: repo_build
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: master
+ version: fe3ae20f74a912925d5c78040984957a6d55f9de
- name: repo_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: master
+ version: 7ea0820e0941282cd5c5cc263e939ffbee54ba52
- name: rsyslog_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: master
+ version: 19615e47137eee46ee92c0308532fe1d2212333c
- name: rsyslog_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: master
+ version: efd7b21798da49802012e390a0ddf7cc38636eeb
- name: sshd
scm: git
src: https://github.com/willshersystems/ansible-sshd
- version: master
+ version: 426e11c4dffeca09fcc4d16103a91e5e65180040
- name: bird
scm: git
src: https://github.com/logan2211/ansible-bird
- version: master
+ version: 2c4d29560d3617abddf0e63e0c95536364dedd92
- name: etcd
scm: git
src: https://github.com/logan2211/ansible-etcd
- version: master
+ version: ef63b0c5fd352b61084fd5aca286ee7f3fea932b
- name: unbound
scm: git
src: https://github.com/logan2211/ansible-unbound
- version: master
+ version: 5329d03eb9c15373d648a801563087c576bbfcde
- name: resolvconf
scm: git
src: https://github.com/logan2211/ansible-resolvconf
- version: master
+ version: 3b2b7cf2e900b194829565b351bf32bb63954548
- name: os_designate
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: master
+ version: b7098a6bdea73c869f45a86e0cc78d21b032161e
- name: ceph.ceph-common
scm: git
src: https://github.com/ceph/ansible-ceph-common
- version: master
+ version: ef149767fa9565ec887f0bdb007ff752bd61e5d5
- name: ceph.ceph-docker-common
scm: git
src: https://github.com/ceph/ansible-ceph-docker-common
- version: master
+ version: ca86fd0ef6d24aa2c750a625acdcb8012c374aa0
- name: ceph-mon
scm: git
src: https://github.com/ceph/ansible-ceph-mon
- version: master
+ version: c5be4d6056dfe6a482ca3fcc483a6050cc8929a1
- name: ceph-osd
scm: git
src: https://github.com/ceph/ansible-ceph-osd
- version: master
-- name: os_octavia
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: master
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: master
+ version: 7bc5a61ceb96e487b7a9fe9643f6dafa6492f2b5
+++ /dev/null
-# /etc/exports: the access control list for filesystems which may be exported
-# to NFS clients. See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
-#
-# glance images are stored on compute host and made available to image hosts via nfs
-# see image_hosts section in openstack_user_config.yml for details
-/images *(rw,sync,no_subtree_check,no_root_squash)
-
---
host_info: {
'opnfv': {
- 'MGMT_IP': '172.29.236.10',
'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
'STORAGE_IP': '172.29.244.10'
},
'controller00': {
- 'MGMT_IP': '172.29.236.11',
'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
'STORAGE_IP': '172.29.244.11'
},
'controller01': {
- 'MGMT_IP': '172.29.236.12',
'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
'STORAGE_IP': '172.29.244.12'
},
'controller02': {
- 'MGMT_IP': '172.29.236.13',
'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
'STORAGE_IP': '172.29.244.13'
},
'compute00': {
- 'MGMT_IP': '172.29.236.14',
'VLAN_IP': '192.168.122.6',
- 'STORAGE_IP': '172.29.244.14',
- 'VLAN_IP_SECOND': '173.29.241.1',
- 'VXLAN_IP': '172.29.240.14'
+ 'MGMT_IP': '172.29.236.14',
+ 'VXLAN_IP': '172.29.240.14',
+ 'STORAGE_IP': '172.29.244.14'
},
'compute01': {
- 'MGMT_IP': '172.29.236.15',
'VLAN_IP': '192.168.122.7',
- 'STORAGE_IP': '172.29.244.15',
- 'VLAN_IP_SECOND': '173.29.241.2',
- 'VXLAN_IP': '172.29.240.15'
+ 'MGMT_IP': '172.29.236.15',
+ 'VXLAN_IP': '172.29.240.15',
+ 'STORAGE_IP': '172.29.244.15'
}
}
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.15"
+ - server: "172.29.244.14"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.15"
+ - server: "172.29.244.14"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.15"
+ - server: "172.29.244.14"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
controller02:
ip: 172.29.236.13
-# ceilometer (telemetry API)
-metering-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# aodh (telemetry alarm service)
-metering-alarm_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# gnocchi (telemetry metrics storage)
-metrics_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
# nova hypervisors
compute_hosts:
compute00:
compute01:
ip: 172.29.236.15
-# ceilometer compute agent (telemetry)
-metering-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.11"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
controller01:
ip: 172.29.236.12
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.12"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
controller02:
ip: 172.29.236.13
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.13"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+++ /dev/null
----
-- hosts: all
- remote_user: root
- tasks:
- - name: add public key to host
- copy:
- src: ../file/authorized_keys
- dest: /root/.ssh/authorized_keys
- - name: configure modules
- copy:
- src: ../file/modules
- dest: /etc/modules
-
-- hosts: controller
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
- # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
- - role: configure-nfs
---
host_info: {
'opnfv': {
- 'MGMT_IP': '172.29.236.10',
'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
'STORAGE_IP': '172.29.244.10'
},
'controller00': {
- 'MGMT_IP': '172.29.236.11',
'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
'STORAGE_IP': '172.29.244.11'
},
'compute00': {
- 'MGMT_IP': '172.29.236.12',
'VLAN_IP': '192.168.122.4',
- 'VLAN_IP_SECOND': '173.29.241.1',
+ 'MGMT_IP': '172.29.236.12',
'VXLAN_IP': '172.29.240.12',
'STORAGE_IP': '172.29.244.12'
},
controller00:
ip: 172.29.236.11
-# ceilometer (telemetry API)
-metering-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# aodh (telemetry alarm service)
-metering-alarm_hosts:
- controller00:
- ip: 172.29.236.11
-
-# gnocchi (telemetry metrics storage)
-metrics_hosts:
- controller00:
- ip: 172.29.236.11
-
# nova hypervisors
compute_hosts:
compute00:
ip: 172.29.236.12
-# ceilometer compute agent (telemetry)
-metering-compute_hosts:
- compute00:
- ip: 172.29.236.12
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.11"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
+++ /dev/null
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-bonding
-8021q
+++ /dev/null
----
-- hosts: all
- remote_user: root
- tasks:
- - name: add public key to host
- copy:
- src: ../file/authorized_keys
- dest: /root/.ssh/authorized_keys
- - name: configure modules
- copy:
- src: ../file/modules
- dest: /etc/modules
-
-- hosts: controller
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
- remote_user: root
- vars_files:
- - ../var/{{ ansible_os_family }}.yml
- - ../var/flavor-vars.yml
- roles:
- # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute01
- remote_user: root
- # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
- roles:
- - role: configure-nfs
---
host_info: {
'opnfv': {
- 'MGMT_IP': '172.29.236.10',
'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
'STORAGE_IP': '172.29.244.10'
},
'controller00': {
- 'MGMT_IP': '172.29.236.11',
'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
'STORAGE_IP': '172.29.244.11'
},
'compute00': {
- 'MGMT_IP': '172.29.236.12',
'VLAN_IP': '192.168.122.4',
- 'VLAN_IP_SECOND': '173.29.241.1',
+ 'MGMT_IP': '172.29.236.12',
'VXLAN_IP': '172.29.240.12',
'STORAGE_IP': '172.29.244.12'
},
'compute01': {
- 'MGMT_IP': '172.29.236.13',
'VLAN_IP': '192.168.122.5',
- 'VLAN_IP_SECOND': '173.29.241.2',
+ 'MGMT_IP': '172.29.236.13',
'VXLAN_IP': '172.29.240.13',
'STORAGE_IP': '172.29.244.13'
}
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.13"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
controller00:
ip: 172.29.236.11
-# ceilometer (telemetry API)
-metering-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# aodh (telemetry alarm service)
-metering-alarm_hosts:
- controller00:
- ip: 172.29.236.11
-
-# gnocchi (telemetry metrics storage)
-metrics_hosts:
- controller00:
- ip: 172.29.236.11
-
# nova hypervisors
compute_hosts:
compute00:
compute01:
ip: 172.29.236.13
-# ceilometer compute agent (telemetry)
-metering-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
container_vars:
cinder_backends:
limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.11"
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
- include: os-neutron-install.yml
- include: os-heat-install.yml
- include: os-horizon-install.yml
-- include: os-ceilometer-install.yml
-- include: os-aodh-install.yml
-- include: os-designate-install.yml
-#NOTE(stevelle) Ensure Gnocchi identities exist before Swift
-- include: os-gnocchi-install.yml
- when:
- - gnocchi_storage_driver is defined
- - gnocchi_storage_driver == 'swift'
- vars:
- gnocchi_identity_only: True
- include: os-swift-install.yml
-- include: os-gnocchi-install.yml
- include: os-ironic-install.yml
- include: os-tempest-install.yml
path: "{{LOG_PATH}}"
state: directory
recurse: no
- # when the deployment is not aio, we use playbook, configure-targethosts.yml, to configure all the hosts
- - name: copy multihost playbook
- copy:
- src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-targethosts.yml"
- dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
- when: XCI_FLAVOR != "aio"
# when the deployment is aio, we overwrite and use the configure-opnfvhost.yml playbook, since everything gets installed on the opnfv host
- name: copy aio playbook
copy:
replace: '\1haproxy_state: enabled'
- name: copy OPNFV OpenStack playbook
shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
+ # Copy pinned role requirements if we are running as part of daily CI loop
- name: copy OPNFV role requirements
shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
+ when: XCI_LOOP == "daily"
- hosts: localhost
remote_user: root
tasks:
copy:
src: ../file/authorized_keys
dest: /root/.ssh/authorized_keys
- - name: configure modules
- copy:
- src: ../file/modules
- dest: /etc/modules
- hosts: controller
remote_user: root
- ../var/flavor-vars.yml
roles:
# TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+ - { role: configure-network, src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+ # we need to force a time sync with ntp or the nodes will drift out of sync
+ - role: synchronize-time
- hosts: compute
remote_user: root
- ../var/flavor-vars.yml
roles:
# TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
- - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+ - { role: configure-network, src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+ # we need to force a time sync with ntp or the nodes will drift out of sync
+ - role: synchronize-time
-- hosts: compute01
+- hosts: compute00
remote_user: root
# TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
roles:
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# TODO: this role needs to be adjusted for different distros
-- name: configure network for {{ ansible_os_family }} on interface {{ interface }}
- template:
- src: "{{ src }}"
- dest: "{{ dest }}"
-- name: restart ubuntu xenial network service
- shell: "/sbin/ifconfig {{ interface }} 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+- block:
+ - name: configure modules
+ lineinfile:
+ dest: /etc/modules
+ state: present
+ create: yes
+ line: "8021q"
+ - name: add modules
+ modprobe:
+ name: 8021q
+ state: present
+ - name: ensure glean rules are removed
+ file:
+ path: "/etc/udev/rules.d/99-glean.rules"
+ state: absent
+ - name: ensure interfaces.d folder is empty
+ shell: "/bin/rm -rf /etc/network/interfaces.d/*"
+ - name: ensure interfaces file is updated
+ template:
+ src: "{{ src }}"
+ dest: "{{ dest }}"
+ - name: restart network service
+ shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
+ when: ansible_distribution_release == "xenial"
##############################################################################
# TODO: this is for xenial and needs to be adjusted for different distros
- block:
- - name: make NFS dir
+ - name: make NFS directories
file:
- dest: /images
- mode: 777
+ dest: "{{ item }}"
+ mode: 0777
state: directory
+ with_items:
+ - "/images"
+ - "/volumes"
- name: configure NFS service
lineinfile:
dest: /etc/services
with_items:
- "nfs 2049/tcp"
- "nfs 2049/udp"
- - name: configure NFS exports on ubuntu xenial
- copy:
- src: ../file/exports
+ - name: configure NFS exports
+ lineinfile:
dest: /etc/exports
- when: ansible_distribution_release == "xenial"
+ state: present
+ create: yes
+ line: "{{ item }}"
+ with_items:
+ - "/images *(rw,sync,no_subtree_check,no_root_squash)"
+ - "/volumes *(rw,sync,no_subtree_check,no_root_squash)"
# TODO: the service name might be different on other distros and needs to be adjusted
- name: restart ubuntu xenial NFS service
service:
--- /dev/null
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this role needs to be adjusted for different distros
+- block:
+ - name: restart chrony
+ service:
+ name: chrony
+ state: restarted
+ - name: synchronize time
+ shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
+ when: ansible_distribution_release == "xenial"
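+# (chronyc 'burst 4/4' requests four quick NTP measurements; 'makestep' then
+# steps the clock immediately instead of slewing it)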
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
-
# Physical interface
auto {{ interface }}
iface {{ interface }} inet manual
iface {{ interface }}.30 inet manual
vlan-raw-device {{ interface }}
-# Storage network VLAN interface (optional)
+# Storage network VLAN interface
auto {{ interface }}.20
iface {{ interface }}.20 inet manual
vlan-raw-device {{ interface }}
address {{host_info[inventory_hostname].VLAN_IP}}
netmask 255.255.255.0
gateway 192.168.122.1
+ dns-nameserver 8.8.8.8 8.8.4.4
offload-sg off
# Create veth pair, don't bomb if already exists
pre-up ip link add br-vlan-veth type veth peer name eth12 || true
post-down ip link del br-vlan-veth || true
bridge_ports br-vlan-veth
-# Add an additional address to br-vlan
-iface br-vlan inet static
- # Flat network default gateway
- # -- This needs to exist somewhere for network reachability
- # -- from the router namespace for floating IP paths.
- # -- Putting this here is primarily for tempest to work.
- address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
- netmask 255.255.252.0
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
netmask 255.255.252.0
# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports {{ interface }}.30
+ address {{host_info[inventory_hostname].VXLAN_IP}}
+ netmask 255.255.252.0
# OpenStack Networking VLAN bridge
auto br-vlan
gateway 192.168.122.1
dns-nameserver 8.8.8.8 8.8.4.4
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
# The loopback network interface
auto lo
iface lo inet loopback
netmask 255.255.252.0
# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports {{ interface }}.30
+ address {{ host_info[inventory_hostname].VXLAN_IP }}
+ netmask 255.255.252.0
# OpenStack Networking VLAN bridge
auto br-vlan
gateway 192.168.122.1
dns-nameserver 8.8.8.8 8.8.4.4
-# compute1 Storage bridge
+# OpenStack Storage bridge
auto br-storage
iface br-storage inet static
bridge_stp off
XCI_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_ANSIBLE_PIP_VERSION') }}"
XCI_FLAVOR: "{{ lookup('env','XCI_FLAVOR') }}"
XCI_FLAVOR_ANSIBLE_FILE_PATH: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}"
+XCI_LOOP: "{{ lookup('env','XCI_LOOP') }}"
LOG_PATH: "{{ lookup('env','LOG_PATH') }}"
OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}"
ansible-playbook $ANSIBLE_VERBOSITY -i inventory provision-vm-nodes.yml
echo "-----------------------------------------------------------------------"
echo "Info: VM nodes are provisioned!"
-
+source $OPENSTACK_BIFROST_PATH/env-vars
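+# list the enrolled nodes so the console log captures what bifrost provisioned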
+ironic node-list
+echo
#-------------------------------------------------------------------------------
# Configure localhost
#-------------------------------------------------------------------------------