From: Serena Feng
Date: Wed, 19 Apr 2017 07:56:29 +0000 (+0000)
Subject: Merge "unify data existence check"
X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=139e621ba828d039cee1066ed090e2d7b39bc6b1;hp=13e3d7e3c2649e7e53ea5578f48cf55f3b45946a;p=releng.git

Merge "unify data existence check"
---

diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index 4b64fd14f..06f7622f5 100755
--- a/jjb/apex/apex-deploy.sh
+++ b/jjb/apex/apex-deploy.sh
@@ -189,7 +189,7 @@ if [[ "$JOB_NAME" == *virtual* ]]; then
   # settings for virtual deployment
   DEPLOY_CMD="${DEPLOY_CMD} -v"
   if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
-    DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 14 --virtual-compute-ram 8"
+    DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
   fi
   if [[ "$JOB_NAME" == *csit* ]]; then
     DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
diff --git a/jjb/apex/apex-iso-verify.sh b/jjb/apex/apex-iso-verify.sh
new file mode 100755
index 000000000..d80de354a
--- /dev/null
+++ b/jjb/apex/apex-iso-verify.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# log info to console
+echo "Starting the Apex iso verify."
+echo "--------------------------------------------------------"
+echo
+
+if ! rpm -q virt-install > /dev/null; then
+  sudo yum -y install virt-install
+fi
+
+# define a clean function
+rm_apex_iso_verify () {
+if sudo virsh list --all | grep apex-iso-verify | grep running; then
+  sudo virsh destroy apex-iso-verify
+fi
+if sudo virsh list --all | grep apex-iso-verify; then
+  sudo virsh undefine apex-iso-verify
+fi
+}
+
+# Make sure a pre-existing iso-verify isn't there
+rm_apex_iso_verify
+
+# run an install from the iso
+# This streams a serial console to tcp port 3737 on localhost
+sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
+  --accelerate -v --noautoconsole --nographics \
+  --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
+  -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+  --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
+  --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+  --serial tcp,host=:3737,protocol=raw
+
+# Attach to tcpport 3737 and echo the output to stdout
+# watch for a 5 min time out, a power off message or a tcp disconnect
+python << EOP
+#!/usr/bin/env python
+
+import sys
+import socket
+from time import sleep
+from time import time
+
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 3737
+BUFFER_SIZE = 1024
+
+try:
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((TCP_IP, TCP_PORT))
+except Exception, e:
+    print "Failed to connect to the iso-verofy vm's serial console"
+    print "this probably means that the VM failed to start"
+    raise e
+
+activity = time()
+data = s.recv(BUFFER_SIZE)
+last_data = data
+while time() - activity < 300:
+    try:
+        if data != last_data:
+            activity = time()
+        last_data = data
+        data = s.recv(BUFFER_SIZE)
+        sys.stdout.write(data)
+        if 'Powering off' in data:
+            break
+        sleep(.5)
+    except socket.error, e:
+        # for now assuming that the connection was closed
+        # which is good, means the vm finished installing
+        # printing the error output just in case we need to debug
+        print "VM console connection lost: %s" % msg
+        break
+s.close()
+
+if time() - activity > 300:
+    print "failing due to console inactivity"
+    exit(1)
+else:
+    print "Success!"
+EOP + +# save the python return code for after cleanup +python_rc=$? + +# clean up +rm_apex_iso_verify + +# Exit with the RC of the Python job +exit $python_rc + +echo +echo "--------------------------------------------------------" +echo "Done!" diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh index c2de7d70d..4a2a64d68 100755 --- a/jjb/apex/apex-upload-artifact.sh +++ b/jjb/apex/apex-upload-artifact.sh @@ -4,7 +4,7 @@ set -o nounset set -o pipefail # log info to console -echo "Uploading the Apex artifact. This could take some time..." +echo "Uploading the Apex $1 artifact. This could take some time..." echo "--------------------------------------------------------" echo @@ -18,7 +18,7 @@ echo "Cloning releng repository..." [ -d releng ] && rm -rf releng git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null #this is where we import the siging key -if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then +if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then source $WORKSPACE/releng/utils/gpg_import_key.sh fi @@ -88,17 +88,21 @@ if echo $WORKSPACE | grep promote > /dev/null; then uploadsnap elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then echo "Signing Key avaliable" - signiso - uploadiso - signrpm - uploadrpm + if [ $1 == 'iso' ]; then + signiso + uploadiso + fi + if [ $1 == 'rpm' ]; then + signrpm + uploadrpm + fi else - uploadiso - uploadrpm + if [ $1 == 'iso' ]; then uploadiso; fi + if [ $1 == 'rpm' ]; then uploadrpm; fi fi echo echo "--------------------------------------------------------" echo "Done!" -echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso" -echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)" +if [ $1 == 'iso' ]; then echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"; fi +if [ $1 == 'rpm' ]; then echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"; fi diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml index e7982ba55..37bbbb6f1 100644 --- a/jjb/apex/apex.yml +++ b/jjb/apex/apex.yml @@ -443,7 +443,16 @@ git-revision: false same-node: true block: true - - 'apex-upload-artifact' + - 'apex-upload-rpm-artifact' + - trigger-builds: + - project: 'apex-iso-verify-{stream}' + predefined-parameters: | + BUILD_DIRECTORY=apex-build-{stream}/.build + OPNFV_CLEAN=yes + git-revision: false + block: true + same-node: true + - 'apex-upload-iso-artifact' - job-template: name: 'apex-deploy-virtual-{scenario}-{stream}' @@ -1013,8 +1022,7 @@ same-node: true - shell: !include-raw-escape: ./apex-snapshot-create.sh - - shell: - !include-raw-escape: ./apex-upload-artifact.sh + - 'apex-upload-snapshot-artifact' # FDIO promote - job-template: @@ -1062,8 +1070,7 @@ same-node: true - shell: !include-raw-escape: ./apex-snapshot-create.sh - - shell: - !include-raw-escape: ./apex-upload-artifact.sh + - 'apex-upload-snapshot-artifact' - job-template: name: 'apex-gs-clean-{stream}' @@ -1147,10 +1154,25 @@ !include-raw: ./apex-workspace-cleanup.sh - builder: - name: 'apex-upload-artifact' + name: 'apex-iso-verify' builders: - shell: - !include-raw: ./apex-upload-artifact.sh + !include-raw: ./apex-iso-verify.sh + +- builder: + name: 'apex-upload-snapshot-artifact' + builders: + - shell: ./apex-upload-artifact.sh snapshot + +- builder: + name: 'apex-upload-iso-artifact' + builders: + - shell: ./apex-upload-artifact.sh iso + +- builder: + name: 'apex-upload-rpm-artifact' + builders: + - shell: 
./apex-upload-artifact.sh rpm - builder: name: 'apex-gs-cleanup' diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml index a0abb9331..5dced2aad 100644 --- a/jjb/bottlenecks/bottlenecks-project-jobs.yml +++ b/jjb/bottlenecks/bottlenecks-project-jobs.yml @@ -70,8 +70,8 @@ - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' builders: - - bottlenecks-hello - #- bottlenecks-unit-tests + #- bottlenecks-hello + - bottlenecks-unit-tests - job-template: name: 'bottlenecks-merge-{stream}' @@ -206,10 +206,10 @@ # install python packages easy_install -U setuptools easy_install -U pip - pip install -r requirements.txt + pip install -r $WORKSPACE/requirements/verify.txt # unit tests - /bin/bash $WORKSPACE/tests.sh + /bin/bash $WORKSPACE/verify.sh deactivate @@ -220,4 +220,4 @@ #!/bin/bash set -o errexit - echo "hello" + echo -e "Wellcome to Bottlenecks! \nMerge event is planning to support more functions! " diff --git a/jjb/compass4nfv/compass-dovetail-jobs.yml b/jjb/compass4nfv/compass-dovetail-jobs.yml index d49d0ec5f..30c80e648 100644 --- a/jjb/compass4nfv/compass-dovetail-jobs.yml +++ b/jjb/compass4nfv/compass-dovetail-jobs.yml @@ -6,8 +6,8 @@ #---------------------------------- # BRANCH ANCHORS #---------------------------------- - colorado: &colorado - stream: colorado + danube: &danube + stream: danube branch: 'stable/{stream}' gs-pathname: '/{stream}' disabled: false @@ -20,14 +20,14 @@ pod: - baremetal: slave-label: compass-baremetal - os-version: 'trusty' - <<: *colorado + os-version: 'xenial' + <<: *danube #----------------------------------- # scenarios #----------------------------------- scenario: - 'os-nosdn-nofeature-ha': - disabled: false + disabled: true auto-trigger-name: 'compass-{scenario}-{pod}-weekly-{stream}-trigger' jobs: @@ -108,17 +108,6 @@ build-step-failure-threshold: 'never' failure-threshold: 'never' unstable-threshold: 'FAILURE' - - trigger-builds: - - project: 'dovetail-compass-{pod}-proposed_tests-weekly-{stream}' - current-parameters: false - predefined-parameters: - DEPLOY_SCENARIO={scenario} - block: true - same-node: true - block-thresholds: - build-step-failure-threshold: 'never' - failure-threshold: 'never' - unstable-threshold: 'FAILURE' - job-template: name: 'compass-deploy-{pod}-weekly-{stream}' @@ -192,13 +181,13 @@ - choice: name: COMPASS_OPENSTACK_VERSION choices: - - 'mitaka' + - 'newton' ######################## # trigger macros ######################## - trigger: - name: 'compass-os-nosdn-nofeature-ha-baremetal-weekly-colorado-trigger' + name: 'compass-os-nosdn-nofeature-ha-baremetal-weekly-danube-trigger' triggers: - timed: 'H H * * 0' diff --git a/jjb/compass4nfv/compass-project-jobs.yml b/jjb/compass4nfv/compass-project-jobs.yml index f962518e0..59482459e 100644 --- a/jjb/compass4nfv/compass-project-jobs.yml +++ b/jjb/compass4nfv/compass-project-jobs.yml @@ -125,7 +125,7 @@ description: "URL to Google Storage." - string: name: PPA_REPO - default: "http://205.177.226.237:9999{ppa-pathname}" + default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}" - string: name: PPA_CACHE default: "$WORKSPACE/work/repo/" diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml index 14279e649..56f54d838 100644 --- a/jjb/compass4nfv/compass-verify-jobs.yml +++ b/jjb/compass4nfv/compass-verify-jobs.yml @@ -339,7 +339,7 @@ description: "URL to Google Storage." 
- string: name: PPA_REPO - default: "http://205.177.226.237:9999{ppa-pathname}" + default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}" - string: name: PPA_CACHE default: "$WORKSPACE/work/repo/" diff --git a/jjb/cperf/cperf-ci-jobs.yml b/jjb/cperf/cperf-ci-jobs.yml index f6e068530..dc209d644 100644 --- a/jjb/cperf/cperf-ci-jobs.yml +++ b/jjb/cperf/cperf-ci-jobs.yml @@ -162,7 +162,7 @@ -v of_port:6653" robot_suite="/home/opnfv/repos/odl_test/csit/suites/openflowplugin/Performance/010_Cbench.robot" - docker run -ti -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite} + docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite} - builder: name: cperf-cleanup diff --git a/jjb/daisy4nfv/daisy4nfv-basic.sh b/jjb/daisy4nfv/daisy4nfv-basic.sh index 04b9b7bfa..87f5482e0 100755 --- a/jjb/daisy4nfv/daisy4nfv-basic.sh +++ b/jjb/daisy4nfv/daisy4nfv-basic.sh @@ -4,4 +4,3 @@ echo "--------------------------------------------------------" echo "This is diasy4nfv basic job!" echo "--------------------------------------------------------" -sudo rm -rf /home/jenkins-ci/opnfv/slave_root/workspace/daisy4nfv-verify-build-master/* diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml index 5651fc3f2..869048088 100644 --- a/jjb/dovetail/dovetail-ci-jobs.yml +++ b/jjb/dovetail/dovetail-ci-jobs.yml @@ -140,7 +140,6 @@ #-------------------------------- testsuite: - 'debug' - - 'proposed_tests' - 'compliance_set' jobs: diff --git a/jjb/dovetail/dovetail-weekly-jobs.yml b/jjb/dovetail/dovetail-weekly-jobs.yml index eaa11b54a..915feb5e8 100644 --- a/jjb/dovetail/dovetail-weekly-jobs.yml +++ b/jjb/dovetail/dovetail-weekly-jobs.yml @@ -10,8 +10,8 @@ dovetail-branch: '{stream}' gs-pathname: '' docker-tag: 'latest' - colorado: &colorado - stream: colorado + danube: &danube + stream: danube branch: 'stable/{stream}' dovetail-branch: master gs-pathname: '/{stream}' @@ -28,40 +28,39 @@ pod: # - baremetal: # slave-label: apex-baremetal -# sut: apex -# <<: *colorado +# SUT: apex +# <<: *danube - baremetal: slave-label: compass-baremetal - sut: compass - <<: *colorado + SUT: compass + <<: *danube # - baremetal: # slave-label: fuel-baremetal -# sut: fuel -# <<: *master +# SUT: fuel +# <<: *danube # - baremetal: # slave-label: joid-baremetal -# sut: joid -# <<: *colorado +# SUT: joid +# <<: *danube testsuite: - 'debug' - - 'proposed_tests' - 'compliance_set' loop: - 'weekly': - job-timeout: 60 + job-timeout: 180 jobs: - - 'dovetail-{sut}-{pod}-{testsuite}-{loop}-{stream}' + - 'dovetail-{SUT}-{pod}-{testsuite}-{loop}-{stream}' ################################ # job template ################################ - job-template: - name: 'dovetail-{sut}-{pod}-{testsuite}-{loop}-{stream}' + name: 'dovetail-{SUT}-{pod}-{testsuite}-{loop}-{stream}' - disabled: false + disabled: true concurrent: true @@ -84,7 +83,7 @@ - project-parameter: project: '{project}' branch: '{dovetail-branch}' - - '{sut}-defaults' + - '{SUT}-defaults' - '{slave-label}-defaults' - string: name: DEPLOY_SCENARIO diff --git a/jjb/kvmfornfv/kvmfornfv.yml b/jjb/kvmfornfv/kvmfornfv.yml index 8d607f985..9624778f8 100644 --- a/jjb/kvmfornfv/kvmfornfv.yml +++ b/jjb/kvmfornfv/kvmfornfv.yml @@ -11,7 +11,7 @@ - danube: branch: 'stable/{stream}' gs-pathname: '/{stream}' - disabled: false + disabled: true ##################################### # patch verification phases ##################################### diff --git a/jjb/xci/bifrost-cleanup-job.yml b/jjb/xci/bifrost-cleanup-job.yml index 
d4b2157da..d5a444d09 100644 --- a/jjb/xci/bifrost-cleanup-job.yml +++ b/jjb/xci/bifrost-cleanup-job.yml @@ -69,7 +69,7 @@ while [[ $try_to_rm -lt 6 ]]; do gsutil -m rm -r $BIFROST_GS_URL && _exitcode=$? && break _exitcode=$? - echo "gsutil rm failed! Trying again... (attempt #$i)" + echo "gsutil rm failed! Trying again... (attempt #$try_to_rm)" let try_to_rm += 1 # Give it some time... sleep 10 diff --git a/jjb/xci/bifrost-periodic-jobs.yml b/jjb/xci/bifrost-periodic-jobs.yml index 0c29fd3c9..3e9ff678e 100644 --- a/jjb/xci/bifrost-periodic-jobs.yml +++ b/jjb/xci/bifrost-periodic-jobs.yml @@ -122,6 +122,9 @@ - string: name: ANSIBLE_VERBOSITY default: '' + - string: + name: XCI_LOOP + default: 'periodic' wrappers: - fix-workspace-permissions diff --git a/jjb/xci/bifrost-verify-jobs.yml b/jjb/xci/bifrost-verify-jobs.yml index 80c816ca1..806829620 100644 --- a/jjb/xci/bifrost-verify-jobs.yml +++ b/jjb/xci/bifrost-verify-jobs.yml @@ -140,6 +140,9 @@ - string: name: ANSIBLE_VERBOSITY default: '-vvvv' + - string: + name: XCI_LOOP + default: 'verify' scm: - git: diff --git a/jjb/xci/osa-periodic-jobs.yml b/jjb/xci/osa-periodic-jobs.yml index 42b49411b..56a4b18b4 100644 --- a/jjb/xci/osa-periodic-jobs.yml +++ b/jjb/xci/osa-periodic-jobs.yml @@ -119,6 +119,9 @@ - string: name: ANSIBLE_VERBOSITY default: '' + - string: + name: XCI_LOOP + default: 'periodic' wrappers: - fix-workspace-permissions diff --git a/jjb/xci/xci-daily-jobs.yml b/jjb/xci/xci-daily-jobs.yml index 94bfafed0..64e13d3eb 100644 --- a/jjb/xci/xci-daily-jobs.yml +++ b/jjb/xci/xci-daily-jobs.yml @@ -109,6 +109,9 @@ - label: name: SLAVE_LABEL default: '{slave-label}' + - string: + name: XCI_LOOP + default: 'daily' triggers: - '{auto-trigger-name}' @@ -125,6 +128,7 @@ predefined-parameters: | DEPLOY_SCENARIO=$DEPLOY_SCENARIO XCI_FLAVOR=$XCI_FLAVOR + XCI_LOOP=$XCI_LOOP same-node: true block: true - trigger-builds: @@ -133,6 +137,7 @@ predefined-parameters: | DEPLOY_SCENARIO=$DEPLOY_SCENARIO XCI_FLAVOR=$XCI_FLAVOR + XCI_LOOP=$XCI_LOOP same-node: true block: true block-thresholds: @@ -205,6 +210,9 @@ - string: name: ANSIBLE_VERBOSITY default: '' + - string: + name: XCI_LOOP + default: 'daily' builders: - description-setter: diff --git a/jjb/xci/xci-deploy.sh b/jjb/xci/xci-deploy.sh index 07ca795da..b007b852f 100755 --- a/jjb/xci/xci-deploy.sh +++ b/jjb/xci/xci-deploy.sh @@ -15,14 +15,14 @@ cd $WORKSPACE/prototypes/xci # for daily jobs, we want to use working versions # for periodic jobs, we will use whatever is set in the job, probably master -if [[ "$JOB_NAME" =~ "daily" ]]; then +if [[ "$XCI_LOOP" == "daily" ]]; then # source pinned-vars to get releng version source ./config/pinned-versions # checkout the version git checkout -q $OPNFV_RELENG_VERSION echo "Info: Using $OPNFV_RELENG_VERSION" -elif [[ "$JOB_NAME" =~ "periodic" ]]; then +elif [[ "$XCI_LOOP" == "periodic" ]]; then echo "Info: Using $OPNFV_RELENG_VERSION" fi @@ -31,7 +31,7 @@ fi # to take this into account while deploying anyways # clone openstack-ansible # stable/ocata already use pinned versions so this is only valid for master -if [[ "$JOB_NAME" =~ "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then +if [[ "$XCI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then cd $WORKSPACE # get the url to openstack-ansible git source ./config/env-vars diff --git a/modules/opnfv/deployment/compass/adapter.py b/modules/opnfv/deployment/compass/adapter.py index 856c7fc38..38aa45227 100644 --- a/modules/opnfv/deployment/compass/adapter.py +++ 
b/modules/opnfv/deployment/compass/adapter.py @@ -7,6 +7,7 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 +import json import netaddr import re @@ -161,9 +162,10 @@ class CompassAdapter(manager.DeploymentHandler): fields = lines[i].strip().encode().rsplit('\t') host_id = fields[0].strip().encode() name = 'host{0}'.format(host_id) - node_roles = fields[1].strip().encode().lower() + node_roles_str = fields[1].strip().encode().lower() + node_roles_list = json.loads(node_roles_str) node_roles = [manager.Role.ODL if x == 'odl' - else x for x in node_roles] + else x for x in node_roles_list] roles = [x for x in [manager.Role.CONTROLLER, manager.Role.COMPUTE, manager.Role.ODL, diff --git a/prototypes/openstack-ansible/playbooks/configure-targethosts.yml b/prototypes/openstack-ansible/playbooks/configure-targethosts.yml index 1f4ad063e..538fe17ec 100644 --- a/prototypes/openstack-ansible/playbooks/configure-targethosts.yml +++ b/prototypes/openstack-ansible/playbooks/configure-targethosts.yml @@ -47,7 +47,7 @@ remote_user: root tasks: - name: make nfs dir - file: "dest=/images mode=777 state=directory" + file: "dest=/images mode=0777 state=directory" - name: configure sdrvice shell: "echo 'nfs 2049/tcp' >> /etc/services && echo 'nfs 2049/udp' >> /etc/services" - name: configure NFS diff --git a/prototypes/xci/config/env-vars b/prototypes/xci/config/env-vars index 052be2ace..cefb412a6 100755 --- a/prototypes/xci/config/env-vars +++ b/prototypes/xci/config/env-vars @@ -9,6 +9,7 @@ export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy export CLEAN_DIB_IMAGES=false export OPNFV_HOST_IP=192.168.122.2 export XCI_FLAVOR_ANSIBLE_FILE_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR +export CI_LOOP=${CI_LOOP:-daily} export JOB_NAME=${JOB_NAME:-false} # TODO: this currently matches to bifrost ansible version # there is perhaps better way to do this diff --git a/prototypes/xci/config/pinned-versions b/prototypes/xci/config/pinned-versions index 1cd33813c..e3b49c7d4 100755 --- a/prototypes/xci/config/pinned-versions +++ b/prototypes/xci/config/pinned-versions @@ -21,7 +21,7 @@ #------------------------------------------------------------------------------- # use releng from master until the development work with the sandbox is complete export OPNFV_RELENG_VERSION="master" -# HEAD of "master" as of 28.03.2017 -export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"2600d546ed7116f5aad81972b0987a269f3c45b4"} -# HEAD of "master" as of 26.03.2017 -export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"baba7b317a5898cd73b4a11c4ce364c7e2d3d77f"} +# HEAD of "master" as of 04.04.2017 +export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"6109f824e5510e794dbf1968c3859e8b6356d280"} +# HEAD of "master" as of 04.04.2017 +export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"d9e1330c7ff9d72a604b6b4f3af765f66a01b30e"} diff --git a/prototypes/xci/docs/developer-guide.rst b/prototypes/xci/docs/developer-guide.rst new file mode 100644 index 000000000..9a07b1267 --- /dev/null +++ b/prototypes/xci/docs/developer-guide.rst @@ -0,0 +1,31 @@ +######################### +OPNFV XCI Developer Guide +######################### + +This document will contain details about the XCI and how things are put +together in order to support different flavors and different distros in future. 
+ +Document is for anyone who will + +- do hands on development with XCI such as new features to XCI itself or + bugfixes +- integrate new features +- want to know what is going on behind the scenes + +It will also have guidance regarding how to develop for the sandbox. + +If you are looking for User's Guide, please check README.rst in the root of +xci folder or take a look at +`Wiki `_. + +=================================== +Components of XCI Developer Sandbox +=================================== + +TBD + +============= +Detailed Flow +============= + +TBD diff --git a/prototypes/xci/file/ansible-role-requirements.yml b/prototypes/xci/file/ansible-role-requirements.yml index 4faab1950..842bcc44c 100644 --- a/prototypes/xci/file/ansible-role-requirements.yml +++ b/prototypes/xci/file/ansible-role-requirements.yml @@ -7,199 +7,193 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +# these versions are extracted based on the osa commit d9e1330c7ff9d72a604b6b4f3af765f66a01b30e on 04.04.2017 +# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=d9e1330c7ff9d72a604b6b4f3af765f66a01b30e - name: apt_package_pinning scm: git src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning - version: master + version: 364fc9fcd8ff652546c13d9c20ac808bc0e35f66 - name: pip_install scm: git src: https://git.openstack.org/openstack/openstack-ansible-pip_install - version: master + version: 793ae4d01397bd91ebe18e9670e8e27d1ae91960 - name: galera_client scm: git src: https://git.openstack.org/openstack/openstack-ansible-galera_client - version: master + version: c093c13e01826da545bf9a0259e0be441bc1b5e1 - name: galera_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-galera_server - version: master + version: fd0a6b104a32badbe7e7594e2c829261a53bfb11 - name: ceph_client scm: git src: https://git.openstack.org/openstack/openstack-ansible-ceph_client - version: master + version: 9149bfa8e3c4284b656834ba7765ea3aa48bec2e - name: haproxy_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server - version: master + version: 32415ab81c61083ac5a83b65274703e4a5470e5e - name: keepalived scm: git src: https://github.com/evrardjp/ansible-keepalived - version: master + version: 4f7c8eb16e3cbd8c8748f126c1eea73db5c8efe9 - name: lxc_container_create scm: git src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create - version: master + version: 097da38126d90cfca36cdc3955aaf658a00db599 - name: lxc_hosts scm: git src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts - version: master + version: 2931d0c87a1c592ad7f1f2f83cdcf468e8dea932 - name: memcached_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-memcached_server - version: master + version: 58e17aa13ebe7b0aa5da7c00afc75d6716d2720d - name: openstack-ansible-security scm: git src: https://git.openstack.org/openstack/openstack-ansible-security - version: master + version: 9d745ec4fe8ac3e6d6cbb2412abe5196a9d2dad7 - name: openstack_hosts scm: git src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts - version: master + version: 2076dfddf418b1bdd64d3782346823902aa996bc - name: os_keystone scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_keystone - version: master + version: cee7a02143a1826479e6444c6fb5f1c2b6074ab7 - name: openstack_openrc 
scm: git src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc - version: master + version: fb98ad8d7bfe7fba0c964cb061313f1b8767c4b0 - name: os_aodh scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_aodh - version: master + version: 9dcacb8fd6feef02e485f99c83535707ae67876b - name: os_barbican scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_barbican - version: master + version: bb3f39cb2f3c31c6980aa65c8953ff6293b992c0 - name: os_ceilometer scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer - version: master + version: 178ad8245fa019f0610c628c58c377997b011e8a - name: os_cinder scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_cinder - version: master + version: 1321fd39d8f55d1dc3baf91b4194469b349d7dc4 - name: os_glance scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_glance - version: master + version: f39ef212bfa2edff8334bfb632cc463001c77c11 - name: os_gnocchi scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi - version: master + version: 318bd76e5e72402e8ff5b372b469c27a9395341b - name: os_heat scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_heat - version: master + version: 07d59ddb757b2d2557fba52ac537803e646e65b4 - name: os_horizon scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_horizon - version: master + version: 69ef49c4f7a42f082f4bcff824d13f57145e2b83 - name: os_ironic scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_ironic - version: master + version: 57e8a0eaaa2159f33e64a1b037180383196919d1 - name: os_magnum scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_magnum - version: master + version: 8329c257dff25686827bd1cc904506d76ad1d12f - name: os_trove scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_trove - version: master + version: b948402c76d6188caa7be376098354cdb850d638 - name: os_neutron scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_neutron - version: master + version: 2a92a4e1857e7457683aefd87ee5a4e751fc701a - name: os_nova scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_nova - version: master + version: 511963b7921ec7c2db24e8ee1d71a940b0aafae4 - name: os_rally scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_rally - version: master + version: 96153c5b3285d11d00611a03135c9d8f267e0f52 - name: os_sahara scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_sahara - version: master + version: 012d3f3530f878e5143d58380f94d1f514baad04 - name: os_swift scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_swift - version: master + version: d62d6a23ac0b01d0320dbcb6c710dfd5f3cecfdf - name: os_tempest scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_tempest - version: master + version: 9d2bfb09d1ebbc9102329b0d42de33aa321e57b1 - name: plugins scm: git src: https://git.openstack.org/openstack/openstack-ansible-plugins - version: master + version: 3d2e23bb7e1d6775789d7f65ce8a878a7ee1d3c7 - name: rabbitmq_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server - version: master + version: 9b0ce64fe235705e237bc4b476ecc0ad602d67a8 - name: repo_build scm: git src: https://git.openstack.org/openstack/openstack-ansible-repo_build - version: master + version: fe3ae20f74a912925d5c78040984957a6d55f9de - name: repo_server scm: git src: 
https://git.openstack.org/openstack/openstack-ansible-repo_server - version: master + version: 7ea0820e0941282cd5c5cc263e939ffbee54ba52 - name: rsyslog_client scm: git src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client - version: master + version: 19615e47137eee46ee92c0308532fe1d2212333c - name: rsyslog_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server - version: master + version: efd7b21798da49802012e390a0ddf7cc38636eeb - name: sshd scm: git src: https://github.com/willshersystems/ansible-sshd - version: master + version: 426e11c4dffeca09fcc4d16103a91e5e65180040 - name: bird scm: git src: https://github.com/logan2211/ansible-bird - version: master + version: 2c4d29560d3617abddf0e63e0c95536364dedd92 - name: etcd scm: git src: https://github.com/logan2211/ansible-etcd - version: master + version: ef63b0c5fd352b61084fd5aca286ee7f3fea932b - name: unbound scm: git src: https://github.com/logan2211/ansible-unbound - version: master + version: 5329d03eb9c15373d648a801563087c576bbfcde - name: resolvconf scm: git src: https://github.com/logan2211/ansible-resolvconf - version: master + version: 3b2b7cf2e900b194829565b351bf32bb63954548 - name: os_designate scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_designate - version: master + version: b7098a6bdea73c869f45a86e0cc78d21b032161e - name: ceph.ceph-common scm: git src: https://github.com/ceph/ansible-ceph-common - version: master + version: ef149767fa9565ec887f0bdb007ff752bd61e5d5 - name: ceph.ceph-docker-common scm: git src: https://github.com/ceph/ansible-ceph-docker-common - version: master + version: ca86fd0ef6d24aa2c750a625acdcb8012c374aa0 - name: ceph-mon scm: git src: https://github.com/ceph/ansible-ceph-mon - version: master + version: c5be4d6056dfe6a482ca3fcc483a6050cc8929a1 - name: ceph-osd scm: git src: https://github.com/ceph/ansible-ceph-osd - version: master -- name: os_octavia - scm: git - src: https://git.openstack.org/openstack/openstack-ansible-os_octavia - version: master -- name: os_molteniron - scm: git - src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron - version: master + version: 7bc5a61ceb96e487b7a9fe9643f6dafa6492f2b5 diff --git a/prototypes/xci/file/exports b/prototypes/xci/file/exports deleted file mode 100644 index af64d618d..000000000 --- a/prototypes/xci/file/exports +++ /dev/null @@ -1,14 +0,0 @@ -# /etc/exports: the access control list for filesystems which may be exported -# to NFS clients. See exports(5). 
-# -# Example for NFSv2 and NFSv3: -# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check) -# -# Example for NFSv4: -# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check) -# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check) -# -# glance images are stored on compute host and made available to image hosts via nfs -# see image_hosts section in openstack_user_config.yml for details -/images *(rw,sync,no_subtree_check,no_root_squash) - diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml index 3cd1d6246..167502c95 100644 --- a/prototypes/xci/file/ha/flavor-vars.yml +++ b/prototypes/xci/file/ha/flavor-vars.yml @@ -1,37 +1,39 @@ --- host_info: { 'opnfv': { - 'MGMT_IP': '172.29.236.10', 'VLAN_IP': '192.168.122.2', + 'MGMT_IP': '172.29.236.10', + 'VXLAN_IP': '172.29.240.10', 'STORAGE_IP': '172.29.244.10' }, 'controller00': { - 'MGMT_IP': '172.29.236.11', 'VLAN_IP': '192.168.122.3', + 'MGMT_IP': '172.29.236.11', + 'VXLAN_IP': '172.29.240.11', 'STORAGE_IP': '172.29.244.11' }, 'controller01': { - 'MGMT_IP': '172.29.236.12', 'VLAN_IP': '192.168.122.4', + 'MGMT_IP': '172.29.236.12', + 'VXLAN_IP': '172.29.240.12', 'STORAGE_IP': '172.29.244.12' }, 'controller02': { - 'MGMT_IP': '172.29.236.13', 'VLAN_IP': '192.168.122.5', + 'MGMT_IP': '172.29.236.13', + 'VXLAN_IP': '172.29.240.13', 'STORAGE_IP': '172.29.244.13' }, 'compute00': { - 'MGMT_IP': '172.29.236.14', 'VLAN_IP': '192.168.122.6', - 'STORAGE_IP': '172.29.244.14', - 'VLAN_IP_SECOND': '173.29.241.1', - 'VXLAN_IP': '172.29.240.14' + 'MGMT_IP': '172.29.236.14', + 'VXLAN_IP': '172.29.240.14', + 'STORAGE_IP': '172.29.244.14' }, 'compute01': { - 'MGMT_IP': '172.29.236.15', 'VLAN_IP': '192.168.122.7', - 'STORAGE_IP': '172.29.244.15', - 'VLAN_IP_SECOND': '173.29.241.2', - 'VXLAN_IP': '172.29.240.15' + 'MGMT_IP': '172.29.236.15', + 'VXLAN_IP': '172.29.240.15', + 'STORAGE_IP': '172.29.244.15' } } diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml index 43e88c0d0..09fb734c1 100644 --- a/prototypes/xci/file/ha/openstack_user_config.yml +++ b/prototypes/xci/file/ha/openstack_user_config.yml @@ -138,7 +138,7 @@ image_hosts: container_vars: limit_container_types: glance glance_nfs_client: - - server: "172.29.244.15" + - server: "172.29.244.14" remote_path: "/images" local_path: "/var/lib/glance/images" type: "nfs" @@ -148,7 +148,7 @@ image_hosts: container_vars: limit_container_types: glance glance_nfs_client: - - server: "172.29.244.15" + - server: "172.29.244.14" remote_path: "/images" local_path: "/var/lib/glance/images" type: "nfs" @@ -158,7 +158,7 @@ image_hosts: container_vars: limit_container_types: glance glance_nfs_client: - - server: "172.29.244.15" + - server: "172.29.244.14" remote_path: "/images" local_path: "/var/lib/glance/images" type: "nfs" @@ -200,33 +200,6 @@ network_hosts: controller02: ip: 172.29.236.13 -# ceilometer (telemetry API) -metering-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# aodh (telemetry alarm service) -metering-alarm_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# gnocchi (telemetry metrics storage) -metrics_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - # nova hypervisors compute_hosts: compute00: @@ -234,12 +207,6 @@ compute_hosts: compute01: ip: 172.29.236.15 -# 
ceilometer compute agent (telemetry) -metering-compute_hosts: - compute00: - ip: 172.29.236.14 - compute01: - ip: 172.29.236.15 # cinder volume hosts (NFS-backed) # The settings here are repeated for each infra host. # They could instead be applied as global settings in @@ -251,28 +218,37 @@ storage_hosts: container_vars: cinder_backends: limit_container_types: cinder_volume - lvm: - volume_group: cinder-volumes - volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name: LVM_iSCSI - iscsi_ip_address: "172.29.244.11" + nfs_volume: + volume_backend_name: NFS_VOLUME1 + volume_driver: cinder.volume.drivers.nfs.NfsDriver + nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" + nfs_shares_config: /etc/cinder/nfs_shares + shares: + - ip: "172.29.244.14" + share: "/volumes" controller01: ip: 172.29.236.12 container_vars: cinder_backends: limit_container_types: cinder_volume - lvm: - volume_group: cinder-volumes - volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name: LVM_iSCSI - iscsi_ip_address: "172.29.244.12" + nfs_volume: + volume_backend_name: NFS_VOLUME1 + volume_driver: cinder.volume.drivers.nfs.NfsDriver + nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" + nfs_shares_config: /etc/cinder/nfs_shares + shares: + - ip: "172.29.244.14" + share: "/volumes" controller02: ip: 172.29.236.13 container_vars: cinder_backends: limit_container_types: cinder_volume - lvm: - volume_group: cinder-volumes - volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name: LVM_iSCSI - iscsi_ip_address: "172.29.244.13" + nfs_volume: + volume_backend_name: NFS_VOLUME1 + volume_driver: cinder.volume.drivers.nfs.NfsDriver + nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" + nfs_shares_config: /etc/cinder/nfs_shares + shares: + - ip: "172.29.244.14" + share: "/volumes" diff --git a/prototypes/xci/file/mini/configure-targethosts.yml b/prototypes/xci/file/mini/configure-targethosts.yml deleted file mode 100644 index 395f44a64..000000000 --- a/prototypes/xci/file/mini/configure-targethosts.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- hosts: all - remote_user: root - tasks: - - name: add public key to host - copy: - src: ../file/authorized_keys - dest: /root/.ssh/authorized_keys - - name: configure modules - copy: - src: ../file/modules - dest: /etc/modules - -- hosts: controller - remote_user: root - vars_files: - - ../var/{{ ansible_os_family }}.yml - - ../var/flavor-vars.yml - roles: - # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros - - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" } - -- hosts: compute - remote_user: root - vars_files: - - ../var/{{ ansible_os_family }}.yml - - ../var/flavor-vars.yml - roles: - # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros - - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" } - # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros - - role: configure-nfs diff --git a/prototypes/xci/file/mini/flavor-vars.yml b/prototypes/xci/file/mini/flavor-vars.yml index 01fba7129..0d446ba20 100644 --- a/prototypes/xci/file/mini/flavor-vars.yml +++ b/prototypes/xci/file/mini/flavor-vars.yml @@ -1,19 +1,20 @@ --- host_info: { 'opnfv': { - 'MGMT_IP': '172.29.236.10', 'VLAN_IP': 
'192.168.122.2', + 'MGMT_IP': '172.29.236.10', + 'VXLAN_IP': '172.29.240.10', 'STORAGE_IP': '172.29.244.10' }, 'controller00': { - 'MGMT_IP': '172.29.236.11', 'VLAN_IP': '192.168.122.3', + 'MGMT_IP': '172.29.236.11', + 'VXLAN_IP': '172.29.240.11', 'STORAGE_IP': '172.29.244.11' }, 'compute00': { - 'MGMT_IP': '172.29.236.12', 'VLAN_IP': '192.168.122.4', - 'VLAN_IP_SECOND': '173.29.241.1', + 'MGMT_IP': '172.29.236.12', 'VXLAN_IP': '172.29.240.12', 'STORAGE_IP': '172.29.244.12' }, diff --git a/prototypes/xci/file/mini/openstack_user_config.yml b/prototypes/xci/file/mini/openstack_user_config.yml index c41f4329d..f9ccee24f 100644 --- a/prototypes/xci/file/mini/openstack_user_config.yml +++ b/prototypes/xci/file/mini/openstack_user_config.yml @@ -144,30 +144,11 @@ network_hosts: controller00: ip: 172.29.236.11 -# ceilometer (telemetry API) -metering-infra_hosts: - controller00: - ip: 172.29.236.11 - -# aodh (telemetry alarm service) -metering-alarm_hosts: - controller00: - ip: 172.29.236.11 - -# gnocchi (telemetry metrics storage) -metrics_hosts: - controller00: - ip: 172.29.236.11 - # nova hypervisors compute_hosts: compute00: ip: 172.29.236.12 -# ceilometer compute agent (telemetry) -metering-compute_hosts: - compute00: - ip: 172.29.236.12 # cinder volume hosts (NFS-backed) # The settings here are repeated for each infra host. # They could instead be applied as global settings in @@ -179,8 +160,11 @@ storage_hosts: container_vars: cinder_backends: limit_container_types: cinder_volume - lvm: - volume_group: cinder-volumes - volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name: LVM_iSCSI - iscsi_ip_address: "172.29.244.11" + nfs_volume: + volume_backend_name: NFS_VOLUME1 + volume_driver: cinder.volume.drivers.nfs.NfsDriver + nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" + nfs_shares_config: /etc/cinder/nfs_shares + shares: + - ip: "172.29.244.12" + share: "/volumes" diff --git a/prototypes/xci/file/modules b/prototypes/xci/file/modules deleted file mode 100644 index 60a517f18..000000000 --- a/prototypes/xci/file/modules +++ /dev/null @@ -1,8 +0,0 @@ -# /etc/modules: kernel modules to load at boot time. -# -# This file contains the names of kernel modules that should be loaded -# at boot time, one per line. Lines beginning with "#" are ignored. -# Parameters can be specified after the module name. 
- -bonding -8021q diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml deleted file mode 100644 index 6dc147f3b..000000000 --- a/prototypes/xci/file/noha/configure-targethosts.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- hosts: all - remote_user: root - tasks: - - name: add public key to host - copy: - src: ../file/authorized_keys - dest: /root/.ssh/authorized_keys - - name: configure modules - copy: - src: ../file/modules - dest: /etc/modules - -- hosts: controller - remote_user: root - vars_files: - - ../var/{{ ansible_os_family }}.yml - - ../var/flavor-vars.yml - roles: - # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros - - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" } - -- hosts: compute - remote_user: root - vars_files: - - ../var/{{ ansible_os_family }}.yml - - ../var/flavor-vars.yml - roles: - # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros - - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" } - -- hosts: compute01 - remote_user: root - # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros - roles: - - role: configure-nfs diff --git a/prototypes/xci/file/noha/flavor-vars.yml b/prototypes/xci/file/noha/flavor-vars.yml index 7f52d343a..3c69a34bb 100644 --- a/prototypes/xci/file/noha/flavor-vars.yml +++ b/prototypes/xci/file/noha/flavor-vars.yml @@ -1,26 +1,26 @@ --- host_info: { 'opnfv': { - 'MGMT_IP': '172.29.236.10', 'VLAN_IP': '192.168.122.2', + 'MGMT_IP': '172.29.236.10', + 'VXLAN_IP': '172.29.240.10', 'STORAGE_IP': '172.29.244.10' }, 'controller00': { - 'MGMT_IP': '172.29.236.11', 'VLAN_IP': '192.168.122.3', + 'MGMT_IP': '172.29.236.11', + 'VXLAN_IP': '172.29.240.11', 'STORAGE_IP': '172.29.244.11' }, 'compute00': { - 'MGMT_IP': '172.29.236.12', 'VLAN_IP': '192.168.122.4', - 'VLAN_IP_SECOND': '173.29.241.1', + 'MGMT_IP': '172.29.236.12', 'VXLAN_IP': '172.29.240.12', 'STORAGE_IP': '172.29.244.12' }, 'compute01': { - 'MGMT_IP': '172.29.236.13', 'VLAN_IP': '192.168.122.5', - 'VLAN_IP_SECOND': '173.29.241.2', + 'MGMT_IP': '172.29.236.13', 'VXLAN_IP': '172.29.240.13', 'STORAGE_IP': '172.29.244.13' } diff --git a/prototypes/xci/file/noha/openstack_user_config.yml b/prototypes/xci/file/noha/openstack_user_config.yml index 999741580..fb12655e7 100644 --- a/prototypes/xci/file/noha/openstack_user_config.yml +++ b/prototypes/xci/file/noha/openstack_user_config.yml @@ -118,7 +118,7 @@ image_hosts: container_vars: limit_container_types: glance glance_nfs_client: - - server: "172.29.244.13" + - server: "172.29.244.12" remote_path: "/images" local_path: "/var/lib/glance/images" type: "nfs" @@ -144,21 +144,6 @@ network_hosts: controller00: ip: 172.29.236.11 -# ceilometer (telemetry API) -metering-infra_hosts: - controller00: - ip: 172.29.236.11 - -# aodh (telemetry alarm service) -metering-alarm_hosts: - controller00: - ip: 172.29.236.11 - -# gnocchi (telemetry metrics storage) -metrics_hosts: - controller00: - ip: 172.29.236.11 - # nova hypervisors compute_hosts: compute00: @@ -166,12 +151,6 @@ compute_hosts: compute01: ip: 172.29.236.13 -# ceilometer compute agent (telemetry) -metering-compute_hosts: - compute00: - ip: 172.29.236.12 - compute01: - ip: 172.29.236.13 # cinder volume hosts (NFS-backed) # The settings here are 
repeated for each infra host. # They could instead be applied as global settings in @@ -183,8 +162,11 @@ storage_hosts: container_vars: cinder_backends: limit_container_types: cinder_volume - lvm: - volume_group: cinder-volumes - volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name: LVM_iSCSI - iscsi_ip_address: "172.29.244.11" + nfs_volume: + volume_backend_name: NFS_VOLUME1 + volume_driver: cinder.volume.drivers.nfs.NfsDriver + nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" + nfs_shares_config: /etc/cinder/nfs_shares + shares: + - ip: "172.29.244.12" + share: "/volumes" diff --git a/prototypes/xci/file/setup-openstack.yml b/prototypes/xci/file/setup-openstack.yml index bd5d5cd93..415c48993 100644 --- a/prototypes/xci/file/setup-openstack.yml +++ b/prototypes/xci/file/setup-openstack.yml @@ -20,17 +20,6 @@ - include: os-neutron-install.yml - include: os-heat-install.yml - include: os-horizon-install.yml -- include: os-ceilometer-install.yml -- include: os-aodh-install.yml -- include: os-designate-install.yml -#NOTE(stevelle) Ensure Gnocchi identities exist before Swift -- include: os-gnocchi-install.yml - when: - - gnocchi_storage_driver is defined - - gnocchi_storage_driver == 'swift' - vars: - gnocchi_identity_only: True - include: os-swift-install.yml -- include: os-gnocchi-install.yml - include: os-ironic-install.yml - include: os-tempest-install.yml diff --git a/prototypes/xci/playbooks/configure-localhost.yml b/prototypes/xci/playbooks/configure-localhost.yml index 2a559645e..34b974cd1 100644 --- a/prototypes/xci/playbooks/configure-localhost.yml +++ b/prototypes/xci/playbooks/configure-localhost.yml @@ -21,12 +21,6 @@ path: "{{LOG_PATH}}" state: directory recurse: no - # when the deployment is not aio, we use playbook, configure-targethosts.yml, to configure all the hosts - - name: copy multihost playbook - copy: - src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-targethosts.yml" - dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks" - when: XCI_FLAVOR != "aio" # when the deployment is aio, we overwrite and use playbook, configure-opnfvhost.yml, since everything gets installed on opnfv host - name: copy aio playbook copy: diff --git a/prototypes/xci/playbooks/configure-opnfvhost.yml b/prototypes/xci/playbooks/configure-opnfvhost.yml index 06e27e7fc..8c794c422 100644 --- a/prototypes/xci/playbooks/configure-opnfvhost.yml +++ b/prototypes/xci/playbooks/configure-opnfvhost.yml @@ -54,8 +54,10 @@ replace: '\1haproxy_state: enabled' - name: copy OPNFV OpenStack playbook shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks" + # Copy pinned role requirements if we are running as part of daily CI loop - name: copy OPNFV role requirements shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}" + when: XCI_LOOP == "daily" - hosts: localhost remote_user: root tasks: diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/playbooks/configure-targethosts.yml similarity index 62% rename from prototypes/xci/file/ha/configure-targethosts.yml rename to prototypes/xci/playbooks/configure-targethosts.yml index 6dc147f3b..50da1f223 100644 --- a/prototypes/xci/file/ha/configure-targethosts.yml +++ b/prototypes/xci/playbooks/configure-targethosts.yml @@ -6,10 +6,6 @@ copy: src: ../file/authorized_keys dest: /root/.ssh/authorized_keys - - name: configure modules - copy: - src: ../file/modules - dest: /etc/modules 
- hosts: controller remote_user: root @@ -18,7 +14,9 @@ - ../var/flavor-vars.yml roles: # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros - - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" } + - { role: configure-network, src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" } + # we need to force sync time with ntp or the nodes will be out of sync timewise + - role: synchronize-time - hosts: compute remote_user: root @@ -27,9 +25,11 @@ - ../var/flavor-vars.yml roles: # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros - - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" } + - { role: configure-network, src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" } + # we need to force sync time with ntp or the nodes will be out of sync timewise + - role: synchronize-time -- hosts: compute01 +- hosts: compute00 remote_user: root # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros roles: diff --git a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml index 8bc84822c..aafadf712 100644 --- a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml +++ b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml @@ -8,9 +8,27 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## # TODO: this role needs to be adjusted for different distros -- name: configure network for {{ ansible_os_family }} on interface {{ interface }} - template: - src: "{{ src }}" - dest: "{{ dest }}" -- name: restart ubuntu xenial network service - shell: "/sbin/ifconfig {{ interface }} 0 &&/sbin/ifdown -a && /sbin/ifup -a" +- block: + - name: configure modules + lineinfile: + dest: /etc/modules + state: present + create: yes + line: "8021q" + - name: add modules + modprobe: + name: 8021q + state: present + - name: ensure glean rules are removed + file: + path: "/etc/udev/rules.d/99-glean.rules" + state: absent + - name: ensure interfaces.d folder is empty + shell: "/bin/rm -rf /etc/network/interfaces.d/*" + - name: ensure interfaces file is updated + template: + src: "{{ src }}" + dest: "{{ dest }}" + - name: restart network service + shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a" + when: ansible_distribution_release == "xenial" diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml index b188f4dbb..c52da0bf3 100644 --- a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml +++ b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml @@ -9,11 +9,14 @@ ############################################################################## # TODO: this is for xenial and needs to be adjusted for different distros - block: - - name: make NFS dir + - name: make NFS directories file: - dest: /images - mode: 777 + dest: "{{ item }}" + mode: 0777 state: directory + with_items: + - "/images" + - "/volumes" - name: configure NFS service lineinfile: dest: /etc/services @@ -23,11 +26,15 @@ with_items: - "nfs 2049/tcp" - "nfs 2049/udp" - - name: configure NFS exports on ubuntu xenial - copy: - src: ../file/exports + - name: 
configure NFS exports + lineinfile: dest: /etc/exports - when: ansible_distribution_release == "xenial" + state: present + create: yes + line: "{{ item }}" + with_items: + - "/images *(rw,sync,no_subtree_check,no_root_squash)" + - "/volumes *(rw,sync,no_subtree_check,no_root_squash)" # TODO: the service name might be different on other distros and needs to be adjusted - name: restart ubuntu xenial NFS service service: diff --git a/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml new file mode 100644 index 000000000..5c39d897b --- /dev/null +++ b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml @@ -0,0 +1,18 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# TODO: this role needs to be adjusted for different distros +- block: + - name: restart chrony + service: + name: chrony + state: restarted + - name: synchronize time + shell: "chronyc -a 'burst 4/4' && chronyc -a makestep" + when: ansible_distribution_release == "xenial" diff --git a/prototypes/xci/template/compute.interface.j2 b/prototypes/xci/template/compute.interface.j2 index 0c5147c45..094544c3b 100644 --- a/prototypes/xci/template/compute.interface.j2 +++ b/prototypes/xci/template/compute.interface.j2 @@ -1,11 +1,7 @@ -# This file describes the network interfaces available on your system -# and how to activate them. For more information, see interfaces(5). - # The loopback network interface auto lo iface lo inet loopback - # Physical interface auto {{ interface }} iface {{ interface }} inet manual @@ -20,7 +16,7 @@ auto {{ interface }}.30 iface {{ interface }}.30 inet manual vlan-raw-device {{ interface }} -# Storage network VLAN interface (optional) +# Storage network VLAN interface auto {{ interface }}.20 iface {{ interface }}.20 inet manual vlan-raw-device {{ interface }} @@ -55,6 +51,7 @@ iface br-vlan inet static address {{host_info[inventory_hostname].VLAN_IP}} netmask 255.255.255.0 gateway 192.168.122.1 + dns-nameserver 8.8.8.8 8.8.4.4 offload-sg off # Create veth pair, don't bomb if already exists pre-up ip link add br-vlan-veth type veth peer name eth12 || true @@ -65,17 +62,7 @@ iface br-vlan inet static post-down ip link del br-vlan-veth || true bridge_ports br-vlan-veth -# Add an additional address to br-vlan -iface br-vlan inet static - # Flat network default gateway - # -- This needs to exist somewhere for network reachability - # -- from the router namespace for floating IP paths. - # -- Putting this here is primarily for tempest to work. 
- address {{host_info[inventory_hostname].VLAN_IP_SECOND}} - netmask 255.255.252.0 - dns-nameserver 8.8.8.8 8.8.4.4 - -# compute1 Storage bridge +# OpenStack Storage bridge auto br-storage iface br-storage inet static bridge_stp off diff --git a/prototypes/xci/template/controller.interface.j2 b/prototypes/xci/template/controller.interface.j2 index fbaa8b8dd..638e78e18 100644 --- a/prototypes/xci/template/controller.interface.j2 +++ b/prototypes/xci/template/controller.interface.j2 @@ -1,6 +1,3 @@ -# This file describes the network interfaces available on your system -# and how to activate them. For more information, see interfaces(5). - # The loopback network interface auto lo iface lo inet loopback @@ -35,18 +32,14 @@ iface br-mgmt inet static netmask 255.255.252.0 # OpenStack Networking VXLAN (tunnel/overlay) bridge -# -# Only the COMPUTE and NETWORK nodes must have an IP address -# on this bridge. When used by infrastructure nodes, the -# IP addresses are assigned to containers which use this -# bridge. -# auto br-vxlan -iface br-vxlan inet manual +iface br-vxlan inet static bridge_stp off bridge_waitport 0 bridge_fd 0 bridge_ports {{ interface }}.30 + address {{host_info[inventory_hostname].VXLAN_IP}} + netmask 255.255.252.0 # OpenStack Networking VLAN bridge auto br-vlan @@ -60,7 +53,7 @@ iface br-vlan inet static gateway 192.168.122.1 dns-nameserver 8.8.8.8 8.8.4.4 -# compute1 Storage bridge +# OpenStack Storage bridge auto br-storage iface br-storage inet static bridge_stp off diff --git a/prototypes/xci/template/opnfv.interface.j2 b/prototypes/xci/template/opnfv.interface.j2 index fbaa8b8dd..e9f8649c6 100644 --- a/prototypes/xci/template/opnfv.interface.j2 +++ b/prototypes/xci/template/opnfv.interface.j2 @@ -1,6 +1,3 @@ -# This file describes the network interfaces available on your system -# and how to activate them. For more information, see interfaces(5). - # The loopback network interface auto lo iface lo inet loopback @@ -35,18 +32,14 @@ iface br-mgmt inet static netmask 255.255.252.0 # OpenStack Networking VXLAN (tunnel/overlay) bridge -# -# Only the COMPUTE and NETWORK nodes must have an IP address -# on this bridge. When used by infrastructure nodes, the -# IP addresses are assigned to containers which use this -# bridge. 
-# auto br-vxlan -iface br-vxlan inet manual +iface br-vxlan inet static bridge_stp off bridge_waitport 0 bridge_fd 0 bridge_ports {{ interface }}.30 + address {{ host_info[inventory_hostname].VXLAN_IP }} + netmask 255.255.252.0 # OpenStack Networking VLAN bridge auto br-vlan @@ -60,7 +53,7 @@ iface br-vlan inet static gateway 192.168.122.1 dns-nameserver 8.8.8.8 8.8.4.4 -# compute1 Storage bridge +# OpenStack Storage bridge auto br-storage iface br-storage inet static bridge_stp off diff --git a/prototypes/xci/var/opnfv.yml b/prototypes/xci/var/opnfv.yml index dd3761bd1..12cb55675 100644 --- a/prototypes/xci/var/opnfv.yml +++ b/prototypes/xci/var/opnfv.yml @@ -20,5 +20,6 @@ OPENSTACK_OSA_ETC_PATH: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}" XCI_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_ANSIBLE_PIP_VERSION') }}" XCI_FLAVOR: "{{ lookup('env','XCI_FLAVOR') }}" XCI_FLAVOR_ANSIBLE_FILE_PATH: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}" +XCI_LOOP: "{{ lookup('env','XCI_LOOP') }}" LOG_PATH: "{{ lookup('env','LOG_PATH') }}" OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}" diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh index da5bb26cf..2fd9be022 100755 --- a/prototypes/xci/xci-deploy.sh +++ b/prototypes/xci/xci-deploy.sh @@ -70,7 +70,9 @@ cd $XCI_PATH/playbooks ansible-playbook $ANSIBLE_VERBOSITY -i inventory provision-vm-nodes.yml echo "-----------------------------------------------------------------------" echo "Info: VM nodes are provisioned!" - +source $OPENSTACK_BIFROST_PATH/env-vars +ironic node-list +echo #------------------------------------------------------------------------------- # Configure localhost #-------------------------------------------------------------------------------