Merge "Add repository archiver for compliance checks"
authorTrevor Bramwell <tbramwell@linuxfoundation.org>
Wed, 30 Aug 2017 16:58:53 +0000 (16:58 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Wed, 30 Aug 2017 16:58:53 +0000 (16:58 +0000)
360 files changed:
.yamllint [new file with mode: 0644]
INFO
jjb-sandbox/releng/releng-sandbox-jobs.yml
jjb/apex/apex-deploy.sh
jjb/apex/apex-download-artifact.sh
jjb/apex/apex-upload-artifact.sh
jjb/apex/apex.yml
jjb/apex/apex.yml.j2
jjb/apex/scenarios.yaml.hidden
jjb/apex/update-build-result.groovy [new file with mode: 0644]
jjb/armband/armband-ci-jobs.yml
jjb/armband/armband-deploy.sh
jjb/armband/armband-download-artifact.sh
jjb/armband/armband-project-jobs.yml
jjb/armband/armband-verify-jobs.yml
jjb/armband/build.sh
jjb/barometer/barometer-build.sh [new file with mode: 0644]
jjb/barometer/barometer-upload-artifact.sh [new file with mode: 0644]
jjb/barometer/barometer.yml
jjb/bottlenecks/bottlenecks-ci-jobs.yml
jjb/bottlenecks/bottlenecks-cleanup.sh
jjb/bottlenecks/bottlenecks-run-suite.sh
jjb/calipso/calipso.yml [new file with mode: 0644]
jjb/ci_gate_security/anteater-clone-all-repos.sh [new file with mode: 0755]
jjb/ci_gate_security/anteater-report-to-gerrit.sh
jjb/ci_gate_security/anteater-security-audit-weekly.sh [new file with mode: 0644]
jjb/ci_gate_security/anteater-security-audit.sh
jjb/ci_gate_security/opnfv-ci-gate-security.yml
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-deploy.sh
jjb/compass4nfv/compass-dovetail-jobs.yml
jjb/compass4nfv/compass-verify-jobs.yml
jjb/daisy4nfv/daisy-daily-jobs.yml
jjb/daisy4nfv/daisy-deploy.sh
jjb/daisy4nfv/daisy-project-jobs.yml
jjb/doctor/doctor.yml
jjb/dovetail/dovetail-ci-jobs.yml
jjb/dovetail/dovetail-cleanup.sh
jjb/dovetail/dovetail-run.sh
jjb/dovetail/dovetail-weekly-jobs.yml
jjb/fuel/fuel-daily-jobs.yml
jjb/fuel/fuel-deploy.sh
jjb/fuel/fuel-download-artifact.sh
jjb/fuel/fuel-project-jobs.yml
jjb/fuel/fuel-verify-jobs.yml
jjb/fuel/fuel-weekly-jobs.yml
jjb/functest/functest-alpine.sh [new file with mode: 0644]
jjb/functest/functest-daily-jobs.yml
jjb/functest/functest-loop.sh
jjb/functest/functest-project-jobs.yml
jjb/functest/functest-suite.sh
jjb/functest/set-functest-env.sh
jjb/global/installer-params.yml
jjb/global/releng-defaults.yml
jjb/global/releng-macros.yml
jjb/global/slave-params.yml
jjb/joid/joid-daily-jobs.yml
jjb/joid/joid-deploy.sh
jjb/kvmfornfv/kvmfornfv.yml
jjb/multisite/fuel-deploy-for-multisite.sh [deleted file]
jjb/multisite/multisite-daily-jobs.yml [deleted file]
jjb/netready/netready.yml
jjb/nfvbench/nfvbench.yml [new file with mode: 0644]
jjb/orchestra/orchestra-daily-jobs.yml [new file with mode: 0644]
jjb/orchestra/orchestra-project-jobs.yml [new file with mode: 0644]
jjb/ovn4nfv/ovn4nfv-daily-jobs.yml [new file with mode: 0644]
jjb/ovn4nfv/ovn4nfv-project-jobs.yml [new file with mode: 0644]
jjb/ovsnfv/ovsnfv.yml
jjb/qtip/helpers/cleanup-deploy.sh [deleted file]
jjb/qtip/helpers/validate-deploy.sh [deleted file]
jjb/qtip/helpers/validate-setup.sh [deleted file]
jjb/qtip/qtip-experimental-jobs.yml [new file with mode: 0644]
jjb/qtip/qtip-validate-jobs.yml
jjb/qtip/qtip-verify-jobs.yml
jjb/releng/automate.yml [moved from jjb/releng/testapi-automate.yml with 73% similarity]
jjb/releng/docker-deploy.sh [new file with mode: 0644]
jjb/releng/docker-update.sh [new file with mode: 0644]
jjb/releng/opnfv-docker-arm.yml
jjb/releng/opnfv-docker.sh
jjb/releng/opnfv-docker.yml
jjb/releng/opnfv-lint.yml
jjb/releng/releng-ci-jobs.yml
jjb/releng/testapi-docker-deploy.sh [deleted file]
jjb/releng/testapi-docker-update.sh [deleted file]
jjb/securedlab/check-jinja2.yml
jjb/sfc/sfc-project-jobs.yml [moved from jjb/snaps/snaps.yml with 81% similarity]
jjb/snaps/snaps-verify-jobs.yml [moved from jjb/multisite/multisite-verify-jobs.yml with 75% similarity]
jjb/storperf/storperf-daily-jobs.yml [new file with mode: 0644]
jjb/storperf/storperf-verify-jobs.yml [new file with mode: 0644]
jjb/storperf/storperf.yml
jjb/test-requirements.txt [deleted file]
jjb/xci/bifrost-cleanup-job.yml
jjb/xci/bifrost-periodic-jobs.yml
jjb/xci/bifrost-provision.sh
jjb/xci/bifrost-verify-jobs.yml
jjb/xci/bifrost-verify.sh
jjb/xci/osa-periodic-jobs.yml
jjb/xci/xci-daily-jobs.yml
jjb/xci/xci-deploy.sh
jjb/xci/xci-verify-jobs.yml [new file with mode: 0644]
jjb/yardstick/yardstick-daily-jobs.yml
jjb/yardstick/yardstick-daily.sh
jjb/yardstick/yardstick-get-k8s-conf.sh [new file with mode: 0755]
modules/tox.ini [new file with mode: 0644]
prototypes/bifrost/README.md [deleted file]
prototypes/bifrost/playbooks/opnfv-virtual.yaml [deleted file]
prototypes/bifrost/scripts/bifrost-provision.sh [deleted file]
prototypes/bifrost/scripts/destroy-env.sh [deleted file]
prototypes/openstack-ansible/README.md [deleted file]
prototypes/openstack-ansible/file/cinder.yml [deleted file]
prototypes/openstack-ansible/file/exports [deleted file]
prototypes/openstack-ansible/file/modules [deleted file]
prototypes/openstack-ansible/file/openstack_user_config.yml [deleted file]
prototypes/openstack-ansible/file/opnfv-setup-openstack.yml [deleted file]
prototypes/openstack-ansible/file/user_variables.yml [deleted file]
prototypes/openstack-ansible/playbooks/configure-targethosts.yml [deleted file]
prototypes/openstack-ansible/playbooks/configure-xcimaster.yml [deleted file]
prototypes/openstack-ansible/playbooks/inventory [deleted file]
prototypes/openstack-ansible/scripts/osa-deploy.sh [deleted file]
prototypes/openstack-ansible/template/bifrost/compute.interface.j2 [deleted file]
prototypes/openstack-ansible/template/bifrost/controller.interface.j2 [deleted file]
prototypes/openstack-ansible/var/ubuntu.yml [deleted file]
prototypes/puppet-infracloud/README.md [deleted file]
prototypes/puppet-infracloud/creds/clouds.yaml [deleted file]
prototypes/puppet-infracloud/deploy_on_baremetal.md [deleted file]
prototypes/puppet-infracloud/hiera/common.yaml [deleted file]
prototypes/puppet-infracloud/hiera/common_baremetal.yaml [deleted file]
prototypes/puppet-infracloud/install_modules.sh [deleted file]
prototypes/puppet-infracloud/install_puppet.sh [deleted file]
prototypes/puppet-infracloud/manifests/site.pp [deleted file]
prototypes/puppet-infracloud/modules.env [deleted file]
prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp [deleted file]
prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp [deleted file]
prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp [deleted file]
prototypes/xci/README.rst [deleted file]
prototypes/xci/config/aio-vars [deleted file]
prototypes/xci/config/env-vars [deleted file]
prototypes/xci/config/ha-vars [deleted file]
prototypes/xci/config/mini-vars [deleted file]
prototypes/xci/config/noha-vars [deleted file]
prototypes/xci/config/pinned-versions [deleted file]
prototypes/xci/config/user-vars [deleted file]
prototypes/xci/docs/developer-guide.rst [deleted file]
prototypes/xci/file/aio/configure-opnfvhost.yml [deleted file]
prototypes/xci/file/aio/flavor-vars.yml [deleted file]
prototypes/xci/file/aio/inventory [deleted file]
prototypes/xci/file/ansible-role-requirements.yml [deleted file]
prototypes/xci/file/cinder.yml [deleted file]
prototypes/xci/file/ha/flavor-vars.yml [deleted file]
prototypes/xci/file/ha/inventory [deleted file]
prototypes/xci/file/ha/openstack_user_config.yml [deleted file]
prototypes/xci/file/ha/user_variables.yml [deleted file]
prototypes/xci/file/install-ansible.sh [deleted file]
prototypes/xci/file/mini/flavor-vars.yml [deleted file]
prototypes/xci/file/mini/inventory [deleted file]
prototypes/xci/file/mini/openstack_user_config.yml [deleted file]
prototypes/xci/file/mini/user_variables.yml [deleted file]
prototypes/xci/file/noha/flavor-vars.yml [deleted file]
prototypes/xci/file/noha/inventory [deleted file]
prototypes/xci/file/noha/openstack_user_config.yml [deleted file]
prototypes/xci/file/noha/user_variables.yml [deleted file]
prototypes/xci/file/setup-openstack.yml [deleted file]
prototypes/xci/playbooks/configure-localhost.yml [deleted file]
prototypes/xci/playbooks/configure-opnfvhost.yml [deleted file]
prototypes/xci/playbooks/configure-targethosts.yml [deleted file]
prototypes/xci/playbooks/inventory [deleted file]
prototypes/xci/playbooks/provision-vm-nodes.yml [deleted file]
prototypes/xci/playbooks/roles/clone-repository/tasks/main.yml [deleted file]
prototypes/xci/playbooks/roles/configure-network/tasks/main.yml [deleted file]
prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml [deleted file]
prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml [deleted file]
prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml [deleted file]
prototypes/xci/template/compute.interface.j2 [deleted file]
prototypes/xci/template/controller.interface.j2 [deleted file]
prototypes/xci/template/opnfv.interface.j2 [deleted file]
prototypes/xci/var/Debian.yml [deleted file]
prototypes/xci/var/RedHat.yml [deleted file]
prototypes/xci/var/Suse.yml [deleted file]
prototypes/xci/var/opnfv.yml [deleted file]
prototypes/xci/xci-deploy.sh [deleted file]
setup.py [deleted file]
tox.ini
utils/create_pod_file.py
utils/fetch_os_creds.sh
utils/jenkins-jnlp-connect.sh
utils/push-test-logs.sh
utils/test/reporting/api/__init__.py [moved from utils/test/reporting/api/api/__init__.py with 100% similarity]
utils/test/reporting/api/conf.py [moved from utils/test/reporting/api/api/conf.py with 100% similarity]
utils/test/reporting/api/handlers/__init__.py [moved from utils/test/reporting/api/api/handlers/__init__.py with 100% similarity]
utils/test/reporting/api/handlers/landing.py [moved from utils/test/reporting/api/api/handlers/landing.py with 69% similarity]
utils/test/reporting/api/handlers/projects.py [moved from utils/test/reporting/api/api/handlers/projects.py with 100% similarity]
utils/test/reporting/api/handlers/testcases.py [moved from utils/test/reporting/api/api/handlers/testcases.py with 100% similarity]
utils/test/reporting/api/requirements.txt [deleted file]
utils/test/reporting/api/server.py [moved from utils/test/reporting/api/api/server.py with 100% similarity]
utils/test/reporting/api/setup.cfg [deleted file]
utils/test/reporting/api/setup.py [deleted file]
utils/test/reporting/api/urls.py [moved from utils/test/reporting/api/api/urls.py with 100% similarity]
utils/test/reporting/docker/Dockerfile
utils/test/reporting/docker/nginx.conf
utils/test/reporting/docker/reporting.sh
utils/test/reporting/docker/requirements.pip
utils/test/reporting/docker/supervisor.conf
utils/test/reporting/docker/web_server.sh [new file with mode: 0755]
utils/test/reporting/docs/_build/.buildinfo [new file with mode: 0644]
utils/test/reporting/docs/_build/.doctrees/environment.pickle [new file with mode: 0644]
utils/test/reporting/docs/_build/.doctrees/index.doctree [new file with mode: 0644]
utils/test/reporting/docs/conf.py [new file with mode: 0644]
utils/test/reporting/docs/index.rst [new file with mode: 0644]
utils/test/reporting/pages/angular.sh
utils/test/reporting/pages/app/index.html
utils/test/reporting/pages/app/scripts/controllers/table.controller.js
utils/test/reporting/pages/app/scripts/controllers/testvisual.controller.js
utils/test/reporting/pages/app/scripts/factory/table.factory.js
utils/test/reporting/pages/app/views/commons/table.html
utils/test/reporting/pages/app/views/commons/testCaseVisual.html
utils/test/reporting/pages/config.sh [new file with mode: 0755]
utils/test/reporting/reporting/__init__.py [moved from utils/test/reporting/functest/__init__.py with 100% similarity]
utils/test/reporting/reporting/functest/__init__.py [moved from utils/test/reporting/qtip/__init__.py with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_0.png [moved from utils/test/reporting/functest/img/gauge_0.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_100.png [moved from utils/test/reporting/functest/img/gauge_100.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_16.7.png [moved from utils/test/reporting/functest/img/gauge_16.7.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_25.png [moved from utils/test/reporting/functest/img/gauge_25.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_33.3.png [moved from utils/test/reporting/functest/img/gauge_33.3.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_41.7.png [moved from utils/test/reporting/functest/img/gauge_41.7.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_50.png [moved from utils/test/reporting/functest/img/gauge_50.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_58.3.png [moved from utils/test/reporting/functest/img/gauge_58.3.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_66.7.png [moved from utils/test/reporting/functest/img/gauge_66.7.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_75.png [moved from utils/test/reporting/functest/img/gauge_75.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_8.3.png [moved from utils/test/reporting/functest/img/gauge_8.3.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_83.3.png [moved from utils/test/reporting/functest/img/gauge_83.3.png with 100% similarity]
utils/test/reporting/reporting/functest/img/gauge_91.7.png [moved from utils/test/reporting/functest/img/gauge_91.7.png with 100% similarity]
utils/test/reporting/reporting/functest/img/icon-nok.png [moved from utils/test/reporting/functest/img/icon-nok.png with 100% similarity]
utils/test/reporting/reporting/functest/img/icon-ok.png [moved from utils/test/reporting/functest/img/icon-ok.png with 100% similarity]
utils/test/reporting/reporting/functest/img/weather-clear.png [moved from utils/test/reporting/functest/img/weather-clear.png with 100% similarity]
utils/test/reporting/reporting/functest/img/weather-few-clouds.png [moved from utils/test/reporting/functest/img/weather-few-clouds.png with 100% similarity]
utils/test/reporting/reporting/functest/img/weather-overcast.png [moved from utils/test/reporting/functest/img/weather-overcast.png with 100% similarity]
utils/test/reporting/reporting/functest/img/weather-storm.png [moved from utils/test/reporting/functest/img/weather-storm.png with 100% similarity]
utils/test/reporting/reporting/functest/index.html [moved from utils/test/reporting/functest/index.html with 100% similarity]
utils/test/reporting/reporting/functest/reporting-status.py [moved from utils/test/reporting/functest/reporting-status.py with 98% similarity]
utils/test/reporting/reporting/functest/reporting-tempest.py [moved from utils/test/reporting/functest/reporting-tempest.py with 86% similarity]
utils/test/reporting/reporting/functest/reporting-vims.py [moved from utils/test/reporting/functest/reporting-vims.py with 97% similarity]
utils/test/reporting/reporting/functest/scenarioResult.py [moved from utils/test/reporting/functest/scenarioResult.py with 100% similarity]
utils/test/reporting/reporting/functest/template/index-status-tmpl.html [moved from utils/test/reporting/functest/template/index-status-tmpl.html with 82% similarity]
utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html [moved from utils/test/reporting/functest/template/index-tempest-tmpl.html with 100% similarity]
utils/test/reporting/reporting/functest/template/index-vims-tmpl.html [moved from utils/test/reporting/functest/template/index-vims-tmpl.html with 100% similarity]
utils/test/reporting/reporting/functest/testCase.py [moved from utils/test/reporting/functest/testCase.py with 100% similarity]
utils/test/reporting/reporting/qtip/__init__.py [moved from utils/test/reporting/tests/__init__.py with 100% similarity]
utils/test/reporting/reporting/qtip/index.html [moved from utils/test/reporting/qtip/index.html with 100% similarity]
utils/test/reporting/reporting/qtip/reporting-status.py [moved from utils/test/reporting/qtip/reporting-status.py with 94% similarity]
utils/test/reporting/reporting/qtip/template/index-status-tmpl.html [moved from utils/test/reporting/qtip/template/index-status-tmpl.html with 92% similarity]
utils/test/reporting/reporting/reporting.yaml [moved from utils/test/reporting/reporting.yaml with 98% similarity]
utils/test/reporting/reporting/storperf/__init__.py [moved from utils/test/reporting/tests/unit/__init__.py with 100% similarity]
utils/test/reporting/reporting/storperf/reporting-status.py [moved from utils/test/reporting/storperf/reporting-status.py with 98% similarity]
utils/test/reporting/reporting/storperf/template/index-status-tmpl.html [moved from utils/test/reporting/storperf/template/index-status-tmpl.html with 100% similarity]
utils/test/reporting/reporting/tests/__init__.py [moved from utils/test/reporting/tests/unit/utils/__init__.py with 100% similarity]
utils/test/reporting/reporting/tests/unit/__init__.py [moved from utils/test/reporting/utils/__init__.py with 100% similarity]
utils/test/reporting/reporting/tests/unit/utils/__init__.py [moved from prototypes/puppet-infracloud/.gitkeep with 100% similarity]
utils/test/reporting/reporting/tests/unit/utils/test_utils.py [moved from utils/test/reporting/tests/unit/utils/test_utils.py with 78% similarity]
utils/test/reporting/reporting/utils/__init__.py [moved from utils/test/reporting/pages/app/scripts/app.config.js with 100% similarity]
utils/test/reporting/reporting/utils/reporting_utils.py [moved from utils/test/reporting/utils/reporting_utils.py with 91% similarity]
utils/test/reporting/reporting/utils/scenarioResult.py [moved from utils/test/reporting/utils/scenarioResult.py with 100% similarity]
utils/test/reporting/reporting/yardstick/__init__.py [new file with mode: 0644]
utils/test/reporting/reporting/yardstick/img/gauge_0.png [moved from utils/test/reporting/yardstick/img/gauge_0.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_100.png [moved from utils/test/reporting/yardstick/img/gauge_100.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_16.7.png [moved from utils/test/reporting/yardstick/img/gauge_16.7.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_25.png [moved from utils/test/reporting/yardstick/img/gauge_25.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_33.3.png [moved from utils/test/reporting/yardstick/img/gauge_33.3.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_41.7.png [moved from utils/test/reporting/yardstick/img/gauge_41.7.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_50.png [moved from utils/test/reporting/yardstick/img/gauge_50.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_58.3.png [moved from utils/test/reporting/yardstick/img/gauge_58.3.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_66.7.png [moved from utils/test/reporting/yardstick/img/gauge_66.7.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_75.png [moved from utils/test/reporting/yardstick/img/gauge_75.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_8.3.png [moved from utils/test/reporting/yardstick/img/gauge_8.3.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_83.3.png [moved from utils/test/reporting/yardstick/img/gauge_83.3.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/gauge_91.7.png [moved from utils/test/reporting/yardstick/img/gauge_91.7.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/icon-nok.png [moved from utils/test/reporting/yardstick/img/icon-nok.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/icon-ok.png [moved from utils/test/reporting/yardstick/img/icon-ok.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/weather-clear.png [moved from utils/test/reporting/yardstick/img/weather-clear.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png [moved from utils/test/reporting/yardstick/img/weather-few-clouds.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/weather-overcast.png [moved from utils/test/reporting/yardstick/img/weather-overcast.png with 100% similarity]
utils/test/reporting/reporting/yardstick/img/weather-storm.png [moved from utils/test/reporting/yardstick/img/weather-storm.png with 100% similarity]
utils/test/reporting/reporting/yardstick/index.html [moved from utils/test/reporting/yardstick/index.html with 100% similarity]
utils/test/reporting/reporting/yardstick/reporting-status.py [moved from utils/test/reporting/yardstick/reporting-status.py with 98% similarity]
utils/test/reporting/reporting/yardstick/scenarios.py [moved from utils/test/reporting/yardstick/scenarios.py with 100% similarity]
utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html [moved from utils/test/reporting/yardstick/template/index-status-tmpl.html with 100% similarity]
utils/test/reporting/requirements.txt [new file with mode: 0644]
utils/test/reporting/run_test.sh [new file with mode: 0755]
utils/test/reporting/run_unit_tests.sh [deleted file]
utils/test/reporting/setup.cfg [new file with mode: 0644]
utils/test/reporting/setup.py
utils/test/reporting/test-requirements.txt [new file with mode: 0644]
utils/test/reporting/tox.ini [new file with mode: 0644]
utils/test/testapi/.gitignore [new file with mode: 0644]
utils/test/testapi/3rd_party/static/testapi-ui/app.js
utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/guidelines.html [deleted file]
utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/guidelinesController.js [deleted file]
utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/partials/guidelineDetails.html [deleted file]
utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/partials/testListModal.html [deleted file]
utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html [new file with mode: 0644]
utils/test/testapi/3rd_party/static/testapi-ui/components/pods/podsController.js [new file with mode: 0644]
utils/test/testapi/3rd_party/static/testapi-ui/components/results/results.html
utils/test/testapi/3rd_party/static/testapi-ui/components/results/resultsController.js
utils/test/testapi/3rd_party/static/testapi-ui/index.html
utils/test/testapi/3rd_party/static/testapi-ui/shared/header/header.html
utils/test/testapi/deployment/deploy.py
utils/test/testapi/deployment/docker-compose.yml.template
utils/test/testapi/docker/Dockerfile
utils/test/testapi/docker/prepare-env.sh
utils/test/testapi/etc/config.ini
utils/test/testapi/htmlize/htmlize.py
utils/test/testapi/install.sh [deleted file]
utils/test/testapi/opnfv_testapi/cmd/server.py
utils/test/testapi/opnfv_testapi/common/check.py
utils/test/testapi/opnfv_testapi/common/config.py
utils/test/testapi/opnfv_testapi/common/message.py
utils/test/testapi/opnfv_testapi/common/raises.py
utils/test/testapi/opnfv_testapi/db/__init__.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/db/api.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/resources/handlers.py
utils/test/testapi/opnfv_testapi/resources/models.py
utils/test/testapi/opnfv_testapi/resources/result_handlers.py
utils/test/testapi/opnfv_testapi/resources/result_models.py
utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
utils/test/testapi/opnfv_testapi/resources/scenario_models.py
utils/test/testapi/opnfv_testapi/router/url_mappings.py
utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini
utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini
utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini
utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini
utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini
utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py
utils/test/testapi/opnfv_testapi/tests/unit/conftest.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/tests/unit/executor.py
utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py
utils/test/testapi/opnfv_testapi/tests/unit/resources/__init__.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/tests/unit/resources/scenario-c1.json [moved from utils/test/testapi/opnfv_testapi/tests/unit/scenario-c1.json with 100% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/scenario-c2.json [moved from utils/test/testapi/opnfv_testapi/tests/unit/scenario-c2.json with 96% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_base.py with 75% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_fake_pymongo.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_fake_pymongo.py with 100% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_pod.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_pod.py with 97% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_project.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_project.py with 98% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_result.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_result.py with 83% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_scenario.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_testcase.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_testcase.py with 99% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_token.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_token.py with 96% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/resources/test_version.py [moved from utils/test/testapi/opnfv_testapi/tests/unit/test_version.py with 94% similarity]
utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py [deleted file]
utils/test/testapi/opnfv_testapi/tornado_swagger/swagger.py
utils/test/testapi/opnfv_testapi/ui/auth/constants.py
utils/test/testapi/opnfv_testapi/ui/auth/sign.py
utils/test/testapi/opnfv_testapi/ui/auth/user.py
utils/test/testapi/opnfv_testapi/ui/root.py
utils/test/testapi/requirements.txt
utils/test/testapi/run_test.sh [deleted file]
utils/test/testapi/setup.py
utils/test/testapi/test-requirements.txt
utils/test/testapi/tox.ini
utils/test/testapi/update/templates/backup_mongodb.py
utils/test/testapi/update/templates/update_mongodb.py
utils/upload-artifact.sh [new file with mode: 0644]

diff --git a/.yamllint b/.yamllint
new file mode 100644 (file)
index 0000000..4402f17
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,8 @@
+---
+extends: default
+
+rules:
+  # 120 chars should be enough and don't fail if a line is longer
+  line-length:
+    max: 120
+    level: warning
diff --git a/INFO b/INFO
index 0fb3582..38ac5b3 100644 (file)
--- a/INFO
+++ b/INFO
@@ -9,6 +9,7 @@ Jira Project Prefix: RELENG
 Mailing list tag: [releng]
 IRC: Server:freenode.net Channel:#opnfv-octopus
 Repository: releng
+Other Repositories: releng-xci
 
 Committers:
 Fatih Degirmenci (Ericsson, fatih.degirmenci@ericsson.com)
index 97fea89..f5aab1a 100644 (file)
@@ -56,3 +56,4 @@
     publishers:
         - archive-artifacts:
             artifacts: 'job_output/*'
+        - email-jenkins-admins-on-failure
index 3a2ca60..ce9544b 100755 (executable)
@@ -3,7 +3,6 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-APEX_PKGS="common undercloud onos"
 IPV6_FLAG=False
 
 # log info to console
@@ -11,6 +10,8 @@ echo "Starting the Apex deployment."
 echo "--------------------------------------------------------"
 echo
 
+sudo rm -rf /tmp/tmp*
+
 if [ -z "$DEPLOY_SCENARIO" ]; then
   echo "Deploy scenario not set!"
   exit 1
@@ -57,7 +58,9 @@ else
   BASE=$CONFIG
   IMAGES=$RESOURCES
   LIB="/var/opt/opnfv/lib"
-
+  sudo mkdir -p /var/log/apex
+  sudo chmod 777 /var/log/apex
+  cd /var/log/apex
 fi
 
 # Install Dependencies
index 52c3c67..860cd60 100755 (executable)
@@ -3,8 +3,6 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-APEX_PKGS="common undercloud onos"
-
 # log info to console
 echo "Downloading the Apex artifact. This could take some time..."
 echo "--------------------------------------------------------"
@@ -40,9 +38,8 @@ else
   # find version of RPM
   VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-([0-9]{8}|[a-z]+-[0-9]\.[0-9]+)')
   # build RPM List which already includes base Apex RPM
-  for pkg in ${APEX_PKGS}; do
-    RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}.noarch.rpm"
-  done
+  RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-undercloud-${VERSION_EXTENSION}.noarch.rpm"
+  RPM_LIST+=" ${RPM_INSTALL_PATH}/python34-opnfv-apex-${VERSION_EXTENSION}.noarch.rpm"
 
   # remove old / install new RPMs
   if rpm -q opnfv-apex > /dev/null; then
index f53451d..4037d25 100755 (executable)
@@ -126,15 +126,13 @@ elif [ "$ARTIFACT_TYPE" == 'rpm' ]; then
     RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
     RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
     VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-    for pkg in common undercloud onos; do
-      RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-    done
+    RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-undercloud-${VERSION_EXTENSION}"
+    RPM_LIST+=" ${RPM_INSTALL_PATH}/python34-opnfv-apex-${VERSION_EXTENSION}"
     SRPM_INSTALL_PATH=$BUILD_DIRECTORY
     SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
     VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-    for pkg in common undercloud onos; do
-      SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
-    done
+    SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-undercloud-${VERSION_EXTENSION}"
+    SRPM_LIST+=" ${SRPM_INSTALL_PATH}/python34-opnfv-apex-${VERSION_EXTENSION}"
 
     if [[ -n "$SIGN_ARTIFACT" && "$SIGN_ARTIFACT" == "true" ]]; then
       signrpm
index a395cf2..b33c78e 100644 (file)
         - 'apex-csit-promote-daily-{stream}'
         - 'apex-fdio-promote-daily-{stream}'
         - 'apex-verify-iso-{stream}'
-        - 'apex-run-deploy-test-baremetal-{stream}'
+        - 'apex-{scenario}-baremetal-{scenario_stream}'
+        - 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
         - 'apex-upload-snapshot'
         - 'apex-create-snapshot'
     # stream:    branch with - in place of / (eg. stable-arno)
     # branch:    branch (eg. stable/arno)
     stream:
-        - master:
+        - master: &master
             branch: 'master'
             gs-pathname: ''
             build-slave: 'apex-build-master'
@@ -27,8 +28,9 @@
             baremetal-slave: 'apex-baremetal-master'
             verify-scenario: 'os-odl-nofeature-ha'
             concurrent-builds: 3
+            scenario_stream: 'master'
 
-        - danube:
+        - danube: &danube
             branch: 'stable/danube'
             gs-pathname: '/danube'
             build-slave: 'apex-build-danube'
             baremetal-slave: 'apex-baremetal-danube'
             verify-scenario: 'os-odl_l3-nofeature-ha'
             concurrent-builds: 1
-            disabled: false
+            scenario_stream: 'danube'
+            disabled: true
+
+    scenario:
+        - 'os-nosdn-nofeature-noha':
+              <<: *danube
+        - 'os-nosdn-nofeature-ha':
+              <<: *danube
+        - 'os-nosdn-nofeature-ha-ipv6':
+              <<: *danube
+        - 'os-nosdn-ovs-noha':
+              <<: *danube
+        - 'os-nosdn-ovs-ha':
+              <<: *danube
+        - 'os-nosdn-fdio-noha':
+              <<: *danube
+        - 'os-nosdn-fdio-ha':
+              <<: *danube
+        - 'os-nosdn-kvm-ha':
+              <<: *danube
+        - 'os-nosdn-kvm-noha':
+              <<: *danube
+        - 'os-odl_l2-fdio-noha':
+              <<: *danube
+        - 'os-odl_l2-fdio-ha':
+              <<: *danube
+        - 'os-odl_netvirt-fdio-noha':
+              <<: *danube
+        - 'os-odl_l2-sfc-noha':
+              <<: *danube
+        - 'os-odl_l3-nofeature-noha':
+              <<: *danube
+        - 'os-odl_l3-nofeature-ha':
+              <<: *danube
+        - 'os-odl_l3-ovs-noha':
+              <<: *danube
+        - 'os-odl_l3-ovs-ha':
+              <<: *danube
+        - 'os-odl-bgpvpn-ha':
+              <<: *danube
+        - 'os-odl-gluon-noha':
+              <<: *danube
+        - 'os-odl_l3-fdio-noha':
+              <<: *danube
+        - 'os-odl_l3-fdio-ha':
+              <<: *danube
+        - 'os-odl_l3-fdio_dvr-noha':
+              <<: *danube
+        - 'os-odl_l3-fdio_dvr-ha':
+              <<: *danube
+        - 'os-odl_l3-csit-noha':
+              <<: *danube
+        - 'os-onos-nofeature-ha':
+              <<: *danube
+        - 'os-ovn-nofeature-noha':
+              <<: *danube
+        - 'os-nosdn-nofeature-noha':
+              <<: *master
+        - 'os-nosdn-nofeature-ha':
+              <<: *master
+        - 'os-odl-nofeature-ha':
+              <<: *master
+        - 'os-odl-nofeature-noha':
+              <<: *master
+        - 'os-odl-bgpvpn-ha':
+              <<: *master
+        - 'os-ovn-nofeature-noha':
+              <<: *master
+        - 'os-nosdn-fdio-noha':
+              <<: *master
+        - 'os-nosdn-fdio-ha':
+              <<: *master
+        - 'os-odl-fdio-noha':
+              <<: *master
+        - 'os-odl-fdio-ha':
+              <<: *master
+        - 'os-nosdn-bar-ha':
+              <<: *master
+        - 'os-nosdn-bar-noha':
+              <<: *master
+        - 'os-nosdn-nofeature-ha-ipv6':
+              <<: *master
+        - 'os-nosdn-ovs_dpdk-noha':
+              <<: *master
+        - 'os-nosdn-ovs_dpdk-ha':
+              <<: *master
+        - 'os-nosdn-kvm_ovs_dpdk-noha':
+              <<: *master
+        - 'os-nosdn-kvm_ovs_dpdk-ha':
+              <<: *master
+        - 'os-odl-sfc-noha':
+              <<: *master
+        - 'os-odl-sfc-ha':
+              <<: *master
+        - 'os-odl-fdio-dvr-noha':
+              <<: *master
+        - 'os-odl-fdio-dvr-ha':
+              <<: *master
 
     platform:
          - 'baremetal'
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: ANT
-                    pattern: 'tests/**'
+                    pattern: 'apex/tests/**'
     properties:
         - logrotate-default
         - throttle:
                     pattern: 'lib/**'
                   - compare-type: ANT
                     pattern: 'config/**'
+                  - compare-type: ANT
+                    pattern: 'apex/**'
 
     properties:
         - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-verify.*'
         - throttle:
-            max-per-node: 3
+            max-per-node: 1
             max-total: 10
             option: 'project'
 
                     pattern: 'lib/**'
                   - compare-type: ANT
                     pattern: 'config/**'
+                  - compare-type: ANT
+                    pattern: 'apex/**'
 
     properties:
         - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-verify.*'
         - throttle:
-            max-per-node: 3
+            max-per-node: 1
             max-total: 10
             option: 'project'
 
                   kill-phase-on: FAILURE
                   abort-all-job: true
                   git-revision: true
+        - shell: |
+            echo DEPLOY_SCENARIO=$(echo $GERRIT_EVENT_COMMENT_TEXT | grep start-gate-scenario | grep -Eo 'os-.*') > detected_scenario
+        - inject:
+           properties-file: detected_scenario
         - multijob:
             name: functest-smoke
             condition: SUCCESSFUL
               - name: 'functest-apex-virtual-suite-{stream}'
                 current-parameters: false
                 predefined-parameters: |
-                  DEPLOY_SCENARIO={verify-scenario}
+                  DEPLOY_SCENARIO=$DEPLOY_SCENARIO
                   FUNCTEST_SUITE_NAME=healthcheck
                   GERRIT_BRANCH=$GERRIT_BRANCH
                   GERRIT_REFSPEC=$GERRIT_REFSPEC
 - job-template:
     name: 'apex-deploy-{platform}-{stream}'
 
-    # Job template for virtual deployment
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
     node: 'apex-{platform}-{stream}'
 
     concurrent: true
 
     disabled: false
-
+    quiet-period: 30
     scm:
         - git-scm-gerrit
 
     wrappers:
         - timeout:
-            timeout: 120
+            timeout: 140
             fail: true
 
     parameters:
+        - '{project}-{platform}-{stream}-defaults'
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
                 - 'apex-deploy.*'
                 - 'functest.*'
                 - 'yardstick.*'
+                - 'dovetail.*'
+                - 'storperf.*'
         - throttle:
             max-per-node: 1
             max-total: 10
 
 # Baremetal Deploy and Test
 - job-template:
-    name: 'apex-run-deploy-test-baremetal-{stream}'
+    name: 'apex-{scenario}-baremetal-{scenario_stream}'
 
-    # Job template for daily build
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
     project-type: 'multijob'
 
     disabled: false
 
     parameters:
         - '{project}-defaults'
-        - '{project}-baremetal-{stream}-defaults'
+        - '{project}-baremetal-{scenario_stream}-defaults'
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
             gs-pathname: '{gs-pathname}'
         - string:
             name: DEPLOY_SCENARIO
-            default: '{verify-scenario}'
+            default: '{scenario}'
             description: "Scenario to deploy with."
     properties:
         - logrotate-default
                 - 'apex-runner.*'
                 - 'apex-.*-promote.*'
                 - 'apex-run.*'
+                - 'apex-.+-baremetal-.+'
+        - throttle:
+            max-per-node: 1
+            max-total: 10
+            option: 'project'
     builders:
         - description-setter:
             description: "Deployed on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
         - multijob:
             name: 'Baremetal Deploy'
-            condition: ALWAYS
+            condition: SUCCESSFUL
             projects:
-                - name: 'apex-deploy-baremetal-{stream}'
+                - name: 'apex-deploy-baremetal-{scenario_stream}'
                   node-parameters: true
                   current-parameters: true
                   predefined-parameters: |
                   kill-phase-on: FAILURE
                   abort-all-job: true
                   git-revision: false
+        - multijob:
+            name: 'OPNFV Test Suite'
+            projects:
+                - name: 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
+                  node-parameters: true
+                  current-parameters: false
+                  predefined-parameters:
+                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+    publishers:
+        - groovy-postbuild:
+            script:
+                !include-raw-escape: ./update-build-result.groovy
+
+# Baremetal test job
+- job-template:
+    name: 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
+
+    project-type: 'multijob'
+
+    disabled: false
+
+    parameters:
+        - '{project}-defaults'
+        - '{project}-baremetal-{scenario_stream}-defaults'
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - apex-parameter:
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+            description: "Scenario to deploy with."
+    properties:
+        - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-verify.*'
+                - 'apex-runner.*'
+                - 'apex-.*-promote.*'
+                - 'apex-run.*'
+                - 'apex-testsuite-.+-baremetal-.+'
+        - throttle:
+            max-per-node: 1
+            max-total: 10
+            option: 'project'
+    builders:
+        - description-setter:
+            description: "Testing on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
         - multijob:
             name: Functest
             condition: ALWAYS
             projects:
-                - name: 'functest-apex-baremetal-daily-{stream}'
+                - name: 'functest-apex-baremetal-daily-{scenario_stream}'
                   node-parameters: true
                   current-parameters: false
                   predefined-parameters:
             name: Yardstick
             condition: ALWAYS
             projects:
-                - name: 'yardstick-apex-baremetal-daily-{stream}'
+                - name: 'yardstick-apex-baremetal-daily-{scenario_stream}'
+                  node-parameters: true
+                  current-parameters: false
+                  predefined-parameters:
+                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                  kill-phase-on: NEVER
+                  abort-all-job: false
+                  git-revision: false
+        - multijob:
+            name: Dovetail
+            condition: ALWAYS
+            projects:
+                - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
                   node-parameters: true
                   current-parameters: false
                   predefined-parameters:
                     DEPLOY_SCENARIO=$DEPLOY_SCENARIO
                   kill-phase-on: NEVER
+                  enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
                   abort-all-job: false
                   git-revision: false
+#        - multijob:
+#            name: StorPerf
+#            condition: ALWAYS
+#            projects:
+#                - name: 'storperf-apex-baremetal-daily-{scenario_stream}'
+#                  node-parameters: true
+#                  current-parameters: false
+#                  predefined-parameters:
+#                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+#                  kill-phase-on: NEVER
+#                  abort-all-job: false
+#                  git-revision: false
+# Build status is always success due conditional plugin prefetching
+# build status before multijob phases execute
+#        - conditional-step:
+#            condition-kind: current-status
+#            condition-worst: SUCCESS
+#            condtion-best: SUCCESS
+#            on-evaluation-failure: mark-unstable
+#            steps:
+#                - shell: 'echo "Tests Passed"'
 
 
 # danube Daily
             condition: SUCCESSFUL
             projects:
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-nofeature-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-nofeature-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-nofeature-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-nofeature-ha-ipv6-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-nofeature-ha-ipv6
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-ovs-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-ovs-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-ovs-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-ovs-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-fdio-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-fdio-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-fdio-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-fdio-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-kvm-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-kvm-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-nosdn-kvm-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-kvm-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l2-fdio-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l2-fdio-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l2-fdio-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l2-fdio-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_netvirt-fdio-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_netvirt-fdio-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l2-sfc-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l2-sfc-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-nofeature-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-nofeature-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-ovs-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-ovs-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-ovs-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-ovs-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl-bgpvpn-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl-bgpvpn-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl-gluon-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl-gluon-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-fdio-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-fdio-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-fdio-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-fdio-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-fdio_dvr-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-fdio_dvr-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-fdio_dvr-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-fdio_dvr-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-odl_l3-csit-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl_l3-csit-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-onos-nofeature-ha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-onos-nofeature-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-danube'
+                - name: 'apex-os-ovn-nofeature-noha-baremetal-danube'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-ovn-nofeature-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
             condition: SUCCESSFUL
             projects:
 
-                - name: 'apex-run-deploy-test-baremetal-master'
+                - name: 'apex-os-nosdn-nofeature-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-nofeature-ha-baremetal-master'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-master'
+                - name: 'apex-os-odl-nofeature-ha-baremetal-master'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-nofeature-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-master'
+                - name: 'apex-os-odl-nofeature-noha-baremetal-master'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl-nofeature-ha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
                   git-revision: false
 
-                - name: 'apex-run-deploy-test-baremetal-master'
+                - name: 'apex-os-odl-bgpvpn-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-ovn-nofeature-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-fdio-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-fdio-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-fdio-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-fdio-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-bar-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-bar-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-nofeature-ha-ipv6-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-ovs_dpdk-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-ovs_dpdk-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-kvm_ovs_dpdk-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-nosdn-kvm_ovs_dpdk-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-sfc-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-sfc-ha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-fdio-dvr-noha-baremetal-master'
+                  node-parameters: false
+                  current-parameters: false
+                  predefined-parameters: |
+                    OPNFV_CLEAN=yes
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+                - name: 'apex-os-odl-fdio-dvr-ha-baremetal-master'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO=os-odl-nofeature-noha
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
 - trigger:
     name: 'apex-master'
     triggers:
-        - timed: '0 3 1 1 7'
+        - timed: '0 12 * * *'
 - trigger:
     name: 'apex-danube'
     triggers:
-        - timed: '0 12 * * *'
+        - timed: '0 3 1 1 7'
index 752cf28..499e426 100644 (file)
         - 'apex-csit-promote-daily-{stream}'
         - 'apex-fdio-promote-daily-{stream}'
         - 'apex-verify-iso-{stream}'
-        - 'apex-run-deploy-test-baremetal-{stream}'
+        - 'apex-{scenario}-baremetal-{scenario_stream}'
+        - 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
         - 'apex-upload-snapshot'
         - 'apex-create-snapshot'
     # stream:    branch with - in place of / (eg. stable-arno)
     # branch:    branch (eg. stable/arno)
     stream:
-        - master:
+        - master: &master
             branch: 'master'
             gs-pathname: ''
             build-slave: 'apex-build-master'
@@ -27,8 +28,9 @@
             baremetal-slave: 'apex-baremetal-master'
             verify-scenario: 'os-odl-nofeature-ha'
             concurrent-builds: 3
+            scenario_stream: 'master'
 
-        - danube:
+        - danube: &danube
             branch: 'stable/danube'
             gs-pathname: '/danube'
             build-slave: 'apex-build-danube'
             baremetal-slave: 'apex-baremetal-danube'
             verify-scenario: 'os-odl_l3-nofeature-ha'
             concurrent-builds: 1
-            disabled: false
+            scenario_stream: 'danube'
+            disabled: true
+
+    scenario:
+        {%- for stream in scenarios %}
+        {%- for scenario in scenarios[stream] %}
+        - '{{scenario}}':
+              <<: *{{stream}}
+        {%- endfor %}
+        {%- endfor %}
 
     platform:
          - 'baremetal'
@@ -81,7 +92,7 @@
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: ANT
-                    pattern: 'tests/**'
+                    pattern: 'apex/tests/**'
     properties:
         - logrotate-default
         - throttle:
                     pattern: 'lib/**'
                   - compare-type: ANT
                     pattern: 'config/**'
+                  - compare-type: ANT
+                    pattern: 'apex/**'
 
     properties:
         - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-verify.*'
         - throttle:
-            max-per-node: 3
+            max-per-node: 1
             max-total: 10
             option: 'project'
 
                     pattern: 'lib/**'
                   - compare-type: ANT
                     pattern: 'config/**'
+                  - compare-type: ANT
+                    pattern: 'apex/**'
 
     properties:
         - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-verify.*'
         - throttle:
-            max-per-node: 3
+            max-per-node: 1
             max-total: 10
             option: 'project'
 
                   kill-phase-on: FAILURE
                   abort-all-job: true
                   git-revision: true
+        - shell: |
+            echo DEPLOY_SCENARIO=$(echo $GERRIT_EVENT_COMMENT_TEXT | grep start-gate-scenario | grep -Eo 'os-.*') > detected_scenario
+        - inject:
+           properties-file: detected_scenario
         - multijob:
             name: functest-smoke
             condition: SUCCESSFUL
               - name: 'functest-apex-virtual-suite-{stream}'
                 current-parameters: false
                 predefined-parameters: |
-                  DEPLOY_SCENARIO={verify-scenario}
+                  DEPLOY_SCENARIO=$DEPLOY_SCENARIO
                   FUNCTEST_SUITE_NAME=healthcheck
                   GERRIT_BRANCH=$GERRIT_BRANCH
                   GERRIT_REFSPEC=$GERRIT_REFSPEC
 - job-template:
     name: 'apex-deploy-{platform}-{stream}'
 
-    # Job template for virtual deployment
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
     node: 'apex-{platform}-{stream}'
 
     concurrent: true
 
     disabled: false
-
+    quiet-period: 30
     scm:
         - git-scm-gerrit
 
     wrappers:
         - timeout:
-            timeout: 120
+            timeout: 140
             fail: true
 
     parameters:
+        - '{project}-{platform}-{stream}-defaults'
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
                 - 'apex-deploy.*'
                 - 'functest.*'
                 - 'yardstick.*'
+                - 'dovetail.*'
+                - 'storperf.*'
         - throttle:
             max-per-node: 1
             max-total: 10
 
 # Baremetal Deploy and Test
 - job-template:
-    name: 'apex-run-deploy-test-baremetal-{stream}'
+    name: 'apex-{scenario}-baremetal-{scenario_stream}'
 
-    # Job template for daily build
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
     project-type: 'multijob'
 
     disabled: false
 
     parameters:
         - '{project}-defaults'
-        - '{project}-baremetal-{stream}-defaults'
+        - '{project}-baremetal-{scenario_stream}-defaults'
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
             gs-pathname: '{gs-pathname}'
         - string:
             name: DEPLOY_SCENARIO
-            default: '{verify-scenario}'
+            default: '{scenario}'
             description: "Scenario to deploy with."
     properties:
         - logrotate-default
                 - 'apex-runner.*'
                 - 'apex-.*-promote.*'
                 - 'apex-run.*'
+                - 'apex-.+-baremetal-.+'
+        - throttle:
+            max-per-node: 1
+            max-total: 10
+            option: 'project'
     builders:
         - description-setter:
             description: "Deployed on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
         - multijob:
             name: 'Baremetal Deploy'
-            condition: ALWAYS
+            condition: SUCCESSFUL
             projects:
-                - name: 'apex-deploy-baremetal-{stream}'
+                - name: 'apex-deploy-baremetal-{scenario_stream}'
                   node-parameters: true
                   current-parameters: true
                   predefined-parameters: |
                   kill-phase-on: FAILURE
                   abort-all-job: true
                   git-revision: false
+        - multijob:
+            name: 'OPNFV Test Suite'
+            projects:
+                - name: 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
+                  node-parameters: true
+                  current-parameters: false
+                  predefined-parameters:
+                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+                  git-revision: false
+
+    publishers:
+        - groovy-postbuild:
+            script:
+                !include-raw-escape: ./update-build-result.groovy
+
+# Baremetal test job
+- job-template:
+    name: 'apex-testsuite-{scenario}-baremetal-{scenario_stream}'
+
+    project-type: 'multijob'
+
+    disabled: false
+
+    parameters:
+        - '{project}-defaults'
+        - '{project}-baremetal-{scenario_stream}-defaults'
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - apex-parameter:
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+            description: "Scenario to deploy with."
+    properties:
+        - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-verify.*'
+                - 'apex-runner.*'
+                - 'apex-.*-promote.*'
+                - 'apex-run.*'
+                - 'apex-testsuite-.+-baremetal-.+'
+        - throttle:
+            max-per-node: 1
+            max-total: 10
+            option: 'project'
+    builders:
+        - description-setter:
+            description: "Testing on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
         - multijob:
             name: Functest
             condition: ALWAYS
             projects:
-                - name: 'functest-apex-baremetal-daily-{stream}'
+                - name: 'functest-apex-baremetal-daily-{scenario_stream}'
                   node-parameters: true
                   current-parameters: false
                   predefined-parameters:
             name: Yardstick
             condition: ALWAYS
             projects:
-                - name: 'yardstick-apex-baremetal-daily-{stream}'
+                - name: 'yardstick-apex-baremetal-daily-{scenario_stream}'
                   node-parameters: true
                   current-parameters: false
                   predefined-parameters:
                   kill-phase-on: NEVER
                   abort-all-job: false
                   git-revision: false
+        - multijob:
+            name: Dovetail
+            condition: ALWAYS
+            projects:
+                - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
+                  node-parameters: true
+                  current-parameters: false
+                  predefined-parameters:
+                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+                  kill-phase-on: NEVER
+                  enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
+                  abort-all-job: false
+                  git-revision: false
+#        - multijob:
+#            name: StorPerf
+#            condition: ALWAYS
+#            projects:
+#                - name: 'storperf-apex-baremetal-daily-{scenario_stream}'
+#                  node-parameters: true
+#                  current-parameters: false
+#                  predefined-parameters:
+#                    DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+#                  kill-phase-on: NEVER
+#                  abort-all-job: false
+#                  git-revision: false
+# Build status is always success because the conditional-step plugin prefetches
+# the build status before the multijob phases execute
+#        - conditional-step:
+#            condition-kind: current-status
+#            condition-worst: SUCCESS
+#            condition-best: SUCCESS
+#            on-evaluation-failure: mark-unstable
+#            steps:
+#                - shell: 'echo "Tests Passed"'
 
 {% for stream in scenarios %}
 # {{ stream }} Daily
             condition: SUCCESSFUL
             projects:
 {% for scenario in scenarios[stream] %}
-                - name: 'apex-run-deploy-test-baremetal-{{ stream }}'
+                - name: 'apex-{{ scenario }}-baremetal-{{ stream }}'
                   node-parameters: false
                   current-parameters: false
                   predefined-parameters: |
-                    DEPLOY_SCENARIO={{scenario}}
                     OPNFV_CLEAN=yes
                   kill-phase-on: NEVER
                   abort-all-job: true
 - trigger:
     name: 'apex-master'
     triggers:
-        - timed: '0 3 1 1 7'
+        - timed: '0 12 * * *'
 - trigger:
     name: 'apex-danube'
     triggers:
-        - timed: '0 12 * * *'
+        - timed: '0 3 1 1 7'
 
index dc9107a..def4e79 100644 (file)
@@ -3,6 +3,23 @@ master:
   - 'os-nosdn-nofeature-ha'
   - 'os-odl-nofeature-ha'
   - 'os-odl-nofeature-noha'
+  - 'os-odl-bgpvpn-ha'
+  - 'os-ovn-nofeature-noha'
+  - 'os-nosdn-fdio-noha'
+  - 'os-nosdn-fdio-ha'
+  - 'os-odl-fdio-noha'
+  - 'os-odl-fdio-ha'
+  - 'os-nosdn-bar-ha'
+  - 'os-nosdn-bar-noha'
+  - 'os-nosdn-nofeature-ha-ipv6'
+  - 'os-nosdn-ovs_dpdk-noha'
+  - 'os-nosdn-ovs_dpdk-ha'
+  - 'os-nosdn-kvm_ovs_dpdk-noha'
+  - 'os-nosdn-kvm_ovs_dpdk-ha'
+  - 'os-odl-sfc-noha'
+  - 'os-odl-sfc-ha'
+  - 'os-odl-fdio-dvr-noha'
+  - 'os-odl-fdio-dvr-ha'
 danube:
   - 'os-nosdn-nofeature-noha'
   - 'os-nosdn-nofeature-ha'
diff --git a/jjb/apex/update-build-result.groovy b/jjb/apex/update-build-result.groovy
new file mode 100644 (file)
index 0000000..9edca6b
--- /dev/null
@@ -0,0 +1,5 @@
+import hudson.model.*
+if (manager.logContains('^.*apex-deploy-baremetal.*SUCCESS$')
+      && manager.build.@result == hudson.model.Result.FAILURE) {
+    manager.build.@result = hudson.model.Result.UNSTABLE
+}
index 55d8ff9..f1bff07 100644 (file)
         branch: '{stream}'
         gs-pathname: ''
         disabled: false
-    danube: &danube
-        stream: danube
+    euphrates: &euphrates
+        stream: euphrates
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
-        disabled: false
+        disabled: true
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
 # CI POD's
 #--------------------------------
-#        danube
+#        euphrates
 #--------------------------------
     pod:
         - armband-baremetal:
             slave-label: armband-baremetal
             installer: fuel
-            <<: *danube
+            <<: *euphrates
         - armband-virtual:
             slave-label: armband-virtual
             installer: fuel
-            <<: *danube
+            <<: *euphrates
 #--------------------------------
 #        master
 #--------------------------------
 #--------------------------------
 # NONE-CI POD's
 #--------------------------------
-#        danube
+#        euphrates
 #--------------------------------
         - arm-pod2:
             slave-label: arm-pod2
             installer: fuel
-            <<: *danube
-        - arm-pod3:
-            slave-label: arm-pod3
+            <<: *euphrates
+        - arm-pod5:
+            slave-label: arm-pod5
             installer: fuel
-            <<: *danube
+            <<: *euphrates
         - arm-pod4:
             slave-label: arm-pod4
             installer: fuel
-            <<: *danube
-        - arm-virtual1:
-            slave-label: arm-virtual1
+            <<: *euphrates
+        - arm-virtual2:
+            slave-label: arm-virtual2
             installer: fuel
-            <<: *danube
+            <<: *euphrates
 #--------------------------------
 #        master
 #--------------------------------
             slave-label: arm-pod2
             installer: fuel
             <<: *master
-        - arm-pod3:
-            slave-label: arm-pod3
+        - arm-pod5:
+            slave-label: arm-pod5
             installer: fuel
             <<: *master
         - arm-pod4:
             slave-label: arm-pod4
             installer: fuel
             <<: *master
-        - arm-virtual1:
-            slave-label: arm-virtual1
+        - arm-virtual2:
+            slave-label: arm-virtual2
             installer: fuel
             <<: *master
 #--------------------------------
     publishers:
         - email:
             recipients: armband@enea.com
+        - email-jenkins-admins-on-failure
 
 ########################
 # parameter macros
             name: GS_URL
             default: artifacts.opnfv.org/$PROJECT{gs-pathname}
             description: "URL to Google Storage."
+        - string:
+            name: SSH_KEY
+            default: "/tmp/mcp.rsa"
+            description: "Path to private SSH key to access environment nodes. For MCP deployments only."
 
 ########################
 # trigger macros
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 1 * * *'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 16 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-master-trigger'
     triggers:
         - timed: ''
 
 #----------------------------------------------------------------------
-# Enea Armband CI Baremetal Triggers running against danube branch
+# Enea Armband CI Baremetal Triggers running against euphrates branch
 #----------------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-euphrates-trigger'
     triggers:
-        - timed: '0 0,16 * * 2,4'
+        - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-euphrates-trigger'
     triggers:
-        - timed: '0 0 * * 1,5,7'
+        - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-euphrates-trigger'
     triggers:
-        - timed: '0 16 * * 1,5,7'
+        - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-euphrates-trigger'
     triggers:
-        - timed: '0 8 * * 2,4,6'
+        - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-euphrates-trigger'
     triggers:
-        - timed: '0 8 * * 1,3,5,7'
+        - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-armband-baremetal-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-armband-baremetal-euphrates-trigger'
     triggers:
-        - timed: '0 0 * * 3,6'
+        - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-armband-baremetal-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-armband-baremetal-euphrates-trigger'
     triggers:
-        - timed: '0 16 * * 3,6'
+        - timed: ''
 #---------------------------------------------------------------
 # Enea Armband CI Virtual Triggers running against master branch
 #---------------------------------------------------------------
     triggers:
         - timed: ''
 #--------------------------------------------------------------------
-# Enea Armband CI Virtual Triggers running against danube branch
+# Enea Armband CI Virtual Triggers running against euphrates branch
 #--------------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-armband-virtual-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-armband-virtual-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-armband-virtual-euphrates-trigger'
     triggers:
         - timed: ''
 
 #--------------------------------------------------------------------
-# Enea Armband Non CI Virtual Triggers running against danube branch
+# Enea Armband Non CI Virtual Triggers running against euphrates branch
 #--------------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-virtual2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-virtual2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-virtual2-euphrates-trigger'
     triggers:
         - timed: ''
 
 # Enea Armband Non CI Virtual Triggers running against master branch
 #--------------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual1-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-virtual2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-virtual1-master-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-virtual2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual1-master-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-virtual2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual1-master-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-virtual2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual1-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-virtual2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-virtual1-master-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-virtual2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-virtual1-master-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-virtual2-master-trigger'
     triggers:
         - timed: ''
 
     triggers:
         - timed: ''
 #---------------------------------------------------------------
-# Enea Armband POD 2 Triggers running against danube branch
+# Enea Armband POD 2 Triggers running against euphrates branch
 #---------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod2-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-pod2-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod2-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod2-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod2-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-pod2-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod2-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-pod2-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod2-euphrates-trigger'
     triggers:
         - timed: ''
 #----------------------------------------------------------
 # Enea Armband POD 3 Triggers running against master branch
 #----------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod5-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-master-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod5-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-master-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod5-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-master-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod5-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod5-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-master-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod5-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-master-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod5-master-trigger'
     triggers:
         - timed: ''
 #---------------------------------------------------------------
-# Enea Armband POD 3 Triggers running against danube branch
+# Enea Armband POD 5 Triggers running against euphrates branch
 #---------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod5-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod5-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod5-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod5-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod5-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod5-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod5-euphrates-trigger'
     triggers:
         - timed: ''
 #--------------------------------------------------------------------------
     triggers:
         - timed: ''
 #--------------------------------------------------------------------------
-# Enea Armband POD 3 Triggers running against danube branch (aarch64 slave)
+# Enea Armband POD 4 Triggers running against euphrates branch (aarch64 slave)
 #--------------------------------------------------------------------------
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod4-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod4-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod4-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod4-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod4-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod4-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod4-euphrates-trigger'
     triggers:
         - timed: ''
index e445e08..05679aa 100755 (executable)
@@ -2,7 +2,7 @@
 # SPDX-license-identifier: Apache-2.0
 ##############################################################################
 # Copyright (c) 2016 Ericsson AB and others.
-#           (c) 2016 Enea Software AB
+#           (c) 2017 Enea Software AB
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
@@ -13,16 +13,18 @@ set -o pipefail
 
 export TERM="vt220"
 
-# source the file so we get OPNFV vars
-source latest.properties
+if [[ "$BRANCH" != 'master' ]]; then
+    # source the file so we get OPNFV vars
+    source latest.properties
 
-# echo the info about artifact that is used during the deployment
-echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+    # echo the info about artifact that is used during the deployment
+    echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+fi
 
 if [[ "$JOB_NAME" =~ "merge" ]]; then
     # set simplest scenario for virtual deploys to run for merges
     DEPLOY_SCENARIO="os-nosdn-nofeature-ha"
-else
+elif [[ "$BRANCH" != 'master' ]]; then
     # for none-merge deployments
     # checkout the commit that was used for building the downloaded artifact
     # to make sure the ISO and deployment mechanism uses same versions
@@ -33,13 +35,6 @@ fi
 # set deployment parameters
 export TMPDIR=${WORKSPACE}/tmpdir
 
-# arm-pod4 is an aarch64 jenkins slave for the same POD as the
-# x86 jenkins slave arm-pod3; therefore we use the same pod name
-# to deploy the pod from both jenkins slaves
-if [[ "${NODE_NAME}" == "arm-pod4" ]]; then
-    NODE_NAME="arm-pod3"
-fi
-
 LAB_NAME=${NODE_NAME/-*}
 POD_NAME=${NODE_NAME/*-}
 
@@ -68,10 +63,6 @@ if [[ $LAB_CONFIG_URL =~ ^(git|ssh):// ]]; then
     fi
 fi
 
-if [[ "$NODE_NAME" =~ "virtual" ]]; then
-    POD_NAME="virtual_kvm"
-fi
-
 # releng wants us to use nothing else but opnfv.iso for now. We comply.
 ISO_FILE=$WORKSPACE/opnfv.iso
 
@@ -93,7 +84,7 @@ fi
 # construct the command
 DEPLOY_COMMAND="$WORKSPACE/ci/deploy.sh -b ${LAB_CONFIG_URL} \
     -l $LAB_NAME -p $POD_NAME -s $DEPLOY_SCENARIO -i file://${ISO_FILE} \
-    -H -B ${DEFAULT_BRIDGE:-pxebr} -S $TMPDIR -L $WORKSPACE/$FUEL_LOG_FILENAME \
+    -B ${DEFAULT_BRIDGE:-pxebr} -S $TMPDIR -L $WORKSPACE/$FUEL_LOG_FILENAME \
     ${DEPLOY_CACHE}"
 
 # log info to console
@@ -102,7 +93,7 @@ echo "--------------------------------------------------------"
 echo "Scenario: $DEPLOY_SCENARIO"
 echo "Lab: $LAB_NAME"
 echo "POD: $POD_NAME"
-echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
+[[ "$BRANCH" != 'master' ]] && echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
 echo
 echo "Starting the deployment using $INSTALLER_TYPE. This could take some time..."
 echo "--------------------------------------------------------"
index e2dd097..4f83305 100755 (executable)
@@ -2,6 +2,7 @@
 # SPDX-license-identifier: Apache-2.0
 ##############################################################################
 # Copyright (c) 2016 Ericsson AB and others.
+#           (c) 2017 Enea AB
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
@@ -10,6 +11,9 @@
 set -o errexit
 set -o pipefail
 
+# disable Fuel ISO download for master branch
+[[ "$BRANCH" == 'master' ]] && exit 0
+
 echo "Host info: $(hostname) $(hostname -I)"
 
 # Configurable environment variables:
index f6840a0..0623b55 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - danube:
+        - euphrates:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 
 - job-template:
     name: 'armband-{installer}-build-daily-{stream}'
@@ -67,6 +67,7 @@
     publishers:
         - email:
             recipients: armband@enea.com
+        - email-jenkins-admins-on-failure
 
 ########################
 # parameter macros
index 567456d..c9476b1 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - danube:
+        - euphrates:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 #####################################
 # patch verification phases
 #####################################
index a71cf11..29c01bb 100755 (executable)
@@ -2,12 +2,21 @@
 # SPDX-license-identifier: Apache-2.0
 ##############################################################################
 # Copyright (c) 2016 Ericsson AB and others.
-# Copyright (c) 2016 Enea AB.
+# Copyright (c) 2017 Enea AB.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+
+# disable Armband iso build for master branch
+if [[ "$BRANCH" == 'master' ]]; then
+    touch $WORKSPACE/.noupload
+    echo "--------------------------------------------------------"
+    echo "Done!"
+    exit 0
+fi
+
 set -o errexit
 set -o nounset
 set -o pipefail
diff --git a/jjb/barometer/barometer-build.sh b/jjb/barometer/barometer-build.sh
new file mode 100644 (file)
index 0000000..e40841b
--- /dev/null
@@ -0,0 +1,21 @@
+set -x
+
+OPNFV_ARTIFACT_VERSION=$(date -u +"%Y-%m-%d_%H-%M-%S")
+OPNFV_ARTIFACT_URL="$GS_URL/$OPNFV_ARTIFACT_VERSION/"
+
+# log info to console
+echo "Starting the build of Barometer RPMs"
+echo "------------------------------------"
+echo
+
+cd ci
+./install_dependencies.sh
+./build_rpm.sh
+cd $WORKSPACE
+
+# save information regarding artifact into file
+(
+    echo "OPNFV_ARTIFACT_VERSION=$OPNFV_ARTIFACT_VERSION"
+    echo "OPNFV_ARTIFACT_URL=$OPNFV_ARTIFACT_URL"
+) > $WORKSPACE/opnfv.properties
+
diff --git a/jjb/barometer/barometer-upload-artifact.sh b/jjb/barometer/barometer-upload-artifact.sh
new file mode 100644 (file)
index 0000000..817cc57
--- /dev/null
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+RPM_WORKDIR=$WORKSPACE/rpmbuild
+RPM_DIR=$RPM_WORKDIR/RPMS/x86_64/
+cd $WORKSPACE/
+
+# source the opnfv.properties to get ARTIFACT_VERSION
+source $WORKSPACE/opnfv.properties
+
+# upload property files
+gsutil cp $WORKSPACE/opnfv.properties gs://$OPNFV_ARTIFACT_URL/opnfv.properties > gsutil.properties.log 2>&1
+gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log 2>&1
+
+echo "Uploading the barometer RPMs to artifacts.opnfv.org"
+echo "---------------------------------------------------"
+echo
+
+gsutil -m cp -r $RPM_DIR/* gs://$OPNFV_ARTIFACT_URL > $WORKSPACE/gsutil.log 2>&1
+
+# Check if the RPMs were pushed
+gsutil ls gs://$OPNFV_ARTIFACT_URL > /dev/null 2>&1
+if [[ $? -ne 0 ]]; then
+  echo "Problem while uploading barometer RPMs to gs://$OPNFV_ARTIFACT_URL!"
+  echo "Check log $WORKSPACE/gsutil.log on the appropriate build server"
+  exit 1
+fi
+
+gsutil -m setmeta \
+    -h "Cache-Control:private, max-age=0, no-transform" \
+    gs://$OPNFV_ARTIFACT_URL/*.rpm > /dev/null 2>&1
+
+gsutil -m setmeta \
+    -h "Content-Type:text/html" \
+    -h "Cache-Control:private, max-age=0, no-transform" \
+    gs://$GS_URL/latest.properties \
+    gs://$OPNFV_ARTIFACT_URL/opnfv.properties > /dev/null 2>&1
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+echo "Artifact is available at $OPNFV_ARTIFACT_URL"
+
+# Clean up the RPM repo from the build machine.
+rm -rf $RPM_WORKDIR
index 68b8a04..2d3e972 100644 (file)
         - shell: |
             pwd
             cd src
-            ./install_build_deps.sh
             make clobber
             make
 
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
+        - barometer-project-parameter:
+            gs-pathname: '{gs-pathname}'
         - 'opnfv-build-centos-defaults'
 
     scm:
          - timed: '@midnight'
 
     builders:
-        - shell: |
-            pwd
-            cd ci
-            ./install_dependencies.sh
-            ./build_rpm.sh
+        - shell:
+            !include-raw-escape: ./barometer-build.sh
+        - shell:
+            !include-raw-escape: ./barometer-upload-artifact.sh
+
+########################
+# parameter macros
+########################
+- parameter:
+    name: barometer-project-parameter
+    parameters:
+        - string:
+            name: GS_URL
+            default: '$GS_BASE{gs-pathname}'
+            description: "URL to Google Storage."
index c56ca19..71601cf 100644 (file)
     pod:
 #compass CI PODs
         - baremetal:
-            slave-label: compass-baremetal
+            slave-label: compass-baremetal-master
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
         - virtual:
-            slave-label: compass-virtual
+            slave-label: compass-virtual-master
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
         - baremetal:
-            slave-label: compass-baremetal
+            slave-label: compass-baremetal-branch
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
         - virtual:
-            slave-label: compass-virtual
+            slave-label: compass-virtual-branch
             installer: compass
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
@@ -70,8 +70,6 @@
        #     <<: *master
 #--------------------------------------------
     suite:
-        - 'rubbos'
-        - 'vstf'
         - 'posca_stress_traffic'
         - 'posca_stress_ping'
 
 
     publishers:
         - email:
-            recipients: hongbo.tianhongbo@huawei.com matthew.lijun@huawei.com liangqi1@huawei.com sunshine.wang@huawei.com
+            recipients: gabriel.yuyang@huawei.com, liyin11@huawei.com
+        - email-jenkins-admins-on-failure
 
 ########################
 # builder macros
 ####################
 
 - parameter:
-    name: 'bottlenecks-params-compass-baremetal'
+    name: 'bottlenecks-params-compass-baremetal-master'
     parameters:
         - string:
             name: BOTTLENECKS_DB_TARGET
-            default: '104.197.68.199:8086'
+            default: 'http://testresults.opnfv.org/test/api/v1/results'
             description: 'Arguments to use in order to choose the backend DB'
 
 - parameter:
-    name: 'bottlenecks-params-compass-virtual'
+    name: 'bottlenecks-params-compass-virtual-master'
     parameters:
         - string:
             name: BOTTLENECKS_DB_TARGET
-            default: ''
+            default: 'http://testresults.opnfv.org/test/api/v1/results'
             description: 'Arguments to use in order to choose the backend DB'
 
 - parameter:
-    name: 'bottlenecks-params-orange-pod2'
+    name: 'bottlenecks-params-compass-baremetal-branch'
     parameters:
         - string:
             name: BOTTLENECKS_DB_TARGET
-            default: '104.197.68.199:8086'
+            default: 'http://testresults.opnfv.org/test/api/v1/results'
+            description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
+    name: 'bottlenecks-params-compass-virtual-branch'
+    parameters:
+        - string:
+            name: BOTTLENECKS_DB_TARGET
+            default: 'http://testresults.opnfv.org/test/api/v1/results'
             description: 'Arguments to use in order to choose the backend DB'
index 04e620c..d0e2088 100644 (file)
@@ -10,6 +10,7 @@
 
 #clean up correlated dockers and their images
 bash $WORKSPACE/docker/docker_cleanup.sh -d bottlenecks --debug
+bash $WORKSPACE/docker/docker_cleanup.sh -d Bottlenecks --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d yardstick --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d kibana --debug
 bash $WORKSPACE/docker/docker_cleanup.sh -d elasticsearch --debug
index e6f8d1b..a757043 100644 (file)
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
 #set -e
 [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 BOTTLENECKS_IMAGE=opnfv/bottlenecks
 REPORT="True"
 
-if [[ $SUITE_NAME == rubbos || $SUITE_NAME == vstf ]]; then
-    echo "Bottlenecks: to pull image $BOTTLENECKS_IMAGE:${DOCKER_TAG}"
-    docker pull $BOTTLENECKS_IMAGE:$DOCKER_TAG >${redirect}
+RELENG_REPO=${WORKSPACE}/releng
+[ -d ${RELENG_REPO} ] && rm -rf ${RELENG_REPO}
+git clone https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO} >${redirect}
 
-    echo "Bottlenecks: docker start running"
-    opts="--privileged=true -id"
-    envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
-          -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
-          -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
-          -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}"
-    cmd="sudo docker run ${opts} ${envs} $BOTTLENECKS_IMAGE:${DOCKER_TAG} /bin/bash"
-    echo "Bottlenecks: docker cmd running ${cmd}"
-    ${cmd} >${redirect}
-
-    echo "Bottlenecks: obtain docker id"
-    container_id=$(docker ps | grep "$BOTTLENECKS_IMAGE:${DOCKER_TAG}" | awk '{print $1}' | head -1)
-    if [ -z ${container_id} ]; then
-        echo "Cannot find $BOTTLENECKS_IMAGE container ID ${container_id}. Please check if it exists."
-        docker ps -a
+YARDSTICK_REPO=${WORKSPACE}/yardstick
+[ -d ${YARDSTICK_REPO} ] && rm -rf ${YARDSTICK_REPO}
+git clone https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO} >${redirect}
+
+OPENRC=/tmp/admin_rc.sh
+OS_CACERT=/tmp/os_cacert
+
+BOTTLENECKS_CONFIG=/tmp
+
+if [[ $SUITE_NAME == *posca* ]]; then
+    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+
+    # Preparing OpenStack RC and Cacert files
+    echo "BOTTLENECKS INFO: fetching os credentials from $INSTALLER_TYPE"
+    if [[ $INSTALLER_TYPE == 'compass' ]]; then
+        if [[ ${BRANCH} == 'master' ]]; then
+            ${RELENG_REPO}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} -o ${OS_CACERT} >${redirect}
+            if [[ -f ${OS_CACERT} ]]; then
+                echo "BOTTLENECKS INFO: successfully fetching os_cacert for openstack: ${OS_CACERT}"
+            else
+                echo "BOTTLENECKS ERROR: couldn't find os_cacert file: ${OS_CACERT}, please check if it's been properly provided."
+                exit 1
+            fi
+        else
+            ${RELENG_REPO}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP}  >${redirect}
+        fi
+    fi
+
+    if [[ -f ${OPENRC} ]]; then
+        echo "BOTTLENECKS INFO: openstack credentials path is ${OPENRC}"
+        if [[ $INSTALLER_TYPE == 'compass' && ${BRANCH} == 'master' ]]; then
+            echo "BOTTLENECKS INFO: writing ${OS_CACERT} to ${OPENRC}"
+            echo "export OS_CACERT=${OS_CACERT}" >> ${OPENRC}
+        fi
+        cat ${OPENRC}
+    else
+        echo "BOTTLENECKS ERROR: couldn't find openstack rc file: ${OPENRC}, please check if it's been properly provided."
         exit 1
     fi
 
-    echo "Bottlenecks: to prepare openstack environment"
-    prepare_env="${REPO_DIR}/ci/prepare_env.sh"
-    echo "Bottlenecks: docker cmd running: ${prepare_env}"
-    sudo docker exec ${container_id} ${prepare_env}
-
-    echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
-    run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
-    echo "Bottlenecks: docker cmd running: ${run_testsuite}"
-    sudo docker exec ${container_id} ${run_testsuite}
-else
-    echo "Bottlenecks: installing POSCA docker-compose"
-    if [ -d usr/local/bin/docker-compose ]; then
-        rm -rf usr/local/bin/docker-compose
+    # Finding and creating POD description files from different deployments
+    ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+
+    if [ "$INSTALLER_TYPE" == "fuel" ]; then
+        echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+        sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${BOTTLENECKS_CONFIG}/id_rsa
+    fi
+
+    if [ "$INSTALLER_TYPE" == "apex" ]; then
+        echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+        sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${BOTTLENECKS_CONFIG}/id_rsa
     fi
-    curl -L https://github.com/docker/compose/releases/download/1.11.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
-    chmod +x /usr/local/bin/docker-compose
 
-    echo "Bottlenecks: composing up dockers"
-    cd $WORKSPACE
-    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml up -d
+    set +e
 
-    echo "Bottlenecks: running traffic stress/factor testing in posca testsuite "
-    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+    sudo pip install virtualenv
+
+    cd ${RELENG_REPO}/modules
+    sudo virtualenv venv
+    source venv/bin/activate
+    sudo pip install -e ./ >/dev/null
+    sudo pip install netaddr
+
+    if [[ ${INSTALLER_TYPE} == compass ]]; then
+        options="-u root -p root"
+    elif [[ ${INSTALLER_TYPE} == fuel ]]; then
+        options="-u root -p r00tme"
+    elif [[ ${INSTALLER_TYPE} == apex ]]; then
+        options="-u stack -k /root/.ssh/id_rsa"
+    else
+        echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
+    fi
+
+    if [[ ${INSTALLER_TYPE} != compass ]]; then
+        cmd="sudo python ${RELENG_REPO}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
+         -i ${INSTALLER_IP} ${options} -f ${BOTTLENECKS_CONFIG}/pod.yaml \
+         -s ${BOTTLENECKS_CONFIG}/id_rsa"
+        echo ${cmd}
+        ${cmd}
+    else
+        cmd="sudo cp ${YARDSTICK_REPO}/etc/yardstick/nodes/compass_sclab_virtual/pod.yaml \
+        ${BOTTLENECKS_CONFIG}"
+        echo ${cmd}
+        ${cmd}
+    fi
+
+    deactivate
+
+    set -e
+
+    cd ${WORKSPACE}
+
+    if [ -f ${BOTTLENECKS_CONFIG}/pod.yaml ]; then
+        echo "FILE: ${BOTTLENECKS_CONFIG}/pod.yaml:"
+        cat ${BOTTLENECKS_CONFIG}/pod.yaml
+    else
+        echo "ERROR: cannot find file ${BOTTLENECKS_CONFIG}/pod.yaml. Please check that it exists."
+        sudo ls -al ${BOTTLENECKS_CONFIG}
+    fi
+
+    # Pulling Bottlenecks docker and passing environment variables
+    echo "INFO: pulling Bottlenecks docker ${DOCKER_TAG}"
+    docker pull opnfv/bottlenecks:${DOCKER_TAG} >$redirect
+
+    opts="--privileged=true -id"
+    envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
+          -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \
+          -e BRANCH=${BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \
+          -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL} \
+          -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
+    docker_volume="-v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp"
+
+    cmd="docker run ${opts} ${envs} --name bottlenecks-load-master ${docker_volume} opnfv/bottlenecks:${DOCKER_TAG} /bin/bash"
+    echo "BOTTLENECKS INFO: running docker run command: ${cmd}"
+    ${cmd} >$redirect
+    sleep 5
+
+    # Running test cases through Bottlenecks docker
     if [[ $SUITE_NAME == posca_stress_traffic ]]; then
         TEST_CASE=posca_factor_system_bandwidth
-        echo "Bottlenecks: pulling tutum/influxdb for yardstick"
-        docker pull tutum/influxdb:0.13
-        sleep 5
-        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE $REPORT
+        testcase_cmd="docker exec bottlenecks-load-master python ${POSCA_SCRIPT}/../run_testsuite.py testcase $TEST_CASE $REPORT"
+        echo "BOTTLENECKS INFO: running test case ${TEST_CASE} with report indicator: ${testcase_cmd}"
+        ${testcase_cmd} >$redirect
     elif [[ $SUITE_NAME == posca_stress_ping ]]; then
         TEST_CASE=posca_factor_ping
-        sleep 5
-        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE $REPORT
+        testcase_cmd="docker exec bottlenecks-load-master python ${POSCA_SCRIPT}/../run_testsuite.py testcase $TEST_CASE $REPORT"
+        echo "BOTTLENECKS INFO: running test case ${TEST_CASE} with report indicator: ${testcase_cmd}"
+        ${testcase_cmd} >$redirect
     fi
-
-    echo "Bottlenecks: cleaning up docker-compose images and dockers"
-    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml down --rmi all
-fi
\ No newline at end of file
+fi
diff --git a/jjb/calipso/calipso.yml b/jjb/calipso/calipso.yml
new file mode 100644 (file)
index 0000000..c5ba8eb
--- /dev/null
@@ -0,0 +1,61 @@
+- project:
+    name: calipso
+
+    project: '{name}'
+
+    jobs:
+        - 'calipso-verify-{stream}'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            disabled: false
+
+- job-template:
+    name: 'calipso-verify-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'opnfv-build-defaults'
+
+
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+
+    builders:
+        - verify-unit-tests
+
+- builder:
+    name: verify-unit-tests
+    builders:
+        - shell: |
+            #!/bin/bash
+            set -o errexit
+            set -o nounset
+            set -o pipefail
+            cd $WORKSPACE
+            PYTHONPATH=$PWD/app app/test/verify.sh
diff --git a/jjb/ci_gate_security/anteater-clone-all-repos.sh b/jjb/ci_gate_security/anteater-clone-all-repos.sh
new file mode 100755 (executable)
index 0000000..8a9e73d
--- /dev/null
@@ -0,0 +1,33 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+set -o errexit
+set -o pipefail
+set -o nounset
+export PATH=$PATH:/usr/local/bin/
+
+
+#WORKSPACE="$(pwd)"
+
+cd $WORKSPACE
+if [ ! -d "$WORKSPACE/allrepos" ]; then
+  mkdir $WORKSPACE/allrepos
+fi
+
+cd $WORKSPACE/allrepos
+
+declare -a PROJECT_LIST
+EXCLUDE_PROJECTS="All-Projects|All-Users|securedlab"
+
+PROJECT_LIST=($(ssh gerrit.opnfv.org -p 29418 gerrit ls-projects | egrep -v $EXCLUDE_PROJECTS))
+echo "PROJECT_LIST=(${PROJECT_LIST[*]})" > $WORKSPACE/opnfv-projects.sh
+
+for PROJECT in ${PROJECT_LIST[@]}; do
+  echo "> Cloning $PROJECT"
+  if [ ! -d "$PROJECT" ]; then
+    git clone "https://gerrit.opnfv.org/gerrit/$PROJECT.git"
+  else
+    pushd "$PROJECT" > /dev/null
+    git pull -f
+    popd > /dev/null
+  fi
+done
index 71c5a06..00a78ce 100644 (file)
@@ -1,5 +1,5 @@
 #!/bin/bash
-set -o errexit
+# SPDX-license-identifier: Apache-2.0
 set -o pipefail
 export PATH=$PATH:/usr/local/bin/
 EXITSTATUS=0
@@ -12,14 +12,14 @@ if [[ -e securityaudit.log ]] ; then
     if grep ERROR securityaudit.log; then
         EXITSTATUS=1
     fi
-    
-    cat securityaudit.log  | awk -F"ERROR - " '{print $2}' > shortlog
-    
+
+    cat securityaudit.log  | awk -F"ERROR - " '{print $2}' | sed -e "s/\"/\\\\\"/g;s/\'/\\\\\'/g"> shortlog
+
     ssh -p 29418 gerrit.opnfv.org \
         "gerrit review -p $GERRIT_PROJECT \
         -m \"$(cat shortlog)\" \
         $GERRIT_PATCHSET_REVISION \
         --notify NONE"
-    
+
     exit $EXITSTATUS
 fi
diff --git a/jjb/ci_gate_security/anteater-security-audit-weekly.sh b/jjb/ci_gate_security/anteater-security-audit-weekly.sh
new file mode 100644 (file)
index 0000000..1190963
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+
+echo "--------------------------------------------------------"
+vols="-v $WORKSPACE/allrepos/:/home/opnfv/anteater/allrepos/"
+echo "Pulling releng-anteater docker image"
+echo "--------------------------------------------------------"
+docker pull opnfv/releng-anteater
+echo "--------------------------------------------------------"
+cmd="docker run -id $vols opnfv/releng-anteater /bin/bash"
+echo "Running docker command $cmd"
+container_id=$($cmd)
+echo "Container ID is $container_id"
+source $WORKSPACE/opnfv-projects.sh
+for project in "${PROJECT_LIST[@]}"
+
+do
+  cmd="/home/opnfv/venv/bin/anteater --project testproj --path /home/opnfv/anteater/allrepos/$project"
+  echo "Executing command inside container"
+  echo "$cmd"
+  echo "--------------------------------------------------------"
+  docker exec $container_id $cmd > $WORKSPACE/"$project".securityaudit.log 2>&1
+done
+
+exit_code=$?
+echo "--------------------------------------------------------"
+echo "Stopping docker container with ID $container_id"
+docker stop $container_id
+
+
+#gsutil cp $WORKSPACE/securityaudit.log \
+#    gs://$GS_URL/$PROJECT-securityaudit-weekly.log 2>&1
+#
+#gsutil -m setmeta \
+#    -h "Content-Type:text/html" \
+#    -h "Cache-Control:private, max-age=0, no-transform" \
+#    gs://$GS_URL/$PROJECT-securityaudit-weekly.log > /dev/null 2>&1
index d5c0e40..35f9354 100644 (file)
@@ -1,5 +1,7 @@
 #!/bin/bash
 cd $WORKSPACE
+REPORTDIR='.reports'
+mkdir -p $REPORTDIR
 echo "Generating patchset file to list changed files"
 git diff HEAD^1 --name-only | sed "s#^#/home/opnfv/anteater/$PROJECT/#" > $WORKSPACE/patchset
 echo "Changed files are"
@@ -7,7 +9,7 @@ echo "--------------------------------------------------------"
 cat $WORKSPACE/patchset
 echo "--------------------------------------------------------"
 
-vols="-v $WORKSPACE:/home/opnfv/anteater/$PROJECT"
+vols="-v $WORKSPACE:/home/opnfv/anteater/$PROJECT -v $WORKSPACE/$REPORTDIR:/home/opnfv/anteater/$REPORTDIR"
 envs="-e PROJECT=$PROJECT"
 
 echo "Pulling releng-anteater docker image"
@@ -15,18 +17,14 @@ echo "--------------------------------------------------------"
 docker pull opnfv/releng-anteater
 echo "--------------------------------------------------------"
 
-cmd="sudo docker run --privileged=true -id $envs $vols opnfv/releng-anteater /bin/bash"
-echo "Running docker command $cmd"
-container_id=$($cmd)
-echo "Container ID is $container_id"
-cmd="anteater --project $PROJECT --patchset /home/opnfv/anteater/$PROJECT/patchset"
-echo "Executing command inside container"
+cmd="docker run -i $envs $vols --rm opnfv/releng-anteater \
+/home/opnfv/venv/bin/anteater --project $PROJECT --patchset /home/opnfv/anteater/$PROJECT/patchset"
+echo "Running docker container"
 echo "$cmd"
-echo "--------------------------------------------------------"
-docker exec $container_id $cmd > $WORKSPACE/securityaudit.log 2>&1
+$cmd > $WORKSPACE/securityaudit.log 2>&1
 exit_code=$?
 echo "--------------------------------------------------------"
-echo "Stopping docker container with ID $container_id"
-docker stop $container_id
+echo "Docker container exited with code: $exit_code"
+echo "--------------------------------------------------------"
 cat securityaudit.log
 exit 0
index e2ad03e..55d629c 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-license-identifier: Apache-2.0
 ########################
 # Job configuration for opnfv-anteater (security audit)
 ########################
@@ -9,6 +10,7 @@
 
     jobs:
         - 'opnfv-security-audit-verify-{stream}'
+        - 'opnfv-security-audit-weekly-{stream}'
 
     stream:
         - master:
 ########################
 # job templates
 ########################
+- job-template:
+    name: 'opnfv-security-audit-weekly-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'ericsson-build3'
+            description: 'Slave label on Jenkins'
+        - project-parameter:
+            project: releng
+            branch: '{branch}'
+
+    triggers:
+        - timed: '@weekly'
+
+    builders:
+        - anteater-security-audit-weekly
+
 - job-template:
     name: 'opnfv-security-audit-verify-{stream}'
 
@@ -55,7 +77,7 @@
                     comment-contains-value: 'reverify'
             projects:
               - project-compare-type: 'REG_EXP'
-                project-pattern: 'sandbox|releng'
+                project-pattern: 'apex|armband|bamboo|barometer|bottlenecks|calipso|compass4nfv|conductor|cooper|cperf|daisy|doctor|dovetail|dpacc|enfv|escalator|fds|functest|octopus|pharos|releng|sandbox|yardstick'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
     builders:
         - anteater-security-audit
         - report-security-audit-result-to-gerrit
+    publishers:
+      - archive-artifacts:
+          artifacts: ".reports/*"
+
 ########################
 # builder macros
 ########################
     builders:
         - shell:
             !include-raw: ./anteater-report-to-gerrit.sh
+
+- builder:
+    name: anteater-security-audit-weekly
+    builders:
+        - shell:
+            !include-raw:
+                - ./anteater-clone-all-repos.sh
+                - ./anteater-security-audit-weekly.sh
+
index 3ba69fa..8b4a74b 100644 (file)
         stream: master
         branch: '{stream}'
         gs-pathname: ''
+        ppa-pathname: '/{stream}'
         disabled: false
         openstack-version: ocata
     danube: &danube
         stream: danube
         branch: 'stable/{stream}'
         gs-pathname: '/{stream}'
+        ppa-pathname: '/{stream}'
         disabled: false
         openstack-version: newton
 #--------------------------------
         - 'os-nosdn-openo-ha':
             disabled: false
             auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-odl-sfc-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-nosdn-dpdk-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'k8-nosdn-nofeature-ha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-nosdn-nofeature-noha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-odl_l3-nofeature-noha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-odl_l2-moon-noha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-nosdn-kvm-noha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-odl-sfc-noha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+        - 'os-nosdn-dpdk-noha':
+            disabled: false
+            auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
 
 
     jobs:
         - compass-ci-parameter:
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - string:
             name: DEPLOY_SCENARIO
             default: '{scenario}'
                 unstable-threshold: 'FAILURE'
         # dovetail only master by now, not sync with A/B/C branches
         # here the stream means the SUT stream, dovetail stream is defined in its own job
-        # only run on os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha scenario
+        # only run on os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha scenario
+        # run against SUT master branch, dovetail docker image with latest tag
+        # run against SUT danube branch, dovetail docker image with latest tag (odd days) and cvp.X.X.X tag (even days)
         - conditional-step:
-            condition-kind: regex-match
-            regex: os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha
-            label: '{scenario}'
+            condition-kind: and
+            condition-operands:
+                - condition-kind: regex-match
+                  regex: danube
+                  label: '{stream}'
+                - condition-kind: regex-match
+                  regex: os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha
+                  label: '{scenario}'
+                - condition-kind: day-of-week
+                  day-selector: select-days
+                  days:
+                      MON: true
+                      WED: true
+                      FRI: true
+                      SUN: true
+                  use-build-time: true
+            steps:
+                - trigger-builds:
+                    - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
+                      current-parameters: false
+                      predefined-parameters: |
+                        DOCKER_TAG=latest
+                        DEPLOY_SCENARIO={scenario}
+                      block: true
+                      same-node: true
+                      block-thresholds:
+                        build-step-failure-threshold: 'never'
+                        failure-threshold: 'never'
+                        unstable-threshold: 'FAILURE'
+        - conditional-step:
+            condition-kind: and
+            condition-operands:
+                - condition-kind: regex-match
+                  regex: danube
+                  label: '{stream}'
+                - condition-kind: regex-match
+                  regex: os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha
+                  label: '{scenario}'
+                - condition-kind: day-of-week
+                  day-selector: select-days
+                  days:
+                      TUES: true
+                      THURS: true
+                      SAT: true
+                  use-build-time: true
+            steps:
+                - trigger-builds:
+                    - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
+                      current-parameters: false
+                      predefined-parameters:
+                        DEPLOY_SCENARIO={scenario}
+                      block: true
+                      same-node: true
+                      block-thresholds:
+                        build-step-failure-threshold: 'never'
+                        failure-threshold: 'never'
+                        unstable-threshold: 'FAILURE'
+        - conditional-step:
+            condition-kind: and
+            condition-operands:
+                - condition-kind: regex-match
+                  regex: os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha
+                  label: '{scenario}'
+                - condition-kind: regex-match
+                  regex: master
+                  label: '{stream}'
             steps:
                 - trigger-builds:
                     - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
         - build-name:
             name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
         - timeout:
-            timeout: 120
+            timeout: 240
             abort: true
         - fix-workspace-permissions
 
         - compass-ci-parameter:
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - '{slave-label}-defaults'
         - '{installer}-defaults'
 
     builders:
         - description-setter:
             description: "POD: $NODE_NAME"
-        - shell:
-            !include-raw-escape: ./compass-download-artifact.sh
-        - shell:
-            !include-raw-escape: ./compass-deploy.sh
+        - conditional-step:
+            condition-kind: regex-match
+            regex: master
+            label: '{stream}'
+            steps:
+                - shell:
+                    !include-raw-escape: ./compass-build.sh
+                - shell:
+                    !include-raw-escape: ./compass-deploy.sh
+        - conditional-step:
+            condition-kind: regex-match
+            regex: danube
+            label: '{stream}'
+            steps:
+                - shell:
+                    !include-raw-escape: ./compass-download-artifact.sh
+                - shell:
+                    !include-raw-escape: ./compass-deploy.sh
+
 
 ########################
 # parameter macros
             name: GS_URL
             default: '$GS_BASE{gs-pathname}'
             description: "URL to Google Storage."
+        - string:
+            name: CACHE_DIRECTORY
+            default: "$HOME/opnfv/cache/$PROJECT{gs-pathname}"
+            description: "Directory where the cache to be used during the build is located."
+        - string:
+            name: PPA_REPO
+            default: "http://artifacts.opnfv.org/compass4nfv/package{ppa-pathname}"
+        - string:
+            name: PPA_CACHE
+            default: "$WORKSPACE/work/repo/"
 
 ########################
 # trigger macros
 ########################
+
+#---------------------------
+# ha-baremetal-centos-master
+#---------------------------
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
-        - timed: '0 23 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-onos-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
-        - timed: '0 7 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
-        - timed: '0 11 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-onos-sfc-ha-baremetal-centos-master-trigger'
     triggers:
-        - timed: '0 3 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-centos-master-trigger'
     triggers:
     name: 'compass-os-nosdn-kvm-ha-baremetal-centos-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+
+#-----------------------------
+# noha-baremetal-centos-master
+#-----------------------------
+- trigger:
+    name: 'compass-os-nosdn-nofeature-noha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l3-nofeature-noha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l2-moon-noha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-kvm-noha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-noha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-noha-baremetal-centos-master-trigger'
+    triggers:
+        - timed: ''
 
+#--------------------
+# ha-baremetal-master
+#--------------------
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 2 * * *'
+        - timed: '0 20 * * *'
 - trigger:
     name: 'compass-os-nosdn-openo-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 3 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 22 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-baremetal-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-onos-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 14 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 10 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-onos-sfc-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 6 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 12 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-baremetal-master-trigger'
+    triggers:
+        - timed: '0 14 * * *'
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-baremetal-master-trigger'
+    triggers:
+        - timed: '0 16 * * *'
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-baremetal-master-trigger'
+    triggers:
+        - timed: '0 10 * * *'
 
+#----------------------
+# noha-baremetal-master
+#----------------------
+- trigger:
+    name: 'compass-os-nosdn-kvm-noha-baremetal-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-nofeature-noha-baremetal-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l3-nofeature-noha-baremetal-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l2-moon-noha-baremetal-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-noha-baremetal-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-noha-baremetal-master-trigger'
+    triggers:
+        - timed: ''
+
+#--------------------
+# ha-baremetal-danube
+#--------------------
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-danube-trigger'
     triggers:
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-baremetal-danube-trigger'
     triggers:
-        - timed: '0 5 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-onos-sfc-ha-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 5 * * *'
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-danube-trigger'
     triggers:
     name: 'compass-os-nosdn-kvm-ha-baremetal-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+
+#----------------------
+# noha-baremetal-danube
+#----------------------
+- trigger:
+    name: 'compass-os-nosdn-kvm-noha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-nofeature-noha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l3-nofeature-noha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l2-moon-noha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-noha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-noha-baremetal-danube-trigger'
+    triggers:
+        - timed: ''
 
+#------------------
+# ha-virtual-master
+#------------------
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-nosdn-openo-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 22 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-onos-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 18 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 16 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-onos-sfc-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 15 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-master-trigger'
     triggers:
-        - timed: '0 14 * * *'
+        - timed: '0 12 * * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-master-trigger'
+    triggers:
+        - timed: '0 13 * * *'
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-virtual-master-trigger'
+    triggers:
+        - timed: '0 17 * * *'
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-virtual-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-virtual-master-trigger'
+    triggers:
+        - timed: '0 16 * * *'
+
+#--------------------
+# noha-virtual-master
+#--------------------
+- trigger:
+    name: 'compass-os-nosdn-kvm-noha-virtual-master-trigger'
+    triggers:
+        - timed: '0 13 * * *'
+- trigger:
+    name: 'compass-os-nosdn-nofeature-noha-virtual-master-trigger'
+    triggers:
+        - timed: '0 14 * * *'
+- trigger:
+    name: 'compass-os-odl_l3-nofeature-noha-virtual-master-trigger'
+    triggers:
+        - timed: '0 15 * * *'
+- trigger:
+    name: 'compass-os-odl_l2-moon-noha-virtual-master-trigger'
+    triggers:
+        - timed: '0 18 * * *'
+- trigger:
+    name: 'compass-os-odl-sfc-noha-virtual-master-trigger'
+    triggers:
+        - timed: '0 20 * * *'
+- trigger:
+    name: 'compass-os-nosdn-dpdk-noha-virtual-master-trigger'
+    triggers:
+        - timed: '0 11 * * *'
 
+#------------------
+# ha-virtual-danube
+#------------------
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-virtual-danube-trigger'
     triggers:
     name: 'compass-os-nosdn-kvm-ha-virtual-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-ha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-ha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-k8-nosdn-nofeature-ha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+
+#--------------------
+# noha-virtual-danube
+#--------------------
+- trigger:
+    name: 'compass-os-nosdn-kvm-noha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-nofeature-noha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l3-nofeature-noha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl_l2-moon-noha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-odl-sfc-noha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-dpdk-noha-virtual-danube-trigger'
+    triggers:
+        - timed: ''
index 2668ccd..9d4ae51 100644 (file)
@@ -6,24 +6,23 @@ echo "Starting the deployment on baremetal environment using $INSTALLER_TYPE. Th
 echo "--------------------------------------------------------"
 echo
 
-# source the properties file so we get OPNFV vars
-source $BUILD_DIRECTORY/latest.properties
-
-# echo the info about artifact that is used during the deployment
-echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
-
-if [[ ! "$JOB_NAME" =~ (verify|merge) ]]; then
-    # for non-merge deployments
-    # checkout the commit that was used for building the downloaded artifact
-    # to make sure the ISO and deployment mechanism uses same versions
-    echo "Checking out $OPNFV_GIT_SHA1"
-    git checkout $OPNFV_GIT_SHA1 --quiet
-fi
-
 echo 1 > /proc/sys/vm/drop_caches
 
 export CONFDIR=$WORKSPACE/deploy/conf
 if [[ "$BRANCH" = 'stable/danube' ]]; then
+    # source the properties file so we get OPNFV vars
+    source $BUILD_DIRECTORY/latest.properties
+    # echo the info about artifact that is used during the deployment
+    echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+
+    if [[ ! "$JOB_NAME" =~ (verify|merge) ]]; then
+        # for non-merge deployments
+        # checkout the commit that was used for building the downloaded artifact
+        # to make sure the ISO and deployment mechanism uses same versions
+        echo "Checking out $OPNFV_GIT_SHA1"
+        git checkout $OPNFV_GIT_SHA1 --quiet
+    fi
+
     export ISO_URL=file://$BUILD_DIRECTORY/compass.iso
 else
     export ISO_URL=file://$BUILD_DIRECTORY/compass.tar.gz
@@ -40,6 +39,8 @@ elif [[ "${DEPLOY_SCENARIO}" =~ "-onos" ]]; then
     export NETWORK_CONF_FILE=network_onos.yml
 elif [[ "${DEPLOY_SCENARIO}" =~ "-openo" ]]; then
     export NETWORK_CONF_FILE=network_openo.yml
+elif [[ "${DEPLOY_SCENARIO}" =~ "-dpdk" ]]; then
+    export NETWORK_CONF_FILE=network_dpdk.yml
 else
     export NETWORK_CONF_FILE=network.yml
 fi
@@ -51,6 +52,11 @@ fi
 if [[ "$NODE_NAME" =~ "-virtual" ]]; then
     export NETWORK_CONF=$CONFDIR/vm_environment/$NODE_NAME/${NETWORK_CONF_FILE}
     export DHA_CONF=$CONFDIR/vm_environment/${DEPLOY_SCENARIO}.yml
+    if [[ "${DEPLOY_SCENARIO}" =~ "-moon-noha" ]]; then
+        export VIRT_NUMBER=3
+    elif [[ "${DEPLOY_SCENARIO}" =~ "-noha" ]]; then
+        export VIRT_NUMBER=2
+    fi
 else
     export INSTALL_NIC=eth1
     export NETWORK_CONF=$CONFDIR/hardware_environment/$NODE_NAME/${NETWORK_CONF_FILE}
index 966dae5..101db82 100644 (file)
@@ -19,7 +19,7 @@
 #------------------------------------
     pod:
         - baremetal:
-            slave-label: compass-baremetal
+            slave-label: compass-baremetal-branch
             os-version: 'xenial'
             <<: *danube
 #-----------------------------------
         - build-name:
             name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
         - timeout:
-            timeout: 120
+            timeout: 240
             abort: true
         - fix-workspace-permissions
 
index 4b05e22..ee91e02 100644 (file)
@@ -29,7 +29,7 @@
             os-version: 'xenial'
             openstack-os-version: ''
         - 'centos7':
-            disabled: false
+            disabled: true
             os-version: 'centos7'
             openstack-os-version: ''
 #####################################
@@ -74,7 +74,7 @@
     wrappers:
         - ssh-agent-wrapper
         - timeout:
-            timeout: 120
+            timeout: 240
             fail: true
         - fix-workspace-permissions
 
             condition: SUCCESSFUL
             projects:
                 - name: 'functest-compass-virtual-suite-{stream}'
-                  current-parameters: true
-                  predefined-parameters:
+                  current-parameters: false
+                  predefined-parameters: |
                     FUNCTEST_SUITE_NAME=healthcheck
+                    DEPLOY_SCENARIO=os-nosdn-nofeature-ha
                   node-parameters: true
                   kill-phase-on: NEVER
                   abort-all-job: true
                 - name: 'functest-compass-virtual-suite-{stream}'
-                  current-parameters: true
-                  predefined-parameters:
+                  current-parameters: false
+                  predefined-parameters: |
                     FUNCTEST_SUITE_NAME=vping_ssh
+                    DEPLOY_SCENARIO=os-nosdn-nofeature-ha
                   node-parameters: true
                   kill-phase-on: NEVER
                   abort-all-job: true
     wrappers:
         - ssh-agent-wrapper
         - timeout:
-            timeout: 120
+            timeout: 240
             fail: true
         - fix-workspace-permissions
 
     wrappers:
         - ssh-agent-wrapper
         - timeout:
-            timeout: 120
+            timeout: 240
             fail: true
         - fix-workspace-permissions
 
index aac76ba..9a680e7 100644 (file)
@@ -45,6 +45,9 @@
         # NOHA scenarios
         - 'os-nosdn-nofeature-noha':
             auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
+        # ODL_L3 scenarios
+        - 'os-odl-nofeature-ha':
+            auto-trigger-name: 'daisy-{scenario}-{pod}-daily-{stream}-trigger'
 
     jobs:
         - '{project}-{scenario}-{pod}-daily-{stream}'
 #-----------------------------------------------
 # Triggers for job running on daisy-baremetal against master branch
 #-----------------------------------------------
-# HA Scenarios
+# Basic HA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: ''
-# NOHA Scenarios
+        - timed: '0 16 * * *'
+# Basic NOHA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
     triggers:
         - timed: ''
+# ODL Scenarios
+- trigger:
+    name: 'daisy-os-odl-nofeature-ha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: '0 12 * * *'
 #-----------------------------------------------
 # Triggers for job running on daisy-virtual against master branch
 #-----------------------------------------------
+# Basic HA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-ha-virtual-daily-master-trigger'
     triggers:
-        - timed: ''
-# NOHA Scenarios
+        - timed: '0 16 * * *'
+# Basic NOHA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: 'H 8,22 * * *'
-
+        - timed: ''
+# ODL Scenarios
+- trigger:
+    name: 'daisy-os-odl-nofeature-ha-virtual-daily-master-trigger'
+    triggers:
+        - timed: '0 12 * * *'
index 785f3a5..0bcac4b 100755 (executable)
@@ -28,7 +28,7 @@ git clone ssh://jenkins-zte@gerrit.opnfv.org:29418/securedlab --quiet \
 # daisy ci/deploy/deploy.sh use $BASE_DIR/labs dir
 cp -r securedlab/labs .
 
-DEPLOY_COMMAND="sudo ./ci/deploy/deploy.sh -b $BASE_DIR \
+DEPLOY_COMMAND="sudo -E ./ci/deploy/deploy.sh -b $BASE_DIR \
                 -l $LAB_NAME -p $POD_NAME -B $BRIDGE -s $DEPLOY_SCENARIO"
 
 # log info to console
index 0a9d43d..fd0da79 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - danube:
-            branch: 'stable/{stream}'
-            gs-pathname: '/{stream}'
-            disabled: false
 
     phase:
         - 'build':
@@ -64,7 +60,7 @@
         - git-scm
 
     triggers:
-        - timed: '0 H/8 * * *'
+        - timed: '0 8 * * *'
 
     parameters:
         - project-parameter:
 
     publishers:
         - '{installer}-recipients'
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: '{installer}-{phase}-daily-{stream}'
     publishers:
         - email:
             recipients: hu.zhijiang@zte.com.cn lu.yao135@zte.com.cn zhou.ya@zte.com.cn yangyang1@zte.com.cn julienjut@gmail.com
+        - email-jenkins-admins-on-failure
 
 - parameter:
     name: 'daisy-project-parameter'
index 807d436..d535d61 100644 (file)
 
     installer:
         - apex:
-            slave-label: 'ool-virtual1'
-            pod: 'ool-virtual1'
+            slave-label: 'doctor-apex-verify'
         - fuel:
-            slave-label: 'ool-virtual2'
-            pod: 'ool-virtual2'
+            slave-label: 'doctor-fuel-verify'
         #- joid:
         #    slave-label: 'ool-virtual3'
         #    pod: 'ool-virtual3'
 
     task:
         - verify:
-            profiler: 'none'
             auto-trigger-name: 'doctor-verify'
-        - profiling:
-            profiler: 'poc'
-            auto-trigger-name: 'experimental'
+            is-python: false
+        - python-verify:
+            auto-trigger-name: 'doctor-verify'
+            is-python: true
+
+    pod:
+        - arm-pod2:
+            slave-label: '{pod}'
+        - arm-pod5:
+            slave-label: '{pod}'
 
     jobs:
         - 'doctor-verify-{stream}'
+        - 'doctor-{task}-{installer}-{inspector}-{pod}-{stream}'
         - 'doctor-{task}-{installer}-{inspector}-{stream}'
 
 - job-template:
     builders:
         - shell: "[ -e tests/run.sh ] && bash -n ./tests/run.sh"
 
+- job-template:
+    name: 'doctor-{task}-{installer}-{inspector}-{pod}-{stream}'
+
+    node: '{slave-label}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - git-scm-gerrit
+
+
+    triggers:
+        - '{auto-trigger-name}':
+            project: '{project}'
+            branch: '{branch}'
+            files: 'tests/**'
+
+    builders:
+        - shell: "[ -e tests/run.sh ] && bash -n ./tests/run.sh"
+
+
 - job-template:
     name: 'doctor-{task}-{installer}-{inspector}-{stream}'
 
             default: 'doctor-notification'
         - string:
             name: TESTCASE_OPTIONS
-            default: '-e INSPECTOR_TYPE={inspector} -e PROFILER_TYPE={profiler} -v $WORKSPACE:/home/opnfv/repos/doctor'
+            default: '-e INSPECTOR_TYPE={inspector} -e PYTHON_ENABLE={is-python} -v $WORKSPACE:/home/opnfv/repos/doctor'
             description: 'Additional parameters specific to test case(s)'
         # functest-parameter
         - string:
         - '{auto-trigger-name}':
             project: '{project}'
             branch: '{branch}'
+            files: 'tests/**'
 
     builders:
         - 'clean-workspace-log'
             #       so this symbolic link should not be in 'tests/'. Otherwise,
             #       we'll have the same log twice in jenkins console log.
             ln -sfn $HOME/opnfv/functest/results/{stream} functest_results
+            # NOTE: Get functest script in $WORKSPACE. This functest script is
+            #       needed to perform VM image download in set-functest-env.sh
+            #       from E release cycle.
+            mkdir -p functest/ci
+            wget https://git.opnfv.org/functest/plain/functest/ci/download_images.sh -O functest/ci/download_images.sh
         - 'functest-suite-builder'
         - shell: |
-            functest_log="$HOME/opnfv/functest/results/{stream}/{project}.log"
+            functest_log="$HOME/opnfv/functest/results/{stream}/$FUNCTEST_SUITE_NAME.log"
             # NOTE: checking the test result, as the previous job could return
             #       0 regardless the result of doctor test scenario.
             grep -e ' OK$' $functest_log || exit 1
         - archive:
             artifacts: 'tests/*.log'
         - archive:
-            artifacts: 'functest_results/{project}.log'
+            artifacts: 'functest_results/$FUNCTEST_SUITE_NAME.log'
+        - email-jenkins-admins-on-failure
 
 
 #####################################
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: ANT
-                    pattern: 'tests/**'
+                    pattern: '{files}'
             skip-vote:
                 successful: true
                 failed: true
index 682948d..f6dda5f 100644 (file)
@@ -25,7 +25,7 @@
         branch: 'stable/{stream}'
         dovetail-branch: master
         gs-pathname: '/{stream}'
-        docker-tag: 'latest'
+        docker-tag: 'cvp.0.5.0'
 
 #-----------------------------------
 # POD, PLATFORM, AND BRANCH MAPPING
 # that have not been switched using labels for slaves
 #--------------------------------
 #apex PODs
-        - lf-pod1:
-            slave-label: '{pod}'
+        - virtual:
+            slave-label: apex-virtual-master
             SUT: apex
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
-        - lf-pod1:
-            slave-label: '{pod}'
+        - baremetal:
+            slave-label: apex-baremetal-master
+            SUT: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - virtual:
+            slave-label: apex-virtual-danube
+            SUT: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
+        - baremetal:
+            slave-label: apex-baremetal-danube
             SUT: apex
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
             SUT: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
-        - arm-pod3:
+        - arm-pod5:
             slave-label: '{pod}'
             SUT: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
-        - arm-virtual1:
+        - arm-virtual2:
             slave-label: '{pod}'
             SUT: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
 #--------------------------------
     testsuite:
-        - 'debug'
         - 'compliance_set'
         - 'proposed_tests'
 
         - build-name:
             name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
         - timeout:
-            timeout: 180
+            timeout: 240
             abort: true
         - fix-workspace-permissions
 
             name: DOVETAIL_REPO_DIR
             default: "/home/opnfv/dovetail"
             description: "Directory where the dovetail repository is cloned"
+        - string:
+            name: SUT_BRANCH
+            default: '{branch}'
+            description: "SUT branch"
 
     scm:
         - git-scm
             artifacts: 'results/**/*'
             allow-empty: true
             fingerprint: true
+        - email-jenkins-admins-on-failure
 
 #--------------------------
 # builder macros
index 0ee789a..2d66fe0 100755 (executable)
@@ -1,20 +1,27 @@
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
 
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
 # Clean up dependent projects' docker images that have no containers and whose image tag is None
 clean_images=(opnfv/functest opnfv/yardstick opnfv/testapi mongo)
 for clean_image in "${clean_images[@]}"; do
-    echo "Removing image $image_id, which has no containers and image tag is None"
     dangling_images=($(docker images -f "dangling=true" | grep ${clean_image} | awk '{print $3}'))
     if [[ -n ${dangling_images} ]]; then
         for image_id in "${dangling_images[@]}"; do
+            echo "Removing image $image_id, which has no containers and image tag is None"
             docker rmi $image_id >${redirect}
         done
     fi
 done
 
-echo "Remove containers with image opnfv/dovetail:<None>..."
+echo "Remove dovetail images with tag None and containers with these images ..."
 dangling_images=($(docker images -f "dangling=true" | grep opnfv/dovetail | awk '{print $3}'))
 if [[ -n ${dangling_images} ]]; then
     for image_id in "${dangling_images[@]}"; do
@@ -30,13 +37,13 @@ if [[ ! -z $(docker ps -a | grep opnfv/dovetail) ]]; then
     docker ps -a | grep opnfv/dovetail | awk '{print $1}' | xargs docker rm -f >${redirect}
 fi
 
-echo "Remove dovetail existing images if exist..."
-if [[ ! -z $(docker images | grep opnfv/dovetail) ]]; then
-    echo "Docker images to remove:"
-    docker images | head -1 && docker images | grep opnfv/dovetail >${redirect}
-    image_tags=($(docker images | grep opnfv/dovetail | awk '{print $2}'))
-    for tag in "${image_tags[@]}"; do
-        echo "Removing docker image opnfv/dovetail:$tag..."
-        docker rmi opnfv/dovetail:$tag >${redirect}
-    done
-fi
+#echo "Remove dovetail existing images if exist..."
+#if [[ ! -z $(docker images | grep opnfv/dovetail) ]]; then
+#    echo "Docker images to remove:"
+#    docker images | head -1 && docker images | grep opnfv/dovetail >${redirect}
+#    image_tags=($(docker images | grep opnfv/dovetail | awk '{print $2}'))
+#    for tag in "${image_tags[@]}"; do
+#        echo "Removing docker image opnfv/dovetail:$tag..."
+#        docker rmi opnfv/dovetail:$tag >${redirect}
+#    done
+#fi
index dce7e58..7dd6a2d 100755 (executable)
@@ -1,4 +1,11 @@
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
 
 # The term INSTALLER is the one used in the community; this is just an example run.
 # Multiple platforms are supported.
@@ -7,14 +14,12 @@ set -e
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
 DOVETAIL_HOME=${WORKSPACE}/cvp
-if [ -d ${DOVETAIL_HOME} ]; then
-    sudo rm -rf ${DOVETAIL_HOME}/*
-else
-    sudo mkdir -p ${DOVETAIL_HOME}
-fi
+[ -d ${DOVETAIL_HOME} ] && sudo rm -rf ${DOVETAIL_HOME}
+
+mkdir -p ${DOVETAIL_HOME}
 
 DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
-sudo mkdir -p ${DOVETAIL_CONFIG}
+mkdir -p ${DOVETAIL_CONFIG}
 
 sshkey=""
 # The path of openrc.sh is defined in fetch_os_creds.sh
@@ -47,7 +52,12 @@ releng_repo=${WORKSPACE}/releng
 git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
 
 if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
-    sudo /bin/bash ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
+    echo "SUT branch is $SUT_BRANCH"
+    echo "dovetail branch is $BRANCH"
+    BRANCH_BACKUP=$BRANCH
+    export BRANCH=$SUT_BRANCH
+    ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
+    export BRANCH=$BRANCH_BACKUP
 fi
 
 if [[ -f $OPENRC ]]; then
@@ -59,6 +69,8 @@ else
     exit 1
 fi
 
+set +e
+
 sudo pip install virtualenv
 
 cd ${releng_repo}/modules
@@ -71,6 +83,8 @@ if [[ ${INSTALLER_TYPE} == compass ]]; then
     options="-u root -p root"
 elif [[ ${INSTALLER_TYPE} == fuel ]]; then
     options="-u root -p r00tme"
+elif [[ ${INSTALLER_TYPE} == apex ]]; then
+    options="-u stack -k /root/.ssh/id_rsa"
 else
     echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
     echo "HA test cases may not run properly."
@@ -83,6 +97,8 @@ ${cmd}
 
 deactivate
 
+set -e
+
 cd ${WORKSPACE}
 
 if [ -f ${DOVETAIL_CONFIG}/pod.yaml ]; then
@@ -101,8 +117,31 @@ if [ "$INSTALLER_TYPE" == "fuel" ]; then
     sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
 fi
 
+if [ "$INSTALLER_TYPE" == "apex" ]; then
+    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+    sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
+image_path=${HOME}/opnfv/dovetail/images
+if [[ ! -d ${image_path} ]]; then
+    mkdir -p ${image_path}
+fi
 # sdnvpn test case needs to download this image first before running
-sudo wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
+ubuntu_image=${image_path}/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+if [[ ! -f ${ubuntu_image} ]]; then
+    echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
+    wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
+fi
+sudo cp ${ubuntu_image} ${DOVETAIL_CONFIG}
+
+# functest needs to download this image first before running
+cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
+if [[ ! -f ${cirros_image} ]]; then
+    echo "Download image cirros-0.3.5-x86_64-disk.img ..."
+    wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
+fi
+sudo cp ${cirros_image} ${DOVETAIL_CONFIG}
+
 
 opts="--privileged=true -id"
 
@@ -134,6 +173,37 @@ if [ $(docker ps | grep "opnfv/dovetail:${DOCKER_TAG}" | wc -l) == 0 ]; then
     exit 1
 fi
 
+# Modify tempest_conf.yaml file
+tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
+if [ ${INSTALLER_TYPE} == 'compass' ]; then
+    volume_device='vdb'
+else
+    volume_device='vdc'
+fi
+
+cat << EOF >$tempest_conf_file
+
+compute:
+    min_compute_nodes: 2
+    volume_device_name: ${volume_device}
+    min_microversion: 2.2
+    max_microversion: latest
+
+compute-feature-enabled:
+    live_migration: True
+    block_migration_for_live_migration: True
+    block_migrate_cinder_iscsi: True
+    attach_encrypted_volume: True
+
+EOF
+
+echo "${tempest_conf_file}..."
+cat ${tempest_conf_file}
+
+cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/home/opnfv/dovetail/dovetail/userconfig"
+echo "exec command: ${cp_tempest_cmd}"
+$cp_tempest_cmd
+
 list_cmd="dovetail list ${TESTSUITE}"
 run_cmd="dovetail run --testsuite ${TESTSUITE} -d"
 echo "Container exec command: ${list_cmd}"
@@ -148,5 +218,8 @@ sudo cp -r ${DOVETAIL_HOME}/results ./
 # PRIMARY_GROUP=$(id -gn $CURRENT_USER)
 # sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ${WORKSPACE}/results
 
+#remove useless workspace from yardstick to save disk space
+sudo rm -rf ./results/workspace
+
 echo "Dovetail: done!"
 
index 700657d..11904cb 100644 (file)
             artifacts: 'results/**/*'
             allow-empty: true
             fingerprint: true
+        - email-jenkins-admins-on-failure
 
 ########################
 # builder macros
index dd0590c..cbdd3dd 100644 (file)
         branch: '{stream}'
         disabled: false
         gs-pathname: ''
-    danube: &danube
-        stream: danube
+    euphrates: &euphrates
+        stream: euphrates
         branch: 'stable/{stream}'
-        disabled: false
+        disabled: true
         gs-pathname: '/{stream}'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
             <<: *master
         - baremetal:
             slave-label: fuel-baremetal
-            <<: *danube
+            <<: *euphrates
         - virtual:
             slave-label: fuel-virtual
-            <<: *danube
+            <<: *euphrates
 #--------------------------------
 #        None-CI PODs
 #--------------------------------
         - zte-pod1:
             slave-label: zte-pod1
             <<: *master
-        - zte-pod2:
-            slave-label: zte-pod2
-            <<: *master
         - zte-pod3:
             slave-label: zte-pod3
             <<: *master
         - zte-pod1:
             slave-label: zte-pod1
-            <<: *danube
+            <<: *euphrates
         - zte-pod3:
             slave-label: zte-pod3
-            <<: *danube
+            <<: *euphrates
 #--------------------------------
 #       scenarios
 #--------------------------------
                         build-step-failure-threshold: 'never'
                         failure-threshold: 'never'
                         unstable-threshold: 'FAILURE'
+        # ZTE pod1 weekly(Sunday), os-odl_l2-nofeature-ha, run against master and euphrates
+        - conditional-step:
+            condition-kind: and
+            condition-operands:
+                - condition-kind: regex-match
+                  regex: os-odl_l2-nofeature-ha
+                  label: '{scenario}'
+                - condition-kind: regex-match
+                  regex: zte-pod1
+                  label: '{pod}'
+                - condition-kind: day-of-week
+                  day-selector: select-days
+                  days:
+                      SAT: true
+                  use-build-time: true
+            steps:
+                - trigger-builds:
+                    - project: 'dovetail-fuel-zte-pod1-proposed_tests-{stream}'
+                      current-parameters: false
+                      predefined-parameters:
+                        DEPLOY_SCENARIO={scenario}
+                      block: true
+                      same-node: true
+                      block-thresholds:
+                        build-step-failure-threshold: 'never'
+                        failure-threshold: 'never'
+                        unstable-threshold: 'FAILURE'
 
     publishers:
         - email:
             recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: 'fuel-deploy-{pod}-daily-{stream}'
     publishers:
         - email:
             recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
+        - email-jenkins-admins-on-failure
 
 ########################
 # parameter macros
             name: GS_URL
             default: artifacts.opnfv.org/$PROJECT{gs-pathname}
             description: "URL to Google Storage."
+        - string:
+            name: SSH_KEY
+            default: "/tmp/mcp.rsa"
+            description: "Path to private SSH key to access environment nodes. For MCP deployments only."
 ########################
 # trigger macros
 ########################
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '' # '5 20 * * *'
+        - timed: '5 20 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '' # '5 2 * * *'
+        - timed: '5 2 * * *'
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '' # '5 5 * * *'
+        - timed: '5 5 * * *'
 - trigger:
     name: 'fuel-os-onos-sfc-ha-baremetal-daily-master-trigger'
     triggers:
     triggers:
         - timed: ''
 #-----------------------------------------------
-# Triggers for job running on fuel-baremetal against danube branch
+# Triggers for job running on fuel-baremetal against euphrates branch
 #-----------------------------------------------
 # HA Scenarios
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: '' # '0 20 * * *'
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 23 * * *'
+        - timed: '' # '0 23 * * *'
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 2 * * *'
+        - timed: '' # '0 2 * * *'
 - trigger:
-    name: 'fuel-os-onos-sfc-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-ha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: '' # '0 5 * * *'
 - trigger:
-    name: 'fuel-os-onos-nofeature-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-ha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: '' # '0 8 * * *'
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 11 * * *'
+        - timed: '' # '0 11 * * *'
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 14 * * *'
+        - timed: '' # '0 14 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-kvm-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 17 * * *'
+        - timed: '' # '0 17 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: '' # '0 20 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 12 * * *'
+        - timed: '' # '0 12 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-baremetal-daily-euphrates-trigger'
     triggers:
-        - timed: '0 8 * * *'
+        - timed: '' # '0 8 * * *'
 # NOHA Scenarios
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-sfc-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-nofeature-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-ovs-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-euphrates-trigger'
     triggers:
         - timed: ''
 #-----------------------------------------------
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '' # '35 15 * * *'
+        - timed: '35 15 * * *'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '' # '5 18 * * *'
+        - timed: '5 18 * * *'
 - trigger:
     name: 'fuel-os-onos-sfc-noha-virtual-daily-master-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-ovs-noha-virtual-daily-master-trigger'
     triggers:
-        - timed: '' # '5 9 * * *'
+        - timed: '5 9 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master-trigger'
     triggers:
     triggers:
         - timed: '' # '30 20 * * *'
 #-----------------------------------------------
-# Triggers for job running on fuel-virtual against danube branch
+# Triggers for job running on fuel-virtual against euphrates branch
 #-----------------------------------------------
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-sfc-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-nofeature-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-ovs-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: ''
 # NOHA Scenarios
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '0 13 * * *'
+        - timed: '' # '0 13 * * *'
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '30 15 * * *'
+        - timed: '' # '30 15 * * *'
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '0 18 * * *'
+        - timed: '' # '0 18 * * *'
 - trigger:
-    name: 'fuel-os-onos-sfc-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-noha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: '' # '30 20 * * *'
 - trigger:
-    name: 'fuel-os-onos-nofeature-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-noha-virtual-daily-euphrates-trigger'
     triggers:
         - timed: '' # '0 23 * * *'
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '30 1 * * *'
+        - timed: '' # '30 1 * * *'
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '0 4 * * *'
+        - timed: '' # '0 4 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-kvm-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '30 6 * * *'
+        - timed: '' # '30 6 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-ovs-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '0 9 * * *'
+        - timed: '' # '0 9 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '0 16 * * *'
+        - timed: '' # '0 16 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-euphrates-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: '' # '0 20 * * *'
 #-----------------------------------------------
 # ZTE POD1 Triggers running against master branch
 #-----------------------------------------------
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 18 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-master-trigger'
     triggers:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-master-trigger'
     triggers:
         - timed: ''
-
-#-----------------------------------------------
-# ZTE POD2 Triggers running against master branch
-#-----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-master-trigger'
-    triggers:
-        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against master branch
 #-----------------------------------------------
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-master-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 10 * * *'
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-zte-pod3-daily-master-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-master-trigger'
     triggers:
-        - timed: '0 10 * * *'
+        - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-master-trigger'
     triggers:
     triggers:
         - timed: ''
 #-----------------------------------------------
-# ZTE POD1 Triggers running against danube branch
+# ZTE POD1 Triggers running against euphrates branch
 #-----------------------------------------------
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
-        - timed: '0 2 * * *'
+        - timed: '' # '0 2 * * *'
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 # NOHA Scenarios
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
         - timed: ''
-
 #-----------------------------------------------
-# ZTE POD2 Triggers running against danube branch
+# ZTE POD3 Triggers running against euphrates branch
 #-----------------------------------------------
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod2-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-danube-trigger'
-    triggers:
-        - timed: ''
-#-----------------------------------------------
-# ZTE POD3 Triggers running against danube branch
-#-----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-danube-trigger'
-    triggers:
-        - timed: '0 18 * * *'
+        - timed: '' # '0 18 * * *'
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
-        - timed: '0 2 * * *'
+        - timed: '' # '0 2 * * *'
 - trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 # NOHA Scenarios
 - trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l3-nofeature-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-onos-sfc-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-onos-nofeature-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-sfc-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-sfc-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-danube-trigger'
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-euphrates-trigger'
     triggers:
         - timed: ''
index 4d48ee5..eebd8bc 100755 (executable)
@@ -12,11 +12,13 @@ set -o pipefail
 
 export TERM="vt220"
 
-# source the file so we get OPNFV vars
-source latest.properties
+if [[ "$BRANCH" != 'master' ]]; then
+    # source the file so we get OPNFV vars
+    source latest.properties
 
-# echo the info about artifact that is used during the deployment
-echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+    # echo the info about artifact that is used during the deployment
+    echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+fi
 
 if [[ "$JOB_NAME" =~ "merge" ]]; then
     # set simplest scenario for virtual deploys to run for merges
@@ -61,13 +63,20 @@ echo "Cloning securedlab repo $BRANCH"
 git clone ssh://jenkins-ericsson@gerrit.opnfv.org:29418/securedlab --quiet \
     --branch $BRANCH
 
+# Source local_env if present, which contains POD-specific config
+local_env="${WORKSPACE}/securedlab/labs/$LAB_NAME/$POD_NAME/fuel/config/local_env"
+if [ -e "${local_env}" ]; then
+    echo "-- Sourcing local environment file"
+    source "${local_env}"
+fi
+
 # log file name
 FUEL_LOG_FILENAME="${JOB_NAME}_${BUILD_NUMBER}.log.tar.gz"
 
 # construct the command
 DEPLOY_COMMAND="sudo $WORKSPACE/ci/deploy.sh -b file://$WORKSPACE/securedlab \
     -l $LAB_NAME -p $POD_NAME -s $DEPLOY_SCENARIO -i file://$WORKSPACE/opnfv.iso \
-    -H -B $BRIDGE -S $TMPDIR -L $WORKSPACE/$FUEL_LOG_FILENAME"
+    -B ${DEFAULT_BRIDGE:-${BRIDGE}} -S $TMPDIR -L $WORKSPACE/$FUEL_LOG_FILENAME"
 
 # log info to console
 echo "Deployment parameters"
@@ -75,7 +84,7 @@ echo "--------------------------------------------------------"
 echo "Scenario: $DEPLOY_SCENARIO"
 echo "Lab: $LAB_NAME"
 echo "POD: $POD_NAME"
-echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
+[[ "$BRANCH" != 'master' ]] && echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
 echo
 echo "Starting the deployment using $INSTALLER_TYPE. This could take some time..."
 echo "--------------------------------------------------------"
index 8cc552e..c3b8253 100755 (executable)
@@ -10,6 +10,9 @@
 set -o errexit
 set -o pipefail
 
+# disable Fuel ISO download for master branch
+[[ "$BRANCH" == 'master' ]] && exit 0
+
 # use proxy url to replace the nomral URL, for googleusercontent.com will be blocked randomly
 [[ "$NODE_NAME" =~ (zte) ]] && GS_URL=${GS_BASE_PROXY%%/*}/$GS_URL
 
index 1f0ddd3..6bb7e51 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - danube:
+        - euphrates:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 
     jobs:
         - 'fuel-build-daily-{stream}'
@@ -79,6 +79,7 @@
     publishers:
         - email:
             recipients: fzhadaev@mirantis.com
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: 'fuel-merge-build-{stream}'
     publishers:
         - email:
             recipients: fzhadaev@mirantis.com
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: 'fuel-deploy-generic-daily-{stream}'
index 549f7da..469ca92 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-        - danube:
+        - euphrates:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 #####################################
 # patch verification phases
 #####################################
index bd42ed8..57e36e1 100644 (file)
         branch: '{stream}'
         disabled: false
         gs-pathname: ''
-    danube: &danube
-        stream: danube
+    euphrates: &euphrates
+        stream: euphrates
         branch: 'stable/{stream}'
-        disabled: false
+        disabled: true
         gs-pathname: '/{stream}'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
             <<: *master
         - baremetal:
             slave-label: fuel-baremetal
-            <<: *danube
+            <<: *euphrates
         - virtual:
             slave-label: fuel-virtual
-            <<: *danube
+            <<: *euphrates
 #--------------------------------
 #       scenarios
 #--------------------------------
     publishers:
         - email:
             recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: 'fuel-deploy-{pod}-weekly-{stream}'
     publishers:
         - email:
             recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
+        - email-jenkins-admins-on-failure
 
 ########################
 # parameter macros
diff --git a/jjb/functest/functest-alpine.sh b/jjb/functest/functest-alpine.sh
new file mode 100644 (file)
index 0000000..f0e08e1
--- /dev/null
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+set -e
+set +u
+set +o pipefail
+
+[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+FUNCTEST_DIR=/home/opnfv/functest
+
+# Prepare OpenStack credentials volume
+if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
+    rc_file=$LAB_CONFIG/admin-openrc
+elif [[ ${INSTALLER_TYPE} == 'compass' && ${BRANCH} == 'master' ]]; then
+    cacert_file_vol="-v ${HOME}/os_cacert:${FUNCTEST_DIR}/conf/os_cacert"
+    echo "export OS_CACERT=${FUNCTEST_DIR}/conf/os_cacert" >> ${HOME}/opnfv-openrc.sh
+    rc_file=${HOME}/opnfv-openrc.sh
+else
+    rc_file=${HOME}/opnfv-openrc.sh
+fi
+rc_file_vol="-v ${rc_file}:${FUNCTEST_DIR}/conf/openstack.creds"
+
+
+# Set iptables rule to allow forwarding return traffic for container
+if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FORWARD | awk 'NR==3' | grep RETURN 2> ${redirect}; then
+    sudo iptables -I FORWARD -j RETURN
+fi
+
+DEPLOY_TYPE=baremetal
+[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt
+HOST_ARCH=$(uname -m)
+
+echo "Functest: Start Docker and prepare environment"
+
+echo "Functest: Download images that will be used by test cases"
+images_dir="${HOME}/opnfv/functest/images"
+download_script=${WORKSPACE}/functest/ci/download_images.sh
+if [[ ! -f ${download_script} ]]; then
+    # to support Danube as well
+    wget https://git.opnfv.org/functest/plain/functest/ci/download_images.sh -O ${download_script} 2> ${redirect}
+fi
+chmod +x ${download_script}
+${download_script} ${images_dir} ${DEPLOY_SCENARIO} ${HOST_ARCH} 2> ${redirect}
+
+images_vol="-v ${images_dir}:${FUNCTEST_DIR}/images"
+
+dir_result="${HOME}/opnfv/functest/results/${BRANCH##*/}"
+mkdir -p ${dir_result}
+sudo rm -rf ${dir_result}/*
+results_vol="-v ${dir_result}:${FUNCTEST_DIR}/results"
+custom_params=
+test -f ${HOME}/opnfv/functest/custom/params_${DOCKER_TAG} && custom_params=$(cat ${HOME}/opnfv/functest/custom/params_${DOCKER_TAG})
+
+envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
+    -e NODE_NAME=${NODE_NAME} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
+    -e BUILD_TAG=${BUILD_TAG} -e DEPLOY_TYPE=${DEPLOY_TYPE}"
+
+if [[ ${INSTALLER_TYPE} == 'compass' && ${DEPLOY_SCENARIO} == *'os-nosdn-openo-ha'* ]]; then
+    ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+    openo_msb_port=${openo_msb_port:-80}
+    openo_msb_endpoint="$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
+    'mysql -ucompass -pcompass -Dcompass -e "select package_config from cluster;" \
+    | sed s/,/\\n/g | grep openo_ip | cut -d \" -f 4'):$openo_msb_port"
+
+    envs=${env}" -e OPENO_MSB_ENDPOINT=${openo_msb_endpoint}"
+fi
+
+volumes="${images_vol} ${results_vol} ${sshkey_vol} ${rc_file_vol} ${cacert_file_vol}"
+
+set +e
+
+tiers=(healthcheck smoke features vnf)
+for tier in ${tiers[@]}; do
+    FUNCTEST_IMAGE=ollivier/functest-${tier}
+    echo "Functest: Pulling Functest Docker image ${FUNCTEST_IMAGE} ..."
+    docker pull ${FUNCTEST_IMAGE}>/dev/null
+    cmd="docker run ${envs} ${volumes} ${FUNCTEST_IMAGE}"
+    echo "Running Functest tier '${tier}'. CMD: ${cmd}"
+    ${cmd}
+done
index 8de092d..23649fc 100644 (file)
             slave-label: '{pod}'
             installer: fuel
             <<: *master
-        - arm-pod3:
+        - arm-pod5:
             slave-label: '{pod}'
             installer: fuel
             <<: *master
             slave-label: '{pod}'
             installer: fuel
             <<: *master
-        - arm-virtual1:
+        - arm-virtual2:
             slave-label: '{pod}'
             installer: fuel
             <<: *master
             slave-label: '{pod}'
             installer: fuel
             <<: *danube
-        - arm-pod3:
+        - arm-pod5:
             slave-label: '{pod}'
             installer: fuel
             <<: *danube
             slave-label: '{pod}'
             installer: fuel
             <<: *danube
-        - arm-virtual1:
+        - arm-virtual2:
             slave-label: '{pod}'
             installer: fuel
             <<: *danube
 # PODs for verify jobs triggered by each patch upload
-        - ool-virtual1:
-            slave-label: '{pod}'
-            installer: apex
-            <<: *master
+#        - ool-virtual1:
+#            slave-label: '{pod}'
+#            installer: apex
+#            <<: *master
 #--------------------------------
 
+    alpine-pod:
+        - ericsson-virtual-pod1bl01:
+            slave-label: '{alpine-pod}'
+            installer: fuel
+            <<: *master
+        - huawei-virtual5:
+            slave-label: '{alpine-pod}'
+            installer: compass
+            <<: *master
+
     testsuite:
         - 'suite':
             job-timeout: 60
 
     jobs:
         - 'functest-{installer}-{pod}-{testsuite}-{stream}'
+        - 'functest-alpine-{installer}-{alpine-pod}-{testsuite}-{stream}'
 
 ################################
 # job template
             description: "Built on $NODE_NAME"
         - 'functest-{testsuite}-builder'
 
+- job-template:
+    name: 'functest-alpine-{installer}-{alpine-pod}-{testsuite}-{stream}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-per-node: 1
+            option: 'project'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER Suite: $FUNCTEST_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+        - timeout:
+            timeout: '{job-timeout}'
+            abort: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults'
+        - 'functest-{testsuite}-parameter'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'os-nosdn-nofeature-noha'
+        - functest-parameter:
+            gs-pathname: '{gs-pathname}'
+
+    scm:
+        - git-scm
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - 'functest-alpine-daily-builder'
+
 ########################
 # parameter macros
 ########################
         - 'functest-store-results'
         - 'functest-exit'
 
+- builder:
+    name: functest-alpine-daily-builder
+    builders:
+        - shell:
+            !include-raw:
+                - ./functest-env-presetup.sh
+                - ../../utils/fetch_os_creds.sh
+                - ./functest-alpine.sh
+                - ../../utils/push-test-logs.sh
+
 - builder:
     name: functest-daily
     builders:
index 6768906..00a5f13 100755 (executable)
@@ -2,7 +2,11 @@
 set +e
 
 [[ "$PUSH_RESULTS_TO_DB" == "true" ]] && flags+="-r"
-cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
+if [ "$BRANCH" == 'master' ]; then
+    cmd="run_tests -t all ${flags}"
+else
+    cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
+fi
 
 container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
 docker exec $container_id $cmd
index 7036f20..07d5df4 100644 (file)
@@ -9,6 +9,7 @@
 
     jobs:
         - 'functest-verify-{stream}'
+        - 'functest-docs-upload-{stream}'
 
     stream:
         - master:
@@ -18,7 +19,7 @@
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 
 - job-template:
     name: 'functest-verify-{stream}'
                     healthy: 50
                     unhealthy: 40
                     failing: 30
+        - email-jenkins-admins-on-failure
+
+- job-template:
+    name: 'functest-docs-upload-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - git-scm
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - change-merged-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'remerge'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
+                forbidden-file-paths:
+                  - compare-type: ANT
+                    pattern: 'docs/**|.gitignore'
+
+    builders:
+        - functest-upload-doc-artifact
 
 ################################
 # job builders
     builders:
         - shell: |
             cd $WORKSPACE && tox
+
+- builder:
+    name: functest-upload-doc-artifact
+    builders:
+        - shell: |
+            cd $WORKSPACE && tox -edocs
+            wget -O - https://git.opnfv.org/releng/plain/utils/upload-artifact.sh | bash -s "api/_build" "docs"
index 5d1ed28..9b7f135 100755 (executable)
@@ -10,7 +10,11 @@ global_ret_val=0
 
 tests=($(echo $FUNCTEST_SUITE_NAME | tr "," "\n"))
 for test in ${tests[@]}; do
-    cmd="python /home/opnfv/repos/functest/functest/ci/run_tests.py -t $test"
+    if [ "$BRANCH" == 'master' ]; then
+        cmd="run_tests -t $test"
+    else
+        cmd="python /home/opnfv/repos/functest/functest/ci/run_tests.py -t $test"
+    fi
     docker exec $container_id $cmd
     let global_ret_val+=$?
 done
index 558e248..f18f054 100755 (executable)
@@ -6,11 +6,20 @@ set +o pipefail
 
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
+DEPLOY_TYPE=baremetal
+[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt
+HOST_ARCH=$(uname -m)
+
 # Prepare OpenStack credentials volume
+rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/home/opnfv/functest/conf/openstack.creds"
+
 if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     rc_file_vol="-v $LAB_CONFIG/admin-openrc:/home/opnfv/functest/conf/openstack.creds"
-else
-    rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/home/opnfv/functest/conf/openstack.creds"
+elif [[ ${INSTALLER_TYPE} == 'compass' && ${BRANCH} == 'master' ]]; then
+    cacert_file_vol="-v ${HOME}/os_cacert:/home/opnfv/functest/conf/os_cacert"
+    echo "export OS_CACERT=/home/opnfv/functest/conf/os_cacert" >> ${HOME}/opnfv-openrc.sh
+elif [[ ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
+    cacert_file_vol="-v ${HOME}/os_cacert:/etc/ssl/certs/mcp_os_cacert"
 fi
 
 
@@ -19,17 +28,15 @@ if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FOR
     sudo iptables -I FORWARD -j RETURN
 fi
 
-DEPLOY_TYPE=baremetal
-[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt
-
 echo "Functest: Start Docker and prepare environment"
 
 if [ "$BRANCH" != 'stable/danube' ]; then
   echo "Functest: Download images that will be used by test cases"
   images_dir="${HOME}/opnfv/functest/images"
   chmod +x ${WORKSPACE}/functest/ci/download_images.sh
-  ${WORKSPACE}/functest/ci/download_images.sh ${images_dir} 2> ${redirect}
+  ${WORKSPACE}/functest/ci/download_images.sh ${images_dir} > ${redirect}
   images_vol="-v ${images_dir}:/home/opnfv/functest/images"
+  echo "Functest: Images successfully downloaded"
 fi
 
 dir_result="${HOME}/opnfv/functest/results/${BRANCH##*/}"
@@ -38,11 +45,18 @@ sudo rm -rf ${dir_result}/*
 results_vol="-v ${dir_result}:/home/opnfv/functest/results"
 custom_params=
 test -f ${HOME}/opnfv/functest/custom/params_${DOCKER_TAG} && custom_params=$(cat ${HOME}/opnfv/functest/custom/params_${DOCKER_TAG})
+echo "Functest: custom parameters successfully retrieved: ${custom_params}"
 
 envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
     -e BUILD_TAG=${BUILD_TAG} -e CI_DEBUG=${CI_DEBUG} -e DEPLOY_TYPE=${DEPLOY_TYPE}"
 
+if [[ ${INSTALLER_TYPE} == 'fuel' && ! -z ${SALT_MASTER_IP} ]]; then
+  HOST_ARCH=$(ssh -l ubuntu ${SALT_MASTER_IP} -i ${SSH_KEY} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+  "sudo salt 'cmp*' grains.get cpuarch --out yaml |awk '{print \$2; exit}'")
+  envs="${envs} -e POD_ARCH=${HOST_ARCH}"
+fi
+
 if [[ ${INSTALLER_TYPE} == 'compass' && ${DEPLOY_SCENARIO} == *'os-nosdn-openo-ha'* ]]; then
     ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
     openo_msb_port=${openo_msb_port:-80}
@@ -54,12 +68,13 @@ if [[ ${INSTALLER_TYPE} == 'compass' && ${DEPLOY_SCENARIO} == *'os-nosdn-openo-h
 fi
 
 if [ "$BRANCH" != 'stable/danube' ]; then
-  volumes="${images_vol} ${results_vol} ${sshkey_vol} ${stackrc_vol} ${rc_file_vol}"
+  volumes="${images_vol} ${results_vol} ${sshkey_vol} ${stackrc_vol} ${rc_file_vol} ${cacert_file_vol}"
 else
   volumes="${results_vol} ${sshkey_vol} ${stackrc_vol} ${rc_file_vol}"
 fi
 
-HOST_ARCH=$(uname -m)
+echo "Functest: volumes defined"
+
 FUNCTEST_IMAGE="opnfv/functest"
 if [ "$HOST_ARCH" = "aarch64" ]; then
     FUNCTEST_IMAGE="${FUNCTEST_IMAGE}_${HOST_ARCH}"
@@ -90,7 +105,12 @@ if [ $(docker ps | grep "${FUNCTEST_IMAGE}:${DOCKER_TAG}" | wc -l) == 0 ]; then
     exit 1
 fi
 
-cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
+if [ "$BRANCH" == 'master' ]; then
+    cmd="prepare_env start"
+else
+    cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
+fi
+
 
 echo "Executing command inside the docker: ${cmd}"
 docker exec ${container_id} ${cmd}
index 40fc42c..10afd6f 100644 (file)
+---
 - parameter:
     name: 'apex-defaults'
     parameters:
-        - string:
-            name: INSTALLER_IP
-            default: '192.168.X.X'
-            description: 'IP of the installer'
-        - string:
-            name: INSTALLER_TYPE
-            default: apex
-            description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: EXTERNAL_NETWORK
-            default: 'external'
-            description: 'external network for test'
+      - string:
+          name: INSTALLER_IP
+          default: '192.168.X.X'
+          description: 'IP of the installer'
+      - string:
+          name: INSTALLER_TYPE
+          default: apex
+          description: 'Installer used for deploying OPNFV on this POD'
+      - string:
+          name: EXTERNAL_NETWORK
+          default: 'external'
+          description: 'external network for test'
 
 - parameter:
     name: 'compass-defaults'
     parameters:
-        - string:
-            name: INSTALLER_IP
-            default: '192.168.200.2'
-            description: 'IP of the installer'
-        - string:
-            name: INSTALLER_TYPE
-            default: compass
-            description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: EXTERNAL_NETWORK
-            default: 'ext-net'
-            description: 'external network for test'
+      - string:
+          name: INSTALLER_IP
+          default: '192.168.200.2'
+          description: 'IP of the installer'
+      - string:
+          name: INSTALLER_TYPE
+          default: compass
+          description: 'Installer used for deploying OPNFV on this POD'
+      - string:
+          name: EXTERNAL_NETWORK
+          default: 'ext-net'
+          description: 'external network for test'
 
 - parameter:
     name: 'fuel-defaults'
     parameters:
-        - string:
-            name: INSTALLER_IP
-            default: '10.20.0.2'
-            description: 'IP of the installer'
-        - string:
-            name: INSTALLER_TYPE
-            default: fuel
-            description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: EXTERNAL_NETWORK
-            default: 'admin_floating_net'
-            description: 'external network for test'
-        - string:
-            name: BRIDGE
-            default: 'pxebr'
-            description: 'pxe bridge for booting of Fuel master'
+      - string:
+          name: INSTALLER_IP
+          default: '10.20.0.2'
+          description: 'IP of the installer'
+      - string:
+          name: SALT_MASTER_IP
+          default: '192.168.10.100'
+          description: 'IP of the salt master (for mcp deployments)'
+      - string:
+          name: SSH_KEY
+          default: '/tmp/mcp.rsa'
+          description: 'Path to private SSH key to access environment nodes'
+      - string:
+          name: INSTALLER_TYPE
+          default: fuel
+          description: 'Installer used for deploying OPNFV on this POD'
+      - string:
+          name: EXTERNAL_NETWORK
+          default: 'admin_floating_net'
+          description: 'external network for test'
+      - string:
+          name: BRIDGE
+          default: 'pxebr'
+          description: 'Bridge(s) to be used by salt master'
 
 - parameter:
     name: 'joid-defaults'
     parameters:
-        - string:
-            name: INSTALLER_IP
-            default: '192.168.122.5'
-            description: 'IP of the installer'
-        - string:
-            name: INSTALLER_TYPE
-            default: joid
-            description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: MODEL
-            default: 'os'
-            description: 'Model to deploy (os|k8)'
-        - string:
-            name: OS_RELEASE
-            default: 'newton'
-            description: 'OpenStack release (mitaka|newton)'
-        - string:
-            name: EXTERNAL_NETWORK
-            default: ext-net
-            description: "External network used for Floating ips."
-        - string:
-            name: LAB_CONFIG
-            default: "$HOME/joid_config"
-            description: "Local lab config and Openstack openrc location"
-        - string:
-            name: MAAS_REINSTALL
-            default: 'false'
-            description: "Reinstall MAAS and Bootstrap before deploy [true/false]"
-        - string:
-            name: UBUNTU_DISTRO
-            default: 'xenial'
-            description: "Ubuntu distribution to use for Openstack (xenial)"
-        - string:
-            name: CPU_ARCHITECTURE
-            default: 'amd64'
-            description: "CPU Architecture to use for Ubuntu distro "
+      - string:
+          name: INSTALLER_IP
+          default: '192.168.122.5'
+          description: 'IP of the installer'
+      - string:
+          name: INSTALLER_TYPE
+          default: joid
+          description: 'Installer used for deploying OPNFV on this POD'
+      - string:
+          name: MODEL
+          default: 'os'
+          description: 'Model to deploy (os|k8)'
+      - string:
+          name: OS_RELEASE
+          default: 'ocata'
+          description: 'OpenStack release (mitaka|newton|ocata)'
+      - string:
+          name: EXTERNAL_NETWORK
+          default: ext-net
+          description: "External network used for Floating ips."
+      - string:
+          name: LAB_CONFIG
+          default: "$HOME/joid_config"
+          description: "Local lab config and Openstack openrc location"
+      - string:
+          name: MAAS_REINSTALL
+          default: 'false'
+          description: "Reinstall MAAS and Bootstrap before deploy [true/false]"
+      - string:
+          name: UBUNTU_DISTRO
+          default: 'xenial'
+          description: "Ubuntu distribution to use for Openstack (xenial)"
+      - string:
+          name: CPU_ARCHITECTURE
+          default: 'amd64'
+          description: "CPU Architecture to use for Ubuntu distro "
 
 - parameter:
     name: 'daisy-defaults'
     parameters:
-        - string:
-            name: INSTALLER_IP
-            default: '10.20.7.3'
-            description: 'IP of the installer'
-        - string:
-            name: INSTALLER_TYPE
-            default: daisy
-            description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: BRIDGE
-            default: 'br7'
-            description: 'pxe bridge for booting of Fuel master'
+      - string:
+          name: INSTALLER_IP
+          default: '10.20.7.3'
+          description: 'IP of the installer'
+      - string:
+          name: INSTALLER_TYPE
+          default: daisy
+          description: 'Installer used for deploying OPNFV on this POD'
+      - string:
+          name: BRIDGE
+          default: 'br7'
+          description: 'pxe bridge for booting of Daisy master'
 
 - parameter:
     name: 'infra-defaults'
     parameters:
-        - string:
-            name: INSTALLER_IP
-            default: '192.168.122.2'
-            description: 'IP of the installer'
-        - string:
-            name: INSTALLER_TYPE
-            default: infra
-            description: 'Installer used for deploying OPNFV on this POD'
+      - string:
+          name: INSTALLER_IP
+          default: '192.168.122.2'
+          description: 'IP of the installer'
+      - string:
+          name: INSTALLER_TYPE
+          default: infra
+          description: 'Installer used for deploying OPNFV on this POD'
+
 - parameter:
     name: 'netvirt-defaults'
     parameters:
-        - string:
-            name: INSTALLER_IP
-            default: '192.168.X.X'
-            description: 'IP of the installer'
-        - string:
-            name: INSTALLER_TYPE
-            default: apex
-            description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: EXTERNAL_NETWORK
-            default: 'external'
-            description: 'external network for test'
+      - string:
+          name: INSTALLER_IP
+          default: '192.168.X.X'
+          description: 'IP of the installer'
+      - string:
+          name: INSTALLER_TYPE
+          default: apex
+          description: 'Installer used for deploying OPNFV on this POD'
+      - string:
+          name: EXTERNAL_NETWORK
+          default: 'external'
+          description: 'external network for test'
index 2838886..2e94767 100644 (file)
@@ -1,14 +1,22 @@
+---
 # jjb defaults
 
 - defaults:
     name: global
 
     wrappers:
-        - ssh-agent-wrapper
+      - ssh-agent-wrapper
 
     project-type: freestyle
 
     node: master
 
     properties:
-        - logrotate-default
+      - logrotate-default
+
+    publishers:
+      # Any project that has a publisher will not have this macro
+      # included due to the nature of JJB defaults. Projects will have
+      # to explicitly add this macro to their list of publishers in
+      # order for emails to be sent.
+      - email-jenkins-admins-on-failure
index 5341db4..20b13b8 100644 (file)
@@ -1,3 +1,4 @@
+---
 # Releng macros
 #
 # NOTE: make sure macros are listed in execution ordered.
 - parameter:
     name: project-parameter
     parameters:
-        - string:
-            name: PROJECT
-            default: '{project}'
-            description: "JJB configured PROJECT parameter to identify an opnfv Gerrit project"
-        - string:
-            name: GS_BASE
-            default: artifacts.opnfv.org/$PROJECT
-            description: "URL to Google Storage."
-        - string:
-            name: GS_BASE_PROXY
-            default: build.opnfv.org/artifacts.opnfv.org/$PROJECT
-            description: "URL to Google Storage proxy"
-        - string:
-            name: BRANCH
-            default: '{branch}'
-            description: "JJB configured BRANCH parameter (e.g. master, stable/danube)"
-        - string:
-            name: GERRIT_BRANCH
-            default: '{branch}'
-            description: "JJB configured GERRIT_BRANCH parameter (deprecated)"
+      - string:
+          name: PROJECT
+          default: '{project}'
+          description: "JJB configured PROJECT parameter to identify an opnfv Gerrit project"
+      - string:
+          name: GS_BASE
+          default: artifacts.opnfv.org/$PROJECT
+          description: "URL to Google Storage."
+      - string:
+          name: GS_BASE_PROXY
+          default: build.opnfv.org/artifacts.opnfv.org/$PROJECT
+          description: "URL to Google Storage proxy"
+      - string:
+          name: BRANCH
+          default: '{branch}'
+          description: "JJB configured BRANCH parameter (e.g. master, stable/danube)"
+      - string:
+          name: GERRIT_BRANCH
+          default: '{branch}'
+          description: "JJB configured GERRIT_BRANCH parameter (deprecated)"
 
 - property:
     name: logrotate-default
     properties:
-        - build-discarder:
-            days-to-keep: 60
-            num-to-keep: 200
-            artifact-days-to-keep: 60
-            artifact-num-to-keep: 200
+      - build-discarder:
+          days-to-keep: 60
+          num-to-keep: 200
+          artifact-days-to-keep: 60
+          artifact-num-to-keep: 200
 
 - scm:
     name: git-scm
     scm:
-        - git: &git-scm-defaults
-            credentials-id: '$SSH_CREDENTIAL_ID'
-            url: '$GIT_BASE'
-            branches:
-                - 'origin/$BRANCH'
-            timeout: 15
+      - git: &git-scm-defaults
+          credentials-id: '$SSH_CREDENTIAL_ID'
+          url: '$GIT_BASE'
+          branches:
+            - 'origin/$BRANCH'
+          timeout: 15
 
 - scm:
     name: git-scm-gerrit
     scm:
-        - git:
-            choosing-strategy: 'gerrit'
-            refspec: '$GERRIT_REFSPEC'
-            <<: *git-scm-defaults
+      - git:
+          choosing-strategy: 'gerrit'
+          refspec: '$GERRIT_REFSPEC'
+          <<: *git-scm-defaults
 - scm:
     name: git-scm-with-submodules
     scm:
-        - git:
-            credentials-id: '$SSH_CREDENTIAL_ID'
-            url: '$GIT_BASE'
-            refspec: ''
-            branches:
-                - 'refs/heads/{branch}'
-            skip-tag: true
-            wipe-workspace: true
-            submodule:
-                recursive: true
-                timeout: 20
+      - git:
+          credentials-id: '$SSH_CREDENTIAL_ID'
+          url: '$GIT_BASE'
+          refspec: ''
+          branches:
+            - 'refs/heads/{branch}'
+          skip-tag: true
+          wipe-workspace: true
+          submodule:
+            recursive: true
+            timeout: 20
 - trigger:
     name: 'daily-trigger-disabled'
     triggers:
-        - timed: ''
+      - timed: ''
 
 - trigger:
     name: 'weekly-trigger-disabled'
     triggers:
-        - timed: ''
+      - timed: ''
 
 - trigger:
     name: gerrit-trigger-patchset-created
     triggers:
-        - gerrit:
-            server-name: 'gerrit.opnfv.org'
-            trigger-on:
-                - patchset-created-event:
-                    exclude-drafts: 'false'
-                    exclude-trivial-rebase: 'false'
-                    exclude-no-code-change: 'false'
-                - draft-published-event
-                - comment-added-contains-event:
-                    comment-contains-value: 'recheck'
-                - comment-added-contains-event:
-                    comment-contains-value: 'reverify'
-            projects:
-              - project-compare-type: 'ANT'
-                project-pattern: '{project}'
-                branches:
-                  - branch-compare-type: 'ANT'
-                    branch-pattern: '**/{branch}'
-                file-paths:
-                  - compare-type: 'ANT'
-                    pattern: '{files}'
-            skip-vote:
-                successful: false
-                failed: false
-                unstable: false
-                notbuilt: false
+      - gerrit:
+          server-name: 'gerrit.opnfv.org'
+          trigger-on:
+            - patchset-created-event:
+                exclude-drafts: 'false'
+                exclude-trivial-rebase: 'false'
+                exclude-no-code-change: 'false'
+            - draft-published-event
+            - comment-added-contains-event:
+                comment-contains-value: 'recheck'
+            - comment-added-contains-event:
+                comment-contains-value: 'reverify'
+          projects:
+            - project-compare-type: 'ANT'
+              project-pattern: '{project}'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+              file-paths:
+                - compare-type: 'ANT'
+                  pattern: '{files}'
+          skip-vote:
+            successful: false
+            failed: false
+            unstable: false
+            notbuilt: false
 
 - trigger:
     name: gerrit-trigger-change-merged
     triggers:
-        - gerrit:
-            server-name: 'gerrit.opnfv.org'
-            trigger-on:
-                - change-merged-event
-                - comment-added-contains-event:
-                    comment-contains-value: 'remerge'
-            projects:
-              - project-compare-type: 'ANT'
-                project-pattern: '{project}'
-                branches:
-                  - branch-compare-type: 'ANT'
-                    branch-pattern: '**/{branch}'
-                file-paths:
-                  - compare-type: 'ANT'
-                    pattern: '{files}'
+      - gerrit:
+          server-name: 'gerrit.opnfv.org'
+          trigger-on:
+            - change-merged-event
+            - comment-added-contains-event:
+                comment-contains-value: 'remerge'
+          projects:
+            - project-compare-type: 'ANT'
+              project-pattern: '{project}'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+              file-paths:
+                - compare-type: 'ANT'
+                  pattern: '{files}'
 
 - trigger:
     name: 'experimental'
     triggers:
-        - gerrit:
-            server-name: 'gerrit.opnfv.org'
-            trigger-on:
-                - comment-added-contains-event:
-                    comment-contains-value: 'check-experimental'
-            projects:
-                - project-compare-type: 'ANT'
-                  project-pattern: '{project}'
-                  branches:
-                      - branch-compare-type: 'ANT'
-                        branch-pattern: '**/{branch}'
-                  file-paths:
-                      - compare-type: 'ANT'
-                        pattern: 'tests/**'
-            skip-vote:
-                successful: true
-                failed: true
-                unstable: true
-                notbuilt: true
+      - gerrit:
+          server-name: 'gerrit.opnfv.org'
+          trigger-on:
+            - comment-added-contains-event:
+                comment-contains-value: 'check-experimental'
+          projects:
+            - project-compare-type: 'ANT'
+              project-pattern: '{project}'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+              file-paths:
+                - compare-type: 'ANT'
+                  pattern: '{files}'
+          skip-vote:
+            successful: true
+            failed: true
+            unstable: true
+            notbuilt: true
 
 - wrapper:
     name: ssh-agent-wrapper
     wrappers:
-        - ssh-agent-credentials:
-            users:
-                - 'd42411ac011ad6f3dd2e1fa34eaa5d87f910eb2e'
+      - ssh-agent-credentials:
+          users:
+            - 'd42411ac011ad6f3dd2e1fa34eaa5d87f910eb2e'
+
+- wrapper:
+    name: build-timeout
+    wrappers:
+      - timeout:
+          timeout: '{timeout}'
+          timeout-var: 'BUILD_TIMEOUT'
+          fail: true
 
 - wrapper:
     name: fix-workspace-permissions
     wrappers:
-        - pre-scm-buildstep:
+      - pre-scm-buildstep:
           - shell: |
-                #!/bin/bash
-                sudo chown -R $USER:$USER $WORKSPACE || exit 1
+             #!/bin/bash
+             sudo chown -R $USER:$USER $WORKSPACE || exit 1
 
 - builder:
     name: build-html-and-pdf-docs-output
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
-            git clone ssh://gerrit.opnfv.org:29418/opnfvdocs docs_build/_opnfvdocs
-            GERRIT_COMMENT=gerrit_comment.txt ./docs_build/_opnfvdocs/scripts/docs-build.sh
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
+          git clone ssh://gerrit.opnfv.org:29418/opnfvdocs docs_build/_opnfvdocs
+          GERRIT_COMMENT=gerrit_comment.txt ./docs_build/_opnfvdocs/scripts/docs-build.sh
 
 - builder:
     name: upload-under-review-docs-to-opnfv-artifacts
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
-
-            [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
-            [[ -d docs_output ]] || exit 0
-
-            echo
-            echo "###########################"
-            echo "UPLOADING DOCS UNDER REVIEW"
-            echo "###########################"
-            echo
-
-            gs_base="artifacts.opnfv.org/$PROJECT/review"
-            gs_path="$gs_base/$GERRIT_CHANGE_NUMBER"
-            local_path="upload/$GERRIT_CHANGE_NUMBER"
-
-            mkdir -p upload
-            mv docs_output "$local_path"
-            gsutil -m cp -r "$local_path" "gs://$gs_base"
-
-            gsutil -m setmeta \
-                -h "Content-Type:text/html" \
-                -h "Cache-Control:private, max-age=0, no-transform" \
-                "gs://$gs_path"/**.html > /dev/null 2>&1
-
-            echo "Document link(s):" >> gerrit_comment.txt
-            find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
-                sed -e "s|^$local_path|    http://$gs_path|" >> gerrit_comment.txt
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
+
+          [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
+          [[ -d docs_output ]] || exit 0
+
+          echo
+          echo "###########################"
+          echo "UPLOADING DOCS UNDER REVIEW"
+          echo "###########################"
+          echo
+
+          gs_base="artifacts.opnfv.org/$PROJECT/review"
+          gs_path="$gs_base/$GERRIT_CHANGE_NUMBER"
+          local_path="upload/$GERRIT_CHANGE_NUMBER"
+
+          mkdir -p upload
+          mv docs_output "$local_path"
+          gsutil -m cp -r "$local_path" "gs://$gs_base"
+
+          gsutil -m setmeta \
+              -h "Content-Type:text/html" \
+              -h "Cache-Control:private, max-age=0, no-transform" \
+              "gs://$gs_path"/**.html > /dev/null 2>&1
+
+          echo "Document link(s):" >> gerrit_comment.txt
+          find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
+              sed -e "s|^$local_path|    http://$gs_path|" >> gerrit_comment.txt
 
 - builder:
     name: upload-generated-docs-to-opnfv-artifacts
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
-
-            [[ -d docs_output ]] || exit 0
-
-            echo
-            echo "########################"
-            echo "UPLOADING GENERATED DOCS"
-            echo "########################"
-            echo
-
-            echo "gs_path="$GS_URL/docs""
-            echo "local_path="upload/docs""
-
-            gs_path="$GS_URL/docs"
-            local_path="upload/docs"
-
-            mkdir -p upload
-            mv docs_output "$local_path"
-            ls "$local_path"
-
-            echo "gsutil -m cp -r "$local_path"/* "gs://$gs_path""
-            gsutil -m cp -r "$local_path"/* "gs://$gs_path"
-
-            gsutil -m setmeta \
-                -h "Content-Type:text/html" \
-                -h "Cache-Control:private, max-age=0, no-transform" \
-                "gs://$gs_path"/**.html > /dev/null 2>&1
-
-            echo "Document link(s):" >> gerrit_comment.txt
-            find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
-                sed -e "s|^$local_path|    http://$gs_path|" >> gerrit_comment.txt
-
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
+
+          [[ -d docs_output ]] || exit 0
+
+          echo
+          echo "########################"
+          echo "UPLOADING GENERATED DOCS"
+          echo "########################"
+          echo
+
+          echo "gs_path="$GS_URL/docs""
+          echo "local_path="upload/docs""
+
+          gs_path="$GS_URL/docs"
+          local_path="upload/docs"
+
+          mkdir -p upload
+          mv docs_output "$local_path"
+          ls "$local_path"
+
+          echo "gsutil -m cp -r "$local_path"/* "gs://$gs_path""
+          gsutil -m cp -r "$local_path"/* "gs://$gs_path"
+
+          gsutil -m setmeta \
+              -h "Content-Type:text/html" \
+              -h "Cache-Control:private, max-age=0, no-transform" \
+              "gs://$gs_path"/**.html > /dev/null 2>&1
+
+          echo "Document link(s):" >> gerrit_comment.txt
+          find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
+              sed -e "s|^$local_path|    http://$gs_path|" >> gerrit_comment.txt
+
+# To take advantage of this macro, have your build write
+# out the file 'gerrit_comment.txt' with information to post
+# back to gerrit and include this macro in the list of builders.
 - builder:
-    name: report-docs-build-result-to-gerrit
+    name: report-build-result-to-gerrit
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
-            if [[ -e gerrit_comment.txt ]] ; then
-                echo
-                echo "posting review comment to gerrit..."
-                echo
-                cat gerrit_comment.txt
-                echo
-                ssh -p 29418 gerrit.opnfv.org \
-                    "gerrit review -p $GERRIT_PROJECT \
-                     -m '$(cat gerrit_comment.txt)' \
-                     $GERRIT_PATCHSET_REVISION \
-                     --notify NONE"
-            fi
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
+          if [[ -e gerrit_comment.txt ]] ; then
+              echo
+              echo "posting review comment to gerrit..."
+              echo
+              cat gerrit_comment.txt
+              echo
+              ssh -p 29418 gerrit.opnfv.org \
+                  "gerrit review -p $GERRIT_PROJECT \
+                   -m '$(cat gerrit_comment.txt)' \
+                   $GERRIT_PATCHSET_REVISION \
+                   --notify NONE"
+          fi
 
 - builder:
     name: remove-old-docs-from-opnfv-artifacts
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
-
-            [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
-
-            gs_path="artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
-
-            if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
-                echo
-                echo "Deleting Out-of-dated Documents..."
-                gsutil -m rm -r "gs://$gs_path"
-            fi
-            gs_path="artifacts.opnfv.org/review/$GERRIT_CHANGE_NUMBER"
-
-            if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
-                echo
-                echo "Deleting Out-of-dated Documents..."
-                gsutil -m rm -r "gs://$gs_path"
-            fi
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
+
+          [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
+
+          gs_path="artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
+
+          if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
+              echo
+              echo "Deleting Out-of-dated Documents..."
+              gsutil -m rm -r "gs://$gs_path"
+          fi
+          gs_path="artifacts.opnfv.org/review/$GERRIT_CHANGE_NUMBER"
+
+          if gsutil ls "gs://$gs_path" > /dev/null 2>&1 ; then
+              echo
+              echo "Deleting Out-of-dated Documents..."
+              gsutil -m rm -r "gs://$gs_path"
+          fi
 
 - builder:
     name: build-and-upload-artifacts-json-api
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            export PATH=$PATH:/usr/local/bin/
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          export PATH=$PATH:/usr/local/bin/
 
-            virtualenv -p python2.7 $WORKSPACE/releng_artifacts
-            source $WORKSPACE/releng_artifacts/bin/activate
+          virtualenv -p python2.7 $WORKSPACE/releng_artifacts
+          source $WORKSPACE/releng_artifacts/bin/activate
 
-            # install python packages
-            pip install google-api-python-client
+          # install python packages
+          pip install google-api-python-client
 
-            # generate and upload index file
-            echo "Generating Artifacts API ..."
-            python $WORKSPACE/utils/opnfv-artifacts.py > index.json
-            gsutil cp index.json gs://artifacts.opnfv.org/index.json
+          # generate and upload index file
+          echo "Generating Artifacts API ..."
+          python $WORKSPACE/utils/opnfv-artifacts.py > index.json
+          gsutil cp index.json gs://artifacts.opnfv.org/index.json
 
-            deactivate
+          deactivate
 
 - builder:
     name: lint-python-code
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
-
-            virtualenv -p python2.7 $WORKSPACE/releng_flake8
-            source $WORKSPACE/releng_flake8/bin/activate
-
-            # install python packages
-            pip install "flake8==2.6.2"
-
-            # generate and upload lint log
-            echo "Running flake8 code on $PROJECT ..."
-
-            # Get number of flake8 violations. If none, this will be an
-            # empty string: ""
-            FLAKE_COUNT="$(find . \
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
+
+          virtualenv -p python2.7 $WORKSPACE/releng_flake8
+          source $WORKSPACE/releng_flake8/bin/activate
+
+          # install python packages
+          pip install "flake8==2.6.2"
+
+          # generate and upload lint log
+          echo "Running flake8 code on $PROJECT ..."
+
+          # Get number of flake8 violations. If none, this will be an
+          # empty string: ""
+          FLAKE_COUNT="$(find . \
+              -path './releng_flake8' -prune -o \
+              -path './.tox' -prune -o \
+              -type f -name "*.py" -print | \
+              xargs flake8 --exit-zero -qq --count 2>&1)"
+
+          # Ensure we start with a clean environment
+          rm -f lint.log
+
+          if [ ! -z $FLAKE_COUNT ]; then
+            echo "Flake8 Violations: $FLAKE_COUNT" > lint.log
+            find . \
                 -path './releng_flake8' -prune -o \
                 -path './.tox' -prune -o \
                 -type f -name "*.py" -print | \
-                xargs flake8 --exit-zero -qq --count 2>&1)"
-
-            # Ensure we start with a clean environment
-            rm -f lint.log
-
-            if [ ! -z $FLAKE_COUNT ]; then
-              echo "Flake8 Violations: $FLAKE_COUNT" > lint.log
-              find . \
-                  -path './releng_flake8' -prune -o \
-                  -path './.tox' -prune -o \
-                  -type f -name "*.py" -print | \
-                  xargs flake8 --exit-zero --first >> violation.log
-              SHOWN=$(wc -l violation.log | cut -d' ' -f1)
-              echo -e "First $SHOWN shown\n---" >> lint.log
-              cat violation.log >> lint.log
-              sed -r -i '4,$s/^/ /g' lint.log
-              rm violation.log
-            fi
-
-            deactivate
+                xargs flake8 --exit-zero --first >> violation.log
+            SHOWN=$(wc -l violation.log | cut -d' ' -f1)
+            echo -e "First $SHOWN shown\n---" >> lint.log
+            cat violation.log >> lint.log
+            sed -r -i '4,$s/^/ /g' lint.log
+            rm violation.log
+          fi
+
+          deactivate
 
 - builder:
     name: report-lint-result-to-gerrit
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
 
-            # If no violations were found, no lint log will exist.
-            if [[ -e lint.log ]] ; then
-                echo -e "\nposting linting report to gerrit...\n"
+          # If no violations were found, no lint log will exist.
+          if [[ -e lint.log ]] ; then
+              echo -e "\nposting linting report to gerrit...\n"
 
-                cat lint.log
-                echo
+              cat lint.log
+              echo
 
-                ssh -p 29418 gerrit.opnfv.org \
-                    "gerrit review -p $GERRIT_PROJECT \
-                     -m \"$(cat lint.log)\" \
-                     $GERRIT_PATCHSET_REVISION \
-                     --notify NONE"
+              ssh -p 29418 gerrit.opnfv.org \
+                  "gerrit review -p $GERRIT_PROJECT \
+                   -m \"$(cat lint.log)\" \
+                   $GERRIT_PATCHSET_REVISION \
+                   --notify NONE"
 
-                exit 1
-            fi
+              exit 1
+          fi
 
 - builder:
     name: upload-review-docs
     builders:
-        - build-html-and-pdf-docs-output
-        - upload-under-review-docs-to-opnfv-artifacts
-        - report-docs-build-result-to-gerrit
+      - build-html-and-pdf-docs-output
+      - upload-under-review-docs-to-opnfv-artifacts
+      - report-build-result-to-gerrit
 
 - builder:
     name: upload-merged-docs
     builders:
-        - build-html-and-pdf-docs-output
-        - upload-generated-docs-to-opnfv-artifacts
-        - report-docs-build-result-to-gerrit
-        - remove-old-docs-from-opnfv-artifacts
+      - build-html-and-pdf-docs-output
+      - upload-generated-docs-to-opnfv-artifacts
+      - report-build-result-to-gerrit
+      - remove-old-docs-from-opnfv-artifacts
 
 - builder:
     name: check-bash-syntax
     builders:
-        - shell: "find . -name '*.sh' | xargs bash -n"
+      - shell: "find . -name '*.sh' | xargs bash -n"
 
 - builder:
     name: lint-yaml-code
     builders:
-        - shell: |
-            #!/bin/bash
-            set -o errexit
-            set -o pipefail
-            set -o xtrace
-            export PATH=$PATH:/usr/local/bin/
-
-            # install python packages
-            pip install "yamllint==1.6.0"
-
-            # generate and upload lint log
-            echo "Running yaml code on $PROJECT ..."
-
-            # Ensure we start with a clean environment
-            rm -f yaml-violation.log lint.log
-
-            # Get number of yaml violations. If none, this will be an
-            # empty string: ""
-            find . \
-                -type f -name "*.yml" -print \
-                -o -name "*.yaml" -print | \
-                xargs yamllint > yaml-violation.log || true
-
-            if [ -s "yaml-violation.log" ]; then
-              SHOWN=$(cat yaml-violation.log| grep -v "^$" |wc -l)
-              echo -e "First $SHOWN shown\n---" > lint.log
-              cat yaml-violation.log >> lint.log
-              sed -r -i '4,$s/^/ /g' lint.log
-            fi
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+          set -o xtrace
+          export PATH=$PATH:/usr/local/bin/
+
+          # install python packages
+          pip install "yamllint==1.6.0"
+
+          # generate and upload lint log
+          echo "Running yaml code on $PROJECT ..."
+
+          # Ensure we start with a clean environment
+          rm -f yaml-violation.log lint.log
+
+          # Get number of yaml violations. If none, this will be an
+          # empty string: ""
+          find . \
+              -type f -name "*.yml" -print \
+              -o -name "*.yaml" -print | \
+              xargs yamllint > yaml-violation.log || true
+
+          if [ -s "yaml-violation.log" ]; then
+            SHOWN=$(cat yaml-violation.log| grep -v "^$" |wc -l)
+            echo -e "First $SHOWN shown\n---" > lint.log
+            cat yaml-violation.log >> lint.log
+            sed -r -i '4,$s/^/ /g' lint.log
+          fi
 
 - builder:
     name: clean-workspace-log
     builders:
-        - shell: |
-            find $WORKSPACE -type f -name '*.log' | xargs rm -f
+      - shell: |
+          find $WORKSPACE -type f -name '*.log' | xargs rm -f
 
 - publisher:
     name: archive-artifacts
     publishers:
-        - archive:
-            artifacts: '{artifacts}'
-            allow-empty: true
-            fingerprint: true
-            latest-only: true
+      - archive:
+          artifacts: '{artifacts}'
+          allow-empty: true
+          fingerprint: true
+          latest-only: true
 
 - publisher:
     name: publish-coverage
                 unhealthy: 40
                 failing: 30
 
+# The majority of the email-ext plugin options are set to the default
+# for this macro so they can be managed through Jenkins' global
+# settings.
+- publisher:
+    name: email-jenkins-admins-on-failure
+    publishers:
+      - email-ext:
+          content-type: text
+          attach-build-log: true
+          compress-log: true
+          always: false
+          failure: true
+          send-to:
+            - recipients
index 5744222..2c3505a 100644 (file)
@@ -1,3 +1,4 @@
+---
 #####################################################
 # Parameters for slaves using old labels
 # This will be cleaned up once the new job structure and
 - parameter:
     name: 'apex-baremetal-master-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'apex-baremetal-master'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - lf-pod1
-            default-slaves:
-                - lf-pod1
+      - label:
+          name: SLAVE_LABEL
+          default: 'apex-baremetal-master'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - lf-pod1
+          default-slaves:
+            - lf-pod1
+
 - parameter:
     name: 'apex-baremetal-danube-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'apex-baremetal-danube'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - lf-pod1
-            default-slaves:
-                - lf-pod1
+      - label:
+          name: SLAVE_LABEL
+          default: 'apex-baremetal-danube'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - lf-pod1
+          default-slaves:
+            - lf-pod1
+
 - parameter:
     name: 'apex-virtual-master-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'apex-virtual-master'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - lf-virtual2
-                - lf-virtual3
-            default-slaves:
-                - lf-virtual2
-                - lf-virtual3
+      - label:
+          name: SLAVE_LABEL
+          default: 'apex-virtual-master'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - lf-virtual2
+            - lf-virtual3
+          default-slaves:
+            - lf-virtual2
+            - lf-virtual3
 
 - parameter:
     name: 'apex-virtual-danube-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'apex-virtual-danube'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - lf-pod3
-            default-slaves:
-                - lf-pod3
+      - label:
+          name: SLAVE_LABEL
+          default: 'apex-virtual-danube'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - lf-pod3
+          default-slaves:
+            - lf-pod3
+
 - parameter:
     name: 'lf-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - lf-pod1
-            default-slaves:
-                - lf-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - lf-pod1
+          default-slaves:
+            - lf-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+
 - parameter:
     name: 'lf-pod3-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - lf-pod3
-            default-slaves:
-                - lf-pod3
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - lf-pod3
+          default-slaves:
+            - lf-pod3
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+
 #####################################################
 # Parameters for CI baremetal PODs
 #####################################################
 - parameter:
     name: 'apex-baremetal-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'apex-baremetal'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
+      - label:
+          name: SLAVE_LABEL
+          default: 'apex-baremetal'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
 - parameter:
     name: 'compass-baremetal-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'compass-baremetal'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'compass-baremetal'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'compass-baremetal-master-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'compass-baremetal-master'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'compass-baremetal-master'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'compass-baremetal-branch-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'compass-baremetal-branch'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'compass-baremetal-branch'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'fuel-baremetal-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'fuel-baremetal'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'fuel-baremetal'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'armband-baremetal-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'armband-baremetal'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: LAB_CONFIG_URL
-            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
-            description: 'Base URI to the configuration directory'
+      - label:
+          name: SLAVE_LABEL
+          default: 'armband-baremetal'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: LAB_CONFIG_URL
+          default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+          description: 'Base URI to the configuration directory'
+
 - parameter:
     name: 'joid-baremetal-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'joid-baremetal'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: EXTERNAL_NETWORK
-            default: ext-net
-            description: "External network floating ips"
+      - label:
+          name: SLAVE_LABEL
+          default: 'joid-baremetal'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: EXTERNAL_NETWORK
+          default: ext-net
+          description: "External network floating ips"
+
 - parameter:
     name: 'daisy-baremetal-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - zte-pod2
-            default-slaves:
-                - zte-pod2
-        - label:
-            name: SLAVE_LABEL
-            default: 'daisy-baremetal'
-        - string:
-            name: INSTALLER_IP
-            default: '10.20.11.2'
-            description: 'IP of the installer'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - zte-pod2
+          default-slaves:
+            - zte-pod2
+      - label:
+          name: SLAVE_LABEL
+          default: 'daisy-baremetal'
+      - string:
+          name: INSTALLER_IP
+          default: '10.20.7.3'
+          description: 'IP of the installer'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 #####################################################
 # Parameters for CI virtual PODs
 #####################################################
 - parameter:
     name: 'apex-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'apex-virtual'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
+      - label:
+          name: SLAVE_LABEL
+          default: 'apex-virtual'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+
 - parameter:
     name: 'compass-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'compass-virtual'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'compass-virtual'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'compass-virtual-master-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'compass-virtual-master'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'compass-virtual-master'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'compass-virtual-branch-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'compass-virtual-branch'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'compass-virtual-branch'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'fuel-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'fuel-virtual'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'fuel-virtual'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'armband-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'armband-virtual'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: LAB_CONFIG_URL
-            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
-            description: 'Base URI to the configuration directory'
+      - label:
+          name: SLAVE_LABEL
+          default: 'armband-virtual'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: LAB_CONFIG_URL
+          default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+          description: 'Base URI to the configuration directory'
+
 - parameter:
     name: 'joid-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'joid-virtual'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'joid-virtual'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'daisy-virtual-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - zte-virtual1
-                - zte-virtual2
-            default-slaves:
-                - zte-virtual1
-        - label:
-            name: SLAVE_LABEL
-            default: 'daisy-virtual'
-        - string:
-            name: INSTALLER_IP
-            default: '10.20.11.2'
-            description: 'IP of the installer'
-        - string:
-            name: BRIDGE
-            default: 'daisy1'
-            description: 'pxe bridge for booting of Fuel master'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - zte-virtual1
+            - zte-virtual2
+          default-slaves:
+            - zte-virtual1
+      - label:
+          name: SLAVE_LABEL
+          default: 'daisy-virtual'
+      - string:
+          name: INSTALLER_IP
+          default: '10.20.11.2'
+          description: 'IP of the installer'
+      - string:
+          name: BRIDGE
+          default: 'daisy1'
+          description: 'pxe bridge for booting of Daisy master'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 #####################################################
 # Parameters for build slaves
 #####################################################
 - parameter:
     name: 'opnfv-build-enea-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'opnfv-build-enea'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'opnfv-build-enea'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
+
 - parameter:
     name: 'opnfv-build-centos-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'opnfv-build-centos'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'opnfv-build-centos'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
+
 - parameter:
     name: 'opnfv-build-ubuntu-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'opnfv-build-ubuntu'
-            description: 'Slave label on Jenkins'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'opnfv-build-ubuntu'
+          description: 'Slave label on Jenkins'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
+
 - parameter:
     name: 'opnfv-build-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'opnfv-build'
-            description: 'Slave label on Jenkins'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'opnfv-build'
+          description: 'Slave label on Jenkins'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
+
 - parameter:
     name: 'huawei-build-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - huawei-build
-            default-slaves:
-                - huawei-build
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - huawei-build
+          default-slaves:
+            - huawei-build
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'opnfv-build-ubuntu-arm-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'opnfv-build-ubuntu-arm'
-            description: 'Slave label on Jenkins'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'opnfv-build-ubuntu-arm'
+          description: 'Slave label on Jenkins'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
+
 #####################################################
 # Parameters for none-CI PODs
 #####################################################
 - parameter:
     name: 'ericsson-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - ericsson-pod1
-            default-slaves:
-                - ericsson-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - ericsson-pod1
+          default-slaves:
+            - ericsson-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'cengn-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - cengn-pod1
-            default-slaves:
-                - cengn-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - cengn-pod1
+          default-slaves:
+            - cengn-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'intel-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - intel-pod1
-            default-slaves:
-                - intel-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-pod1
+          default-slaves:
+            - intel-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'intel-pod2-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - intel-pod2
-            default-slaves:
-                - intel-pod2
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-pod2
+          default-slaves:
+            - intel-pod2
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+
 - parameter:
     name: 'intel-pod9-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - intel-pod9
-            default-slaves:
-                - intel-pod9
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-pod9
+          default-slaves:
+            - intel-pod9
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'intel-pod10-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - intel-pod10
-            default-slaves:
-                - intel-pod10
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-pod10
+          default-slaves:
+            - intel-pod10
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'intel-pod12-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - intel-pod12
-            default-slaves:
-                - intel-pod12
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-pod12
+          default-slaves:
+            - intel-pod12
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'huawei-pod3-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - huawei-pod3
-            default-slaves:
-                - huawei-pod3
-        - label:
-            name: SLAVE_LABEL
-            default: 'huawei-test'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - huawei-pod3
+          default-slaves:
+            - huawei-pod3
+      - label:
+          name: SLAVE_LABEL
+          default: 'huawei-test'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'huawei-pod4-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - huawei-pod4
-            default-slaves:
-                - huawei-pod4
-        - label:
-            name: SLAVE_LABEL
-            default: 'huawei-test'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - huawei-pod4
+          default-slaves:
+            - huawei-pod4
+      - label:
+          name: SLAVE_LABEL
+          default: 'huawei-test'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'intel-pod8-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - intel-pod8
-            default-slaves:
-                - intel-pod8
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-pod8
+          default-slaves:
+            - intel-pod8
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
+- parameter:
+    name: 'huawei-virtual5-defaults'
+    parameters:
+      - label:
+          name: SLAVE_LABEL
+          default: 'huawei-virtual5'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
 - parameter:
     name: 'huawei-virtual7-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - huawei-virtual7
-            default-slaves:
-                - huawei-virtual7
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - huawei-virtual7
+          default-slaves:
+            - huawei-virtual7
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
 - parameter:
     name: 'huawei-pod7-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - huawei-pod7
-            default-slaves:
-                - huawei-pod7
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - huawei-pod7
+          default-slaves:
+            - huawei-pod7
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
 - parameter:
     name: 'zte-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - zte-pod1
-            default-slaves:
-                - zte-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: INSTALLER_IP
-            default: '10.20.6.2'
-            description: 'IP of the installer'
-        - string:
-            name: BRIDGE
-            default: 'br6'
-            description: 'pxe bridge for booting of Fuel master'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - zte-pod1
+          default-slaves:
+            - zte-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: INSTALLER_IP
+          default: '10.20.6.2'
+          description: 'IP of the installer'
+      - string:
+          name: BRIDGE
+          default: 'br6'
+          description: 'pxe bridge for booting of Fuel master'
+
 - parameter:
     name: 'zte-pod2-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - zte-pod2
-            default-slaves:
-                - zte-pod2
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: INSTALLER_IP
-            default: '10.20.7.3'
-            description: 'IP of the installer'
-        - string:
-            name: BRIDGE
-            default: 'br7'
-            description: 'pxe bridge for booting of Fuel master'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - zte-pod2
+          default-slaves:
+            - zte-pod2
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: INSTALLER_IP
+          default: '10.20.7.3'
+          description: 'IP of the installer'
+      - string:
+          name: BRIDGE
+          default: 'br7'
+          description: 'pxe bridge for booting of Fuel master'
+
 - parameter:
     name: 'zte-pod3-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - zte-pod3
-            default-slaves:
-                - zte-pod3
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BRIDGE
-            default: 'br0'
-            description: 'pxe bridge for booting of Fuel master'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - zte-pod3
+          default-slaves:
+            - zte-pod3
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BRIDGE
+          default: 'br0'
+          description: 'pxe bridge for booting of Fuel master'
+
+- parameter:
+    name: zte-pod4-defaults
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - zte-pod4
+          default-slaves:
+            - zte-pod4
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'juniper-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - juniper-pod1
-            default-slaves:
-                - juniper-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: CEPH_DISKS
-            default: /srv
-            description: "Disks to use by ceph (comma separated list)"
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - juniper-pod1
+          default-slaves:
+            - juniper-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: CEPH_DISKS
+          default: /srv
+          description: "Disks to use by ceph (comma separated list)"
+
 - parameter:
     name: 'orange-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - orange-pod1
-            default-slaves:
-                - orange-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - orange-pod1
+          default-slaves:
+            - orange-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'orange-pod2-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - orange-pod2
-            default-slaves:
-                - orange-pod2
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - orange-pod2
+          default-slaves:
+            - orange-pod2
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'orange-pod5-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - orange-pod5
-            default-slaves:
-                - orange-pod5
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - orange-pod5
+          default-slaves:
+            - orange-pod5
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'dell-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - dell-pod1
-            default-slaves:
-                - dell-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - dell-pod1
+          default-slaves:
+            - dell-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'dell-pod2-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - dell-pod2
-            default-slaves:
-                - dell-pod2
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - dell-pod2
+          default-slaves:
+            - dell-pod2
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'nokia-pod1-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - nokia-pod1
-            default-slaves:
-                - nokia-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to use for Apex'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - nokia-pod1
+          default-slaves:
+            - nokia-pod1
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to use for Apex'
+
 - parameter:
     name: 'arm-pod2-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - arm-pod2
-            default-slaves:
-                - arm-pod2
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: LAB_CONFIG_URL
-            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
-            description: 'Base URI to the configuration directory'
-- parameter:
-    name: 'arm-pod3-defaults'
-    parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - arm-pod3
-            default-slaves:
-                - arm-pod3
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: LAB_CONFIG_URL
-            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
-            description: 'Base URI to the configuration directory'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - arm-pod2
+          default-slaves:
+            - arm-pod2
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: LAB_CONFIG_URL
+          default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+          description: 'Base URI to the configuration directory'
+
+- parameter:
+    name: 'arm-pod5-defaults'
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - arm-pod5
+          default-slaves:
+            - arm-pod5
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: LAB_CONFIG_URL
+          default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+          description: 'Base URI to the configuration directory'
+
 - parameter:
     name: 'arm-pod4-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - arm-pod4
-            default-slaves:
-                - arm-pod4
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: LAB_CONFIG_URL
-            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
-            description: 'Base URI to the configuration directory'
-- parameter:
-    name: 'arm-virtual1-defaults'
-    parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - arm-virtual1
-            default-slaves:
-                - arm-virtual1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: LAB_CONFIG_URL
-            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
-            description: 'Base URI to the configuration directory'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - arm-pod4
+          default-slaves:
+            - arm-pod4
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: LAB_CONFIG_URL
+          default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+          description: 'Base URI to the configuration directory'
+
+- parameter:
+    name: 'arm-virtual2-defaults'
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - arm-virtual2
+          default-slaves:
+            - arm-virtual2
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: LAB_CONFIG_URL
+          default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+          description: 'Base URI to the configuration directory'
+
 - parameter:
     name: 'intel-virtual6-defaults'
     parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - intel-virtual6
-            default-slaves:
-                - intel-virtual6
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-- parameter:
-    name: 'ool-defaults'
-    parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - ool-virtual1
-                - ool-virtual2
-                - ool-virtual3
-            default-slaves:
-                - '{default-slave}'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: SSH_KEY
-            default: /root/.ssh/id_rsa
-            description: 'SSH key to be used'
-- parameter:
-    name: 'ool-virtual1-defaults'
-    parameters:
-        - 'ool-defaults':
-            default-slave: 'ool-virtual1'
-- parameter:
-    name: 'ool-virtual2-defaults'
-    parameters:
-        - 'ool-defaults':
-            default-slave: 'ool-virtual2'
-- parameter:
-    name: 'ool-virtual3-defaults'
-    parameters:
-        - 'ool-defaults':
-            default-slave: 'ool-virtual3'
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-virtual6
+          default-slaves:
+            - intel-virtual6
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
+- parameter:
+    name: 'intel-virtual10-defaults'
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-virtual10
+          default-slaves:
+            - intel-virtual10
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
+- parameter:
+    name: 'doctor-defaults'
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - '{default-slave}'
+          default-slaves:
+            - '{default-slave}'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: SSH_KEY
+          default: /root/.ssh/id_rsa
+          description: 'SSH key to be used'
+
+- parameter:
+    name: 'doctor-apex-verify-defaults'
+    parameters:
+      - 'doctor-defaults':
+          default-slave: 'doctor-apex-verify'
+
+- parameter:
+    name: 'doctor-fuel-verify-defaults'
+    parameters:
+      - 'doctor-defaults':
+          default-slave: 'doctor-fuel-verify'
+
+- parameter:
+    name: 'doctor-joid-verify-defaults'
+    parameters:
+      - 'doctor-defaults':
+          default-slave: 'doctor-joid-verify'
+
 - parameter:
     name: 'multisite-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'multisite-virtual'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'multisite-virtual'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'ericsson-virtual5-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'ericsson-virtual5'
-        - string:
-            name: GIT_BASE
-            default: https://git.opendaylight.org/gerrit/p/$PROJECT.git
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'ericsson-virtual5'
+      - string:
+          name: GIT_BASE
+          default: https://git.opendaylight.org/gerrit/p/$PROJECT.git
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'ericsson-virtual12-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'ericsson-virtual12'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'ericsson-virtual12'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'ericsson-virtual13-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'ericsson-virtual13'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'ericsson-virtual13'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
+- parameter:
+    name: 'ericsson-virtual-pod1bl01-defaults'
+    parameters:
+      - label:
+          name: SLAVE_LABEL
+          default: 'ericsson-virtual-pod1bl01'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'odl-netvirt-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'odl-netvirt-virtual'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'odl-netvirt-virtual'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 - parameter:
     name: 'odl-netvirt-virtual-intel-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'odl-netvirt-virtual-intel'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
+      - label:
+          name: SLAVE_LABEL
+          default: 'odl-netvirt-virtual-intel'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+
 #####################################################
 # These slaves are just dummy slaves for sandbox jobs
 #####################################################
 - parameter:
     name: 'sandbox-baremetal-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'sandbox-baremetal'
-            description: 'Slave label on Jenkins'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'sandbox-baremetal'
+          description: 'Slave label on Jenkins'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
+
 - parameter:
     name: 'sandbox-virtual-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'sandbox-virtual'
-            description: 'Slave label on Jenkins'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'sandbox-virtual'
+          description: 'Slave label on Jenkins'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
+
 - parameter:
     name: 'dummy-pod1-defaults'
     parameters:
-        - label:
-            name: SLAVE_LABEL
-            default: 'dummy-pod1'
-            description: 'Slave label on Jenkins'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: BUILD_DIRECTORY
-            default: $WORKSPACE/build_output
-            description: "Directory where the build artifact will be located upon the completion of the build."
+      - label:
+          name: SLAVE_LABEL
+          default: 'dummy-pod1'
+          description: 'Slave label on Jenkins'
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+          description: 'Git URL to use on this Jenkins Slave'
+      - string:
+          name: BUILD_DIRECTORY
+          default: $WORKSPACE/build_output
+          description: "Directory where the build artifact will be located upon the completion of the build."
index 13ea9b3..1c7b8cd 100644 (file)
         branch: '{stream}'
         disabled: false
         gs-pathname: ''
-    danube: &danube
-        stream: danube
-        branch: 'stable/{stream}'
-        disabled: false
-        gs-pathname: '/{stream}'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
         - virtual:
             slave-label: joid-virtual
             <<: *master
-        - baremetal:
-            slave-label: joid-baremetal
-            <<: *danube
-        - virtual:
-            slave-label: joid-virtual
-            <<: *danube
 #--------------------------------
 #        None-CI PODs
 #--------------------------------
@@ -62,7 +51,7 @@
         - 'os-nosdn-lxd-noha':
             auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l2-nofeature-ha':
-            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: 'daily-trigger-disabled'
         - 'os-onos-nofeature-ha':
             auto-trigger-name: 'daily-trigger-disabled'
         - 'os-odl_l2-nofeature-noha':
         - 'os-ocl-nofeature-noha':
             auto-trigger-name: 'daily-trigger-disabled'
         - 'k8-nosdn-nofeature-noha':
-            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: 'daily-trigger-disabled'
         - 'k8-nosdn-lb-noha':
             auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+        - 'k8-ovn-lb-noha':
+            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+        - 'os-nosdn-openbaton-ha':
+            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
 
     jobs:
         - 'joid-{scenario}-{pod}-daily-{stream}'
     name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-nofeature-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 2 * * *'
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-odl_l2-nofeature-ha trigger - branch: master
 - trigger:
     name: 'joid-os-odl_l2-nofeature-ha-baremetal-master-trigger'
     name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-odl_l2-nofeature-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 7 * * *'
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-onos-nofeature-ha trigger - branch: master
 - trigger:
     name: 'joid-os-onos-nofeature-ha-baremetal-master-trigger'
     name: 'joid-os-onos-nofeature-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-onos-nofeature-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 12 * * *'
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-nofeature-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-onos-sfc-ha trigger - branch: master
 - trigger:
     name: 'joid-os-onos-sfc-ha-baremetal-master-trigger'
     name: 'joid-os-onos-sfc-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-onos-sfc-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-onos-sfc-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 17 * * *'
-- trigger:
-    name: 'joid-os-onos-sfc-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-sfc-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-onos-sfc-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-nosdn-lxd-noha trigger - branch: master
 - trigger:
     name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger'
     name: 'joid-os-nosdn-lxd-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-lxd-noha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 22 * * *'
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-noha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-nosdn-lxd-ha trigger - branch: master
 - trigger:
     name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger'
     name: 'joid-os-nosdn-lxd-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-lxd-ha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 10 * * *'
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-lxd-ha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # os-nosdn-nofeature-noha trigger - branch: master
 - trigger:
     name: 'joid-os-nosdn-nofeature-noha-baremetal-master-trigger'
     name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# os-nosdn-nofeature-noha trigger - branch: danube
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-baremetal-danube-trigger'
-    triggers:
-        - timed: '0 4 * * *'
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-virtual-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-orange-pod1-danube-trigger'
-    triggers:
-        - timed: ''
-- trigger:
-    name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-danube-trigger'
-    triggers:
-        - timed: ''
 # k8-nosdn-nofeature-noha trigger - branch: master
 - trigger:
     name: 'joid-k8-nosdn-nofeature-noha-baremetal-master-trigger'
     name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# k8-nosdn-nofeature-noha trigger - branch: danube
+# k8-nosdn-lb-noha trigger - branch: master
 - trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-baremetal-danube-trigger'
+    name: 'joid-k8-nosdn-lb-noha-baremetal-master-trigger'
     triggers:
-        - timed: '0 15 * * *'
+        - timed: '5 20 * * *'
 - trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-virtual-danube-trigger'
+    name: 'joid-k8-nosdn-lb-noha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-danube-trigger'
+    name: 'joid-k8-nosdn-lb-noha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-danube-trigger'
+    name: 'joid-k8-nosdn-lb-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# k8-nosdn-lb-noha trigger - branch: master
+# k8-ovn-lb-noha trigger - branch: master
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-baremetal-master-trigger'
+    name: 'joid-k8-ovn-lb-noha-baremetal-master-trigger'
     triggers:
-        - timed: '5 20 * * *'
+        - timed: '5 17 * * *'
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-virtual-master-trigger'
+    name: 'joid-k8-ovn-lb-noha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-orange-pod1-master-trigger'
+    name: 'joid-k8-ovn-lb-noha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-cengn-pod1-master-trigger'
+    name: 'joid-k8-ovn-lb-noha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
-# k8-nosdn-lb-noha trigger - branch: danube
+
+# os-nosdn-openbaton-ha trigger - branch: master
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-baremetal-danube-trigger'
+    name: 'joid-os-nosdn-openbaton-ha-baremetal-master-trigger'
     triggers:
-        - timed: '0 20 * * *'
+        - timed: '5 25 * * *'
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-virtual-danube-trigger'
+    name: 'joid-os-nosdn-openbaton-ha-virtual-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-orange-pod1-danube-trigger'
+    name: 'joid-os-nosdn-openbaton-ha-orange-pod1-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'joid-k8-nosdn-lb-noha-cengn-pod1-danube-trigger'
+    name: 'joid-os-nosdn-openbaton-ha-cengn-pod1-master-trigger'
     triggers:
         - timed: ''
index e197dbd..9740d38 100644 (file)
@@ -94,9 +94,6 @@ EXTRA=${DEPLOY_OPTIONS[4]}
 if [ "$SDN_CONTROLLER" == 'odl_l2' ] || [ "$SDN_CONTROLLER" == 'odl_l3' ]; then
     SDN_CONTROLLER='odl'
 fi
-if [ "$HA_MODE" == 'noha' ]; then
-    HA_MODE='nonha'
-fi
 
 # Add extra to features
 if [ "$EXTRA" != "" ];then
index e5b56bf..a39249a 100644 (file)
@@ -26,6 +26,7 @@
     testname:
         - 'cyclictest'
         - 'packet_forward'
+        - 'livemigration'
 #####################################
 # patch verification phases
 #####################################
                   git-revision: true
                   kill-phase-on: FAILURE
                   abort-all-job: true
+        - multijob:
+            name: livemigration-test
+            condition: SUCCESSFUL
+            projects:
+                - name: 'kvmfornfv-livemigration-daily-test-{stream}'
+                  current-parameters: false
+                  node-parameters: false
+                  git-revision: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+
 
 - job-template:
     name: 'kvmfornfv-daily-build-{stream}'
             !include-raw: ./kvmfornfv-download-artifact.sh
         - shell:
             !include-raw: ./kvmfornfv-test.sh
+- builder:
+    name: 'kvmfornfv-livemigration-daily-test-macro'
+    builders:
+        - shell:
+            !include-raw: ./kvmfornfv-download-artifact.sh
+        - shell:
+            !include-raw: ./kvmfornfv-test.sh
+
 #####################################
 # parameter macros
 #####################################
diff --git a/jjb/multisite/fuel-deploy-for-multisite.sh b/jjb/multisite/fuel-deploy-for-multisite.sh
deleted file mode 100755 (executable)
index 71c6cc1..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -o nounset
-set -o pipefail
-
-# do not continue with the deployment if FRESH_INSTALL is not requested
-if [[ "$FRESH_INSTALL" == "true" ]]; then
-    echo "Fresh install requested. Proceeding with the installation."
-else
-    echo "Fresh install is not requested. Skipping the installation."
-    exit 0
-fi
-
-export TERM="vt220"
-export BRANCH=$(echo $BRANCH | sed 's/stable\///g')
-# get the latest successful job console log and extract the properties filename
-FUEL_DEPLOY_BUILD_URL="https://build.opnfv.org/ci/job/fuel-deploy-virtual-daily-$BRANCH/lastSuccessfulBuild/consoleText"
-FUEL_PROPERTIES_FILE=$(curl -s -L ${FUEL_DEPLOY_BUILD_URL} | grep 'ISO:' | awk '{print $2}' | sed 's/iso/properties/g')
-if [[ -z "FUEL_PROPERTIES_FILE" ]]; then
-    echo "Unable to extract the url to Fuel ISO properties from ${FUEL_DEPLOY_URL}"
-    exit 1
-fi
-
-# use known/working version of fuel
-#FUEL_PROPERTIES_FILE="opnfv-2017-03-06_16-00-15.properties"
-curl -L -s -o $WORKSPACE/latest.properties $GS_PATH/$FUEL_PROPERTIES_FILE
-
-# source the file so we get OPNFV vars
-source latest.properties
-
-# echo the info about artifact that is used during the deployment
-echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
-
-# download the iso
-echo "Downloading the ISO using the link http://$OPNFV_ARTIFACT_URL"
-curl -L -s -o $WORKSPACE/opnfv.iso http://$OPNFV_ARTIFACT_URL > gsutil.iso.log 2>&1
-
-
-# set deployment parameters
-DEPLOY_SCENARIO="os-nosdn-nofeature-noha"
-export TMPDIR=$HOME/tmpdir
-BRIDGE=${BRIDGE:-pxebr}
-LAB_NAME=${NODE_NAME/-*}
-POD_NAME=${NODE_NAME/*-}
-
-if [[ "$NODE_NAME" =~ "virtual" ]]; then
-    POD_NAME="virtual_kvm"
-fi
-
-# we currently support ericsson, intel, lf and zte labs
-if [[ ! "$LAB_NAME" =~ (ericsson|intel|lf|zte) ]]; then
-    echo "Unsupported/unidentified lab $LAB_NAME. Cannot continue!"
-    exit 1
-else
-    echo "Using configuration for $LAB_NAME"
-fi
-
-# create TMPDIR if it doesn't exist
-export TMPDIR=$HOME/tmpdir
-mkdir -p $TMPDIR
-
-# change permissions down to TMPDIR
-chmod a+x $HOME
-chmod a+x $TMPDIR
-
-# clone fuel repo and checkout the sha1 that corresponds to the ISO
-echo "Cloning fuel repo"
-git clone https://gerrit.opnfv.org/gerrit/p/fuel.git fuel
-cd $WORKSPACE/fuel
-echo "Checking out $OPNFV_GIT_SHA1"
-git checkout $OPNFV_GIT_SHA1 --quiet
-
-# clone the securedlab repo
-cd $WORKSPACE
-echo "Cloning securedlab repo ${GIT_BRANCH##origin/}"
-git clone ssh://jenkins-ericsson@gerrit.opnfv.org:29418/securedlab --quiet \
-    --branch ${GIT_BRANCH##origin/}
-
-# log file name
-FUEL_LOG_FILENAME="${JOB_NAME}_${BUILD_NUMBER}.log.tar.gz"
-
-# construct the command
-DEPLOY_COMMAND="sudo $WORKSPACE/fuel/ci/deploy.sh -b file://$WORKSPACE/securedlab \
-    -l $LAB_NAME -p $POD_NAME -s $DEPLOY_SCENARIO -i file://$WORKSPACE/opnfv.iso \
-    -H -B $BRIDGE -S $TMPDIR -L $WORKSPACE/$FUEL_LOG_FILENAME"
-
-# log info to console
-echo "Deployment parameters"
-echo "--------------------------------------------------------"
-echo "Scenario: $DEPLOY_SCENARIO"
-echo "Lab: $LAB_NAME"
-echo "POD: $POD_NAME"
-echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
-echo
-echo "Starting the deployment using $INSTALLER_TYPE. This could take some time..."
-echo "--------------------------------------------------------"
-echo
-
-# start the deployment
-echo "Issuing command"
-echo "$DEPLOY_COMMAND"
-echo
-
-$DEPLOY_COMMAND
-exit_code=$?
-
-echo
-echo "--------------------------------------------------------"
-echo "Deployment is done!"
-
-if [[ $exit_code -ne 0 ]]; then
-    echo "Deployment failed!"
-    exit $exit_code
-else
-    echo "Deployment is successful!"
-    exit 0
-fi
diff --git a/jjb/multisite/multisite-daily-jobs.yml b/jjb/multisite/multisite-daily-jobs.yml
deleted file mode 100644 (file)
index 06cefb6..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-- project:
-    name: kingbird
-
-    project: 'multisite'
-
-    jobs:
-        - 'multisite-kingbird-virtual-daily-{stream}'
-        - 'multisite-{phase}-{stream}'
-
-    phase:
-        - 'fuel-deploy-regionone-virtual':
-            slave-label: ericsson-virtual12
-        - 'fuel-deploy-regiontwo-virtual':
-            slave-label: ericsson-virtual13
-        - 'register-endpoints':
-            slave-label: ericsson-virtual12
-        - 'update-auth':
-            slave-label: ericsson-virtual13
-        - 'kingbird-deploy-virtual':
-            slave-label: ericsson-virtual12
-
-    stream:
-        - master:
-            branch: '{stream}'
-            gs-pathname: ''
-            disabled: false
-            timed: '0 12 * * *'
-        - danube:
-            branch: 'stable/{stream}'
-            gs-pathname: '/{stream}'
-            disabled: false
-            timed: '0 0 * * *'
-
-- job-template:
-    name: 'multisite-kingbird-virtual-daily-{stream}'
-
-    project-type: multijob
-
-    disabled: '{obj:disabled}'
-
-    concurrent: false
-
-    parameters:
-        - project-parameter:
-            project: '{project}'
-            branch: '{branch}'
-        - choice:
-            name: FRESH_INSTALL
-            choices:
-                - 'true'
-                - 'false'
-        - string:
-            name: KINGBIRD_LOG_FILE
-            default: $WORKSPACE/kingbird.log
-        - 'opnfv-build-defaults'
-
-    triggers:
-         - timed: '{timed}'
-
-    builders:
-        - description-setter:
-            description: "Built on $NODE_NAME"
-        - multijob:
-            name: fuel-deploy-virtual
-            condition: SUCCESSFUL
-            projects:
-                - name: 'multisite-fuel-deploy-regionone-virtual-{stream}'
-                  current-parameters: false
-                  predefined-parameters: |
-                    FUEL_VERSION=latest
-                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
-                    OS_REGION=RegionOne
-                    REGIONONE_IP=100.64.209.10
-                    REGIONTWO_IP=100.64.209.11
-                    FRESH_INSTALL=$FRESH_INSTALL
-                  node-parameters: false
-                  node-label-name: SLAVE_LABEL
-                  node-label: ericsson-virtual12
-                  kill-phase-on: FAILURE
-                  abort-all-job: true
-                - name: 'multisite-fuel-deploy-regiontwo-virtual-{stream}'
-                  current-parameters: false
-                  predefined-parameters: |
-                    FUEL_VERSION=latest
-                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
-                    OS_REGION=RegionTwo
-                    REGIONONE_IP=100.64.209.10
-                    REGIONTWO_IP=100.64.209.11
-                    FRESH_INSTALL=$FRESH_INSTALL
-                  node-parameters: false
-                  node-label-name: SLAVE_LABEL
-                  node-label: ericsson-virtual13
-                  kill-phase-on: FAILURE
-                  abort-all-job: true
-        - multijob:
-            name: centralize-keystone
-            condition: SUCCESSFUL
-            projects:
-                - name: 'multisite-register-endpoints-{stream}'
-                  current-parameters: false
-                  predefined-parameters: |
-                    OS_REGION=RegionOne
-                    REGIONONE_IP=100.64.209.10
-                    REGIONTWO_IP=100.64.209.11
-                    FRESH_INSTALL=$FRESH_INSTALL
-                  node-parameters: false
-                  node-label-name: SLAVE_LABEL
-                  node-label: ericsson-virtual12
-                  kill-phase-on: FAILURE
-                  abort-all-job: true
-                - name: 'multisite-update-auth-{stream}'
-                  current-parameters: false
-                  predefined-parameters: |
-                    OS_REGION=RegionTwo
-                    REGIONONE_IP=100.64.209.10
-                    REGIONTWO_IP=100.64.209.11
-                    FRESH_INSTALL=$FRESH_INSTALL
-                  node-parameters: false
-                  node-label-name: SLAVE_LABEL
-                  node-label: ericsson-virtual13
-                  kill-phase-on: FAILURE
-                  abort-all-job: true
-        - multijob:
-            name: kingbird-deploy-virtual
-            condition: SUCCESSFUL
-            projects:
-                - name: 'multisite-kingbird-deploy-virtual-{stream}'
-                  current-parameters: false
-                  predefined-parameters: |
-                    OS_REGION=RegionOne
-                    REGIONONE_IP=100.64.209.10
-                    REGIONTWO_IP=100.64.209.11
-                    FRESH_INSTALL=$FRESH_INSTALL
-                  node-parameters: false
-                  node-label-name: SLAVE_LABEL
-                  node-label: ericsson-virtual12
-                  kill-phase-on: FAILURE
-                  abort-all-job: true
-        - multijob:
-            name: kingbird-functest
-            condition: SUCCESSFUL
-            projects:
-                - name: 'functest-fuel-virtual-suite-{stream}'
-                  current-parameters: false
-                  predefined-parameters: |
-                    DEPLOY_SCENARIO=os-nosdn-multisite-noha
-                    FUNCTEST_SUITE_NAME=multisite
-                    OS_REGION=RegionOne
-                    REGIONONE_IP=100.64.209.10
-                    REGIONTWO_IP=100.64.209.11
-                    FRESH_INSTALL=$FRESH_INSTALL
-                  node-parameters: false
-                  node-label-name: SLAVE_LABEL
-                  node-label: ericsson-virtual12
-                  kill-phase-on: NEVER
-                  abort-all-job: false
-
-- job-template:
-    name: 'multisite-{phase}-{stream}'
-
-    concurrent: false
-
-    disabled: '{obj:disabled}'
-
-    concurrent: false
-
-    parameters:
-        - project-parameter:
-            project: '{project}'
-            branch: '{branch}'
-        - string:
-            name: KINGBIRD_LOG_FILE
-            default: $WORKSPACE/kingbird.log
-        - string:
-            name: GS_PATH
-            default: 'http://artifacts.opnfv.org/fuel{gs-pathname}'
-        - 'fuel-defaults'
-        - '{slave-label}-defaults'
-        - choice:
-            name: FRESH_INSTALL
-            choices:
-                - 'true'
-                - 'false'
-
-    scm:
-        - git-scm
-
-    builders:
-        - description-setter:
-            description: "Built on $NODE_NAME"
-        - 'multisite-{phase}-builder':
-            stream: '{stream}'
-
-    publishers:
-        - 'multisite-{phase}-publisher'
-
-########################
-# builder macros
-########################
-- builder:
-    name: 'multisite-fuel-deploy-regionone-virtual-builder'
-    builders:
-        - shell:
-            !include-raw-escape: ./fuel-deploy-for-multisite.sh
-        - shell: |
-            #!/bin/bash
-
-            echo "This is where we deploy fuel, extract passwords and save into file"
-
-            cd $WORKSPACE/tools/keystone/
-            ./run.sh -t controller -r fetchpass.sh -o servicepass.ini
-
-- builder:
-    name: 'multisite-fuel-deploy-regiontwo-virtual-builder'
-    builders:
-        - shell:
-            !include-raw-escape: ./fuel-deploy-for-multisite.sh
-        - shell: |
-            #!/bin/bash
-
-            echo "This is where we deploy fuel, extract publicUrl, privateUrl, and adminUrl and save into file"
-
-            cd $WORKSPACE/tools/keystone/
-            ./run.sh -t controller -r endpoint.sh -o endpoints.ini
-- builder:
-    name: 'multisite-register-endpoints-builder'
-    builders:
-        - copyartifact:
-            project: 'multisite-fuel-deploy-regiontwo-virtual-{stream}'
-            which-build: multijob-build
-            filter: "endpoints.ini"
-        - shell: |
-            #!/bin/bash
-
-            echo "This is where we register RegionTwo in RegionOne keystone using endpoints.ini"
-
-            cd $WORKSPACE/tools/keystone/
-            ./run.sh -t controller -r region.sh -d $WORKSPACE/endpoints.ini
-- builder:
-    name: 'multisite-update-auth-builder'
-    builders:
-        - copyartifact:
-            project: 'multisite-fuel-deploy-regionone-virtual-{stream}'
-            which-build: multijob-build
-            filter: "servicepass.ini"
-        - shell: |
-            #!/bin/bash
-
-            echo "This is where we read passwords from servicepass.ini and replace passwords in RegionTwo"
-
-            cd $WORKSPACE/tools/keystone/
-            ./run.sh -t controller -r writepass.sh -d $WORKSPACE/servicepass.ini
-            ./run.sh -t compute -r writepass.sh -d $WORKSPACE/servicepass.ini
-- builder:
-    name: 'multisite-kingbird-deploy-virtual-builder'
-    builders:
-        - shell: |
-            #!/bin/bash
-
-            echo "This is where we install kingbird"
-            cd $WORKSPACE/tools/kingbird
-            ./deploy.sh
-########################
-# publisher macros
-########################
-- publisher:
-    name: 'multisite-fuel-deploy-regionone-virtual-publisher'
-    publishers:
-        - archive:
-            artifacts: 'servicepass.ini'
-            allow-empty: false
-            only-if-success: true
-            fingerprint: true
-- publisher:
-    name: 'multisite-fuel-deploy-regiontwo-virtual-publisher'
-    publishers:
-        - archive:
-            artifacts: 'endpoints.ini'
-            allow-empty: false
-            only-if-success: true
-            fingerprint: true
-- publisher:
-    name: 'multisite-register-endpoints-publisher'
-    publishers:
-        - archive:
-            artifacts: 'dummy.txt'
-            allow-empty: true
-- publisher:
-    name: 'multisite-update-auth-publisher'
-    publishers:
-        - archive:
-            artifacts: 'dummy.txt'
-            allow-empty: true
-- publisher:
-    name: 'multisite-kingbird-deploy-virtual-publisher'
-    publishers:
-        - archive:
-            artifacts: 'dummy.txt'
-            allow-empty: true
-- publisher:
-    name: 'multisite-kingbird-functest-publisher'
-    publishers:
-        - archive:
-            artifacts: 'dummy.txt'
-            allow-empty: true
index 9a4d885..2702c45 100644 (file)
@@ -58,7 +58,7 @@
 - job-template:
     name: 'netready-build-gluon-packages-daily-{stream}'
 
-    disabled: false
+    disabled: true
 
     concurrent: true
 
diff --git a/jjb/nfvbench/nfvbench.yml b/jjb/nfvbench/nfvbench.yml
new file mode 100644 (file)
index 0000000..85486c8
--- /dev/null
@@ -0,0 +1,90 @@
+- project:
+    name: nfvbench
+
+    project: '{name}'
+
+    jobs:
+        - 'nfvbench-build-{stream}'
+        - 'nfvbench-verify-{stream}'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+            disabled: false
+            docker-tag: 'latest'
+
+- job-template:
+    name: 'nfvbench-build-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: "Used for overriding the GIT URL coming from Global Jenkins configuration in case if the stuff is done on none-LF HW."
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - comment-added-contains-event:
+                    comment-contains-value: 'buildvm'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+
+    builders:
+        - shell: |
+            cd $WORKSPACE/nfvbenchvm/dib
+            bash build-image.sh
+
+- job-template:
+    name: 'nfvbench-verify-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: "Used for overriding the GIT URL coming from Global Jenkins configuration in case if the stuff is done on none-LF HW."
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+
+    builders:
+        - shell: |
+            echo "pass"
+
+
diff --git a/jjb/orchestra/orchestra-daily-jobs.yml b/jjb/orchestra/orchestra-daily-jobs.yml
new file mode 100644 (file)
index 0000000..6baaab8
--- /dev/null
@@ -0,0 +1,98 @@
+###################################
+# job configuration for orchestra
+###################################
+- project:
+    name: 'orchestra-daily-jobs'
+
+    project: 'orchestra'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        disabled: false
+
+#-------------------------------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#-------------------------------------------------------
+    pod:
+        - virtual:
+            slave-label: 'joid-virtual'
+            os-version: 'xenial'
+            <<: *master
+
+    jobs:
+        - 'orchestra-{pod}-daily-{stream}'
+
+################################
+# job template
+################################
+- job-template:
+    name: 'orchestra-{pod}-daily-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - ssh-agent-wrapper
+
+        - timeout:
+            timeout: 240
+            fail: true
+
+    triggers:
+         - timed: '@daily'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: os-nosdn-openbaton-ha
+        - '{slave-label}-defaults'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'joid-deploy-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+                    COMPASS_OS_VERSION=xenial
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: functest
+            condition: SUCCESSFUL
+            projects:
+                - name: 'functest-joid-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+                    FUNCTEST_SUITE_NAME=orchestra_ims
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: true
diff --git a/jjb/orchestra/orchestra-project-jobs.yml b/jjb/orchestra/orchestra-project-jobs.yml
new file mode 100644 (file)
index 0000000..0f0c0f6
--- /dev/null
@@ -0,0 +1,50 @@
+- project:
+
+    name: orchestra-project
+
+    project: 'orchestra'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+    jobs:
+        - 'orchestra-build-{stream}'
+
+- job-template:
+    name: 'orchestra-build-{stream}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+
+    scm:
+        - git-scm
+
+    triggers:
+        - timed: 'H 23 * * *'
+
+    builders:
+        - 'orchestra-build-macro'
+
+- builder:
+    name: 'orchestra-build-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "Hello world!"
+
+
diff --git a/jjb/ovn4nfv/ovn4nfv-daily-jobs.yml b/jjb/ovn4nfv/ovn4nfv-daily-jobs.yml
new file mode 100644 (file)
index 0000000..ed6df41
--- /dev/null
@@ -0,0 +1,87 @@
+- project:
+    name: 'ovn4nfv-daily-jobs'
+
+    project: 'ovn4nfv'
+
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        disabled: false
+
+    pod:
+        - virtual:
+            slave-label: 'joid-virtual'
+            os-version: 'xenial'
+            <<: *master
+
+    jobs:
+        - 'ovn4nfv-{pod}-daily-{stream}'
+
+- job-template:
+    name: 'ovn4nfv-{pod}-daily-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    scm:
+        - git-scm
+
+    wrappers:
+        - ssh-agent-wrapper
+
+        - timeout:
+            timeout: 240
+            fail: true
+
+    triggers:
+         - timed: '@daily'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: os-ovn-nofeature-noha
+        - '{slave-label}-defaults'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'joid-deploy-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-ovn-nofeature-noha
+                    COMPASS_OS_VERSION=xenial
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: functest
+            condition: SUCCESSFUL
+            projects:
+                - name: 'functest-joid-{pod}-daily-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    DEPLOY_SCENARIO=os-ovn-nofeature-noha
+                    FUNCTEST_SUITE_NAME=ovn4nfv_test_suite
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+
diff --git a/jjb/ovn4nfv/ovn4nfv-project-jobs.yml b/jjb/ovn4nfv/ovn4nfv-project-jobs.yml
new file mode 100644 (file)
index 0000000..805aa04
--- /dev/null
@@ -0,0 +1,51 @@
+- project:
+    name: ovn4nfv
+
+    project: '{name}'
+
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+            disabled: false
+
+    jobs:
+        - 'ovn4nfv-build-{stream}'
+
+- job-template:
+    name: 'ovn4nfv-build-{stream}'
+
+    concurrent: true
+
+    disabled: '{obj:disabled}'
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-total: 1
+            max-per-node: 1
+            option: 'project'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+
+    scm:
+        - git-scm
+
+    triggers:
+        - timed: 'H 23 * * *'
+
+    builders:
+        - 'ovn4nfv-build-macro'
+
+- builder:
+    name: 'ovn4nfv-build-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "hello world"
index 0e8c713..62f6de0 100644 (file)
     publishers:
         - email:
             recipients: therbert@redhat.com mark.d.gray@intel.com billy.o.mahony@intel.com
+        - email-jenkins-admins-on-failure
 
 - builder:
     name: build-rpms
diff --git a/jjb/qtip/helpers/cleanup-deploy.sh b/jjb/qtip/helpers/cleanup-deploy.sh
deleted file mode 100644 (file)
index 9cb19a5..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2016 ZTE and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# Remove previous running containers if exist
-if [[ ! -z $(docker ps -a | grep "opnfv/qtip:$DOCKER_TAG") ]]; then
-    echo "Removing existing opnfv/qtip containers..."
-    # workaround: sometimes it throws an error when stopping qtip container.
-    # To make sure ci job unblocked, remove qtip container by force without stopping it.
-    docker rm -f $(docker ps -a | grep "opnfv/qtip:$DOCKER_TAG" | awk '{print $1}')
-fi
-
-# Remove existing images if exist
-if [[ $(docker images opnfv/qtip:${DOCKER_TAG} | wc -l) -gt 1 ]]; then
-    echo "Removing docker image opnfv/qtip:$DOCKER_TAG..."
-    docker rmi opnfv/qtip:$DOCKER_TAG
-fi
diff --git a/jjb/qtip/helpers/validate-deploy.sh b/jjb/qtip/helpers/validate-deploy.sh
deleted file mode 100644 (file)
index af8f8c2..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2017 ZTE and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -e
-
-echo "--------------------------------------------------------"
-echo "POD: $NODE_NAME"
-echo "Scenario: $DEPLOY_SCENARIO"
-echo "INSTALLER: $INSTALLER_TYPE"
-echo "INSTALLER_IP: $INSTALLER_IP"
-echo "--------------------------------------------------------"
-
-echo "Qtip: Pulling docker image: opnfv/qtip:${DOCKER_TAG}"
-docker pull opnfv/qtip:$DOCKER_TAG >/dev/null
-
-envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}
--e POD_NAME=${NODE_NAME} -e SCENARIO=${DEPLOY_SCENARIO}"
-
-cmd=" docker run -id -e $envs opnfv/qtip:${DOCKER_TAG} /bin/bash"
-echo "Qtip: Running docker command: ${cmd}"
-${cmd}
-
-container_id=$(docker ps | grep "opnfv/qtip:${DOCKER_TAG}" | awk '{print $1}' | head -1)
-if [ $(docker ps | grep 'opnfv/qtip' | wc -l) == 0 ]; then
-    echo "The container opnfv/qtip with ID=${container_id} has not been properly started. Exiting..."
-    exit 1
-fi
-
-echo "The container ID is: ${container_id}"
-QTIP_REPO=/home/opnfv/repos/qtip
-
-docker exec -t ${container_id} bash -c "bash ${QTIP_REPO}/tests/ci/run_ci.sh"
-
-echo "Qtip done!"
-exit 0
\ No newline at end of file
diff --git a/jjb/qtip/helpers/validate-setup.sh b/jjb/qtip/helpers/validate-setup.sh
deleted file mode 100644 (file)
index 8d84e12..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2017 ZTE and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-set -e
-
-# setup virtualenv
-sudo pip install -u virtualenv virtualenvwrapper
-export WORKON_HOME=$HOME/.virtualenvs
-source /usr/local/bin/virtualenvwrapper.sh
-mkvirtualenv qtip
-workon qtip
-
-# setup qtip
-sudo pip install $HOME/repos/qtip
-
-# testing
-qtip --version
-qtip --help
diff --git a/jjb/qtip/qtip-experimental-jobs.yml b/jjb/qtip/qtip-experimental-jobs.yml
new file mode 100644 (file)
index 0000000..05445d8
--- /dev/null
@@ -0,0 +1,44 @@
+###########################################
+# Experimental jobs for development purpose
+###########################################
+
+- project:
+    name: qtip-experimental-jobs
+    project: qtip
+    jobs:
+        - 'qtip-experimental-{stream}'
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+            disabled: false
+
+################################
+## job templates
+#################################
+
+- job-template:
+    name: 'qtip-experimental-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        # Pin the tests on zte-pod4 with apex deployment
+        - apex-defaults
+        - zte-pod4-defaults
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - experimental:
+            project: '{project}'
+            branch: '{branch}'
+            files: '**'
+
+    builders:
+        - shell: |
+            #!/bin/bash
+            source tests/ci/experimental.sh
index 8dd97de..e64173c 100644 (file)
         branch: '{stream}'
         gs-pathname: ''
         docker-tag: latest
-    danube: &danube
-        stream: danube
-        branch: 'stable/{stream}'
-        gs-pathname: '/{stream}'
-        docker-tag: 'stable'
 
 #--------------------------------
 # JOB VARIABLES
 #--------------------------------
-    pod:
-        - zte-pod1:
-            installer: fuel
-            scenario: os-odl_l2-nofeature-ha
+    qpi:
+        - compute:
+            installer: apex
+            pod: zte-pod4
             <<: *master
-        - zte-pod3:
-            installer: fuel
-            scenario: os-nosdn-kvm-ha
+        - storage:
+            installer: apex
+            pod: zte-pod4
             <<: *master
-        - zte-pod1:
-            installer: fuel
-            scenario: os-odl_l2-nofeature-ha
-            <<: *danube
-        - zte-pod3:
-            installer: fuel
-            scenario: os-nosdn-nofeature-ha
-            <<: *danube
-        - zte-pod3:
-            installer: fuel
-            scenario: os-nosdn-kvm-ha
-            <<: *danube
 
 #--------------------------------
 # JOB LIST
 #--------------------------------
     jobs:
-        - 'qtip-{scenario}-{pod}-daily-{stream}'
+        - 'qtip-{qpi}-{installer}-{stream}'
 
 ################################
 # job templates
 ################################
 - job-template:
-    name: 'qtip-{scenario}-{pod}-daily-{stream}'
+    name: 'qtip-{qpi}-{installer}-{stream}'
     disabled: false
     parameters:
         - project-parameter:
@@ -64,7 +47,7 @@
         - '{pod}-defaults'
         - string:
             name: DEPLOY_SCENARIO
-            default: '{scenario}'
+            default: generic
         - string:
             name: DOCKER_TAG
             default: '{docker-tag}'
             name: CI_DEBUG
             default: 'false'
             description: "Show debug output information"
+        - string:
+            name: TEST_SUITE
+            default: '{qpi}'
     scm:
         - git-scm
     triggers:
-        - 'qtip-{scenario}-{pod}-daily-{stream}-trigger'
+        - 'qtip-daily'
     builders:
         - description-setter:
             description: "POD: $NODE_NAME"
-        - qtip-validate-deploy
+        - shell: |
+            #!/bin/bash
+            source tests/ci/periodic.sh
     publishers:
         - qtip-common-publishers
+        - email-jenkins-admins-on-failure
 
 ################
 # MARCOS
 #---------
 # builder
 #---------
-- builder:
-    name: qtip-validate-deploy
-    builders:
-        - shell:
-            !include-raw: ./helpers/cleanup-deploy.sh
-        - shell:
-            !include-raw: ./helpers/validate-deploy.sh
-
 
 #-----------
 # parameter
 #---------
 
 - trigger:
-    name: 'qtip-os-odl_l2-nofeature-ha-zte-pod1-daily-master-trigger'
-    triggers:
-        - timed: '0 15 * * *'
-
-- trigger:
-    name: 'qtip-os-nosdn-kvm-ha-zte-pod3-daily-master-trigger'
+    name: 'qtip-daily'
     triggers:
         - timed: '0 15 * * *'
-
-- trigger:
-    name: 'qtip-os-odl_l2-nofeature-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-        - timed: '0 7 * * *'
-
-- trigger:
-    name: 'qtip-os-nosdn-kvm-ha-zte-pod3-daily-danube-trigger'
-    triggers:
-        - timed: '0 7 * * *'
-
-- trigger:
-    name: 'qtip-os-nosdn-nofeature-ha-zte-pod3-daily-danube-trigger'
-    triggers:
-        - timed: '30 0 * * *'
index dd444c7..a273c85 100644 (file)
@@ -7,6 +7,8 @@
     project: qtip
     jobs:
         - 'qtip-verify-{stream}'
+        - 'qtip-review-notebook-{stream}'
+        - 'qtip-merge-{stream}'
     stream:
         - master:
             branch: '{stream}'
         - qtip-unit-tests-and-docs-build
     publishers:
         - publish-coverage
+        - email-jenkins-admins-on-failure
+
+# upload juypter notebook to artifacts for review
+- job-template:
+    name: 'qtip-review-notebook-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+                disable-strict-forbidden-file-verification: 'true'
+                file-paths:
+                  - compare-type: ANT
+                    pattern: 'examples/**'
+    builders:
+        - upload-under-review-notebooks-to-opnfv-artifacts
+        - report-build-result-to-gerrit
+
+- job-template:
+    name: 'qtip-merge-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: $GERRIT_PROJECT
+            branch: '{branch}'
+        - string:
+            name: GS_URL
+            default: '$GS_BASE{gs-pathname}'
+            description: "Directory where the build artifact will be located upon the completion of the build."
+        - string:
+            name: GERRIT_REFSPEC
+            default: 'refs/heads/{branch}'
+            description: "JJB configured GERRIT_REFSPEC parameter"
+
+    scm:
+        - git-scm
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - change-merged-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'remerge'
+            projects:
+                - project-compare-type: 'ANT'
+                  project-pattern: '*'
+                  branches:
+                      - branch-compare-type: 'ANT'
+                        branch-pattern: '**/{branch}'
+                  file-paths:
+                      - compare-type: ANT
+                        pattern: examples/**
+
+    builders:
+        - remove-old-docs-from-opnfv-artifacts
 
 ################################
 ## job builders
             set -o xtrace
 
             tox
+
+# modified from upload-under-review-docs-to-opnfv-artifacts in global/releng-macro.yml
+- builder:
+    name: upload-under-review-notebooks-to-opnfv-artifacts
+    builders:
+        - shell: |
+            #!/bin/bash
+            set -o errexit
+            set -o pipefail
+            set -o xtrace
+            export PATH=$PATH:/usr/local/bin/
+
+            [[ $GERRIT_CHANGE_NUMBER =~ .+ ]]
+            [[ -d examples ]] || exit 0
+
+            echo
+            echo "###########################"
+            echo "UPLOADING DOCS UNDER REVIEW"
+            echo "###########################"
+            echo
+
+            gs_base="artifacts.opnfv.org/$PROJECT/review"
+            gs_path="$gs_base/$GERRIT_CHANGE_NUMBER"
+            local_path="upload/$GERRIT_CHANGE_NUMBER"
+
+            mkdir -p upload
+            cp -r examples "$local_path"
+            gsutil -m cp -r "$local_path" "gs://$gs_base/"
+
+            echo "Document link(s):" >> gerrit_comment.txt
+            find "$local_path" | grep -e 'ipynb$' | \
+                sed -e "s|^$local_path|    https://nbviewer.jupyter.org/url/$gs_path|" >> gerrit_comment.txt
similarity index 73%
rename from jjb/releng/testapi-automate.yml
rename to jjb/releng/automate.yml
index dd76538..c6ca37f 100644 (file)
@@ -1,20 +1,22 @@
 - project:
-    name: testapi-automate
+    name: utils-automate
     stream:
         - master:
             branch: '{stream}'
-            gs-pathname: ''
+
+    module:
+        - 'testapi'
+        - 'reporting'
 
     phase:
-        - 'docker-update'
         - 'docker-deploy':
             slave-label: 'testresults'
         - 'generate-doc'
 
     jobs:
-        - 'testapi-automate-{stream}'
-        - 'testapi-automate-{phase}-{stream}'
-        - 'testapi-verify-{stream}'
+        - '{module}-automate-{stream}'
+        - '{module}-automate-{phase}-{stream}'
+        - '{module}-verify-{stream}'
 
     project: 'releng'
 
@@ -44,7 +46,7 @@
         - mongodb-backup
 
 - job-template:
-    name: 'testapi-verify-{stream}'
+    name: '{module}-verify-{stream}'
 
     parameters:
         - project-parameter:
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: 'ANT'
-                    pattern: 'utils/test/testapi/**'
+                    pattern: 'utils/test/{module}/**'
 
     builders:
-        - run-unit-tests
+        - shell: |
+            cd ./utils/test/{module}/
+            tox
+            if ls *.xml >/dev/null 2>&1; then
+                cp *.xml $WORKSPACE
+            fi
 
     publishers:
-        - junit:
-            results: nosetests.xml
-        - cobertura:
-            report-file: "coverage.xml"
-            only-stable: "true"
-            health-auto-update: "false"
-            stability-auto-update: "false"
-            zoom-coverage-chart: "true"
-            targets:
-                - files:
-                    healthy: 10
-                    unhealthy: 20
-                    failing: 30
-                - method:
-                    healthy: 50
-                    unhealthy: 40
-                    failing: 30
+        - publish-coverage
+        - email-jenkins-admins-on-failure
 
 - job-template:
-    name: 'testapi-automate-{stream}'
+    name: '{module}-automate-{stream}'
 
     project-type: multijob
 
             branch: '{branch}'
         - string:
             name: DOCKER_TAG
-            default: "latest"
-            description: "Tag name for testapi docker image"
+            default: 'latest'
+            description: 'Tag name for {module} docker image'
+        - string:
+            name: MODULE_NAME
+            default: '{module}'
+            description: "Name of the module"
         - 'opnfv-build-defaults'
 
     scm:
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: 'ANT'
-                    pattern: 'utils/test/testapi/**'
+                    pattern: 'utils/test/{module}/**'
 
     builders:
         - description-setter:
             description: "Built on $NODE_NAME"
-        - multijob:
-            name: docker-update
-            condition: SUCCESSFUL
-            projects:
-                - name: 'testapi-automate-docker-update-{stream}'
-                  current-parameters: true
-                  kill-phase-on: FAILURE
-                  abort-all-job: true
+        - docker-update
         - multijob:
             name: docker-deploy
             condition: SUCCESSFUL
             projects:
-                - name: 'testapi-automate-docker-deploy-{stream}'
+                - name: '{module}-automate-docker-deploy-{stream}'
                   current-parameters: false
                   predefined-parameters: |
                     GIT_BASE=$GIT_BASE
             name: generate-doc
             condition: SUCCESSFUL
             projects:
-                - name: 'testapi-automate-generate-doc-{stream}'
+                - name: '{module}-automate-generate-doc-{stream}'
                   current-parameters: true
                   kill-phase-on: FAILURE
                   abort-all-job: true
 
     publishers:
         - 'email-publisher'
+        - email-jenkins-admins-on-failure
 
 - job-template:
-    name: 'testapi-automate-{phase}-{stream}'
+    name: '{module}-automate-{phase}-{stream}'
 
     properties:
         - throttle:
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
-        - string:
-            name: DOCKER_TAG
-            default: "latest"
-            description: "Tag name for testapi docker image"
 
     wrappers:
         - ssh-agent-wrapper
     builders:
         - description-setter:
             description: "Built on $NODE_NAME"
-        - 'testapi-automate-{phase}-macro'
+        - '{module}-automate-{phase}-macro'
 
 ################################
 # job builders
 ################################
 - builder:
-    name: mongodb-backup
+    name: 'docker-update'
     builders:
-        - shell: |
-            bash ./jjb/releng/testapi-backup-mongodb.sh
-
-- builder:
-    name: 'run-unit-tests'
-    builders:
-        - shell: |
-            bash ./utils/test/testapi/run_test.sh
-
-- builder:
-    name: 'testapi-automate-docker-update-macro'
-    builders:
-        - shell: |
-            bash ./jjb/releng/testapi-docker-update.sh
+        - shell:
+            !include-raw: ./docker-update.sh
 
 - builder:
     name: 'testapi-automate-generate-doc-macro'
         - shell: |
             bash ./utils/test/testapi/htmlize/push-doc-artifact.sh
 
+- builder:
+    name: 'reporting-automate-generate-doc-macro'
+    builders:
+        - shell: echo "To Be Done"
+
 - builder:
     name: 'testapi-automate-docker-deploy-macro'
     builders:
         - shell: |
-            bash ./jjb/releng/testapi-docker-deploy.sh
+            sudo bash ./jjb/releng/docker-deploy.sh "sudo docker run -dti --name testapi -p 8082:8000
+            -e mongodb_url=mongodb://172.17.0.1:27017
+            -e base_url=http://testresults.opnfv.org/test opnfv/testapi" \
+            "http://testresults.opnfv.org/test/" "testapi"
+
+- builder:
+    name: 'reporting-automate-docker-deploy-macro'
+    builders:
+        - shell: |
+            sudo bash ./jjb/releng/docker-deploy.sh "sudo docker run -itd --name reporting -p 8084:8000 opnfv/reporting" \
+            "http://testresults.opnfv.org/reporting2/reporting/index.html" "reporting"
+
+- builder:
+    name: mongodb-backup
+    builders:
+        - shell: |
+            bash ./jjb/releng/testapi-backup-mongodb.sh
 
 ################################
 # job publishers
     name: 'email-publisher'
     publishers:
         - email:
-            recipients: rohitsakala@gmail.com feng.xiaowei@zte.com.cn
+            recipients: rohitsakala@gmail.com feng.xiaowei@zte.com.cn morgan.richomme@orange.com
             notify-every-unstable-build: false
             send-to-individuals: true
diff --git a/jjb/releng/docker-deploy.sh b/jjb/releng/docker-deploy.sh
new file mode 100644 (file)
index 0000000..1e83577
--- /dev/null
@@ -0,0 +1,152 @@
+#!/bin/bash
+#  Licensed to the Apache Software Foundation (ASF) under one   *
+#  or more contributor license agreements.  See the NOTICE file *
+#  distributed with this work for additional information        *
+#  regarding copyright ownership.  The ASF licenses this file   *
+#  to you under the Apache License, Version 2.0 (the            *
+#  "License"); you may not use this file except in compliance   *
+#  with the License.  You may obtain a copy of the License at   *
+#                                                               *
+#    http://www.apache.org/licenses/LICENSE-2.0                 *
+#                                                               *
+#  Unless required by applicable law or agreed to in writing,   *
+#  software distributed under the License is distributed on an  *
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY       *
+#  KIND, either express or implied.  See the License for the    *
+#  specific language governing permissions and limitations      *
+#  under the License.                                           *
+
+
+command=$1
+url=$2
+module=$3
+
+REPO="opnfv"
+latest_image=$REPO/$module:latest
+old_image=$REPO/$module:old
+latest_container_name=$module
+old_container_name=$module"_old"
+latest_container_id=
+old_container_id=
+new_start_container=
+
+function DEBUG() {
+  echo `date "+%Y-%m-%d %H:%M:%S.%N"` ": $1"
+}
+
+function check_connectivity() {
+    # check update status via test the connectivity of provide url
+    sleep 5
+    curl -s --head --request GET "${url}" | grep '200 OK' > /dev/null
+    rc=$?
+    DEBUG $rc
+    if [[ $rc == 0 ]]; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+
+function pull_latest_image() {
+    DEBUG "pull latest image $latest_image"
+    docker pull $latest_image
+}
+
+function get_latest_running_container() {
+    latest_container_id=`docker ps -q --filter name=^/$latest_container_name$`
+}
+
+function get_old_running_container() {
+    old_container_id=`docker ps -q --filter name=^/$old_container_name$`
+}
+
+function delete_old_image() {
+    DEBUG "delete old image: $old_image"
+    docker rmi -f $old_image
+}
+
+function delete_old_container() {
+    DEBUG "delete old container: $old_container_name"
+    docker ps -a -q --filter name=^/$old_container_name$ | xargs docker rm -f &>/dev/null
+}
+
+function delete_latest_container() {
+    DEBUG "delete latest container: $module"
+    docker ps -a -q --filter name=^/$latest_container_name$ | xargs docker rm -f &>/dev/null
+}
+
+function delete_latest_image() {
+    DEBUG "delete latest image: $REPO/$module:latest"
+    docker rmi -f $latest_image
+}
+
+function change_image_tag_2_old() {
+    DEBUG "change image tag 2 old"
+    docker tag $latest_image $old_image
+    docker rmi -f $latest_image
+}
+
+function mark_latest_container_2_old() {
+    DEBUG "mark latest container to be old"
+    docker rename "$latest_container_name" "$old_container_name"
+}
+
+function stop_old_container() {
+    DEBUG "stop old container"
+    docker stop "$old_container_name"
+}
+
+function run_latest_image() {
+    new_start_container=`$command`
+    DEBUG "run latest image: $new_start_container"
+}
+
+get_latest_running_container
+get_old_running_container
+
+if [[ ! -z $latest_container_id ]]; then
+    DEBUG "latest container is running: $latest_container_id"
+    delete_old_container
+    delete_old_image
+    change_image_tag_2_old
+    mark_latest_container_2_old
+    pull_latest_image
+    stop_old_container
+    run_latest_image
+
+elif [[ ! -z $old_container_id ]]; then
+    DEBUG "old container is running: $old_container_id"
+    delete_latest_container
+    delete_latest_image
+    pull_latest_image
+    stop_old_container
+    run_latest_image
+else
+    DEBUG "no container is running"
+    delete_old_container
+    delete_old_image
+    delete_latest_container
+    delete_latest_image
+    pull_latest_image
+    run_latest_image
+fi
+
+if check_connectivity; then
+    DEBUG "CONGRATS: $module update successfully"
+else
+    DEBUG "ATTENTION: $module update failed"
+    id=`docker ps -a -q --filter name=^/$old_container_name$`
+    if [[ ! -z $id ]]; then
+        DEBUG "start old container instead"
+        docker stop $new_start_container
+        docker start $id
+    fi
+    if ! check_connectivity; then
+        DEBUG "BIG ISSUE: no container is running normally"
+    fi
+    exit 1
+fi
+
+docker images
+docker ps -a
diff --git a/jjb/releng/docker-update.sh b/jjb/releng/docker-update.sh
new file mode 100644 (file)
index 0000000..559ac83
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+#  Licensed to the Apache Software Foundation (ASF) under one   *
+#  or more contributor license agreements.  See the NOTICE file *
+#  distributed with this work for additional information        *
+#  regarding copyright ownership.  The ASF licenses this file   *
+#  to you under the Apache License, Version 2.0 (the            *
+#  "License"); you may not use this file except in compliance   *
+#  with the License.  You may obtain a copy of the License at   *
+#                                                               *
+#    http://www.apache.org/licenses/LICENSE-2.0                 *
+#                                                               *
+#  Unless required by applicable law or agreed to in writing,   *
+#  software distributed under the License is distributed on an  *
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY       *
+#  KIND, either express or implied.  See the License for the    *
+#  specific language governing permissions and limitations      *
+#  under the License.                                           *
+
+set -o errexit
+set -o nounset
+
+cd $WORKSPACE/utils/test/$MODULE_NAME/docker/
+
+# Remove previous containers
+docker ps -a | grep "opnfv/$MODULE_NAME" | awk '{ print $1 }' | xargs -r docker rm -f
+
+# Remove previous images
+docker images | grep "opnfv/$MODULE_NAME" | awk '{ print $3 }' | xargs -r docker rmi -f
+
+# Start build
+docker build --no-cache -t opnfv/$MODULE_NAME:$DOCKER_TAG .
+
+# Push Image
+docker push opnfv/$MODULE_NAME:$DOCKER_TAG
index 417fc70..d70640a 100644 (file)
             name: RELEASE_VERSION
             default: ""
             description: "Release version, e.g. 1.0, 2.0, 3.0"
+        - string:
+            name: DOCKER_DIR
+            default: "docker"
+            description: "Directory containing files needed by the Dockerfile"
         - string:
             name: DOCKERFILE
             default: "Dockerfile.aarch64"
@@ -83,3 +87,4 @@
     publishers:
         - email:
             recipients: '{receivers}'
+        - email-jenkins-admins-on-failure
index 2aa52ad..0de3df2 100644 (file)
@@ -54,7 +54,7 @@ if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then
     done
 fi
 
-cd $WORKSPACE/docker
+cd $WORKSPACE/$DOCKER_DIR
 HOST_ARCH=$(uname -m)
 if [ ! -f "${DOCKERFILE}" ]; then
     # If this is expected to be a Dockerfile for other arch than x86
@@ -73,6 +73,8 @@ fi
 # Get tag version
 echo "Current branch: $BRANCH"
 
+BUILD_BRANCH=$BRANCH
+
 if [[ "$BRANCH" == "master" ]]; then
     DOCKER_TAG="latest"
 elif [[ -n "${RELEASE_VERSION-}" ]]; then
@@ -82,19 +84,17 @@ else
     DOCKER_TAG="stable"
 fi
 
+if [[ -n "${COMMIT_ID-}" && -n "${RELEASE_VERSION-}" ]]; then
+    DOCKER_TAG=$RELEASE_VERSION
+    BUILD_BRANCH=$COMMIT_ID
+fi
+
 # Start the build
 echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
 echo "--------------------------------------------------------"
 echo
-if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then
-    if [[ -n "${RELEASE_VERSION-}" ]]; then
-        DOCKER_TAG=${RELEASE_VERSION}
-    fi
-    cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ."
-else
-    cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
-        -f $DOCKERFILE ."
-fi
+cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BUILD_BRANCH
+    -f $DOCKERFILE ."
 
 echo ${cmd}
 ${cmd}
index 5fe0eb9..414eba2 100644 (file)
     other-receivers: &other-receivers
         receivers: ''
 
-    project:
+    dockerfile: "Dockerfile"
+    dockerdir: "docker"
+
+    # This is the dockerhub repo the image will be pushed to as
+    # 'opnfv/{dockerrepo}'. See: DOCKER_REPO_NAME parameter.
+    # 'project' is the OPNFV repo we expect to contain the Dockerfile
+    dockerrepo:
         # projects with jobs for master
         - 'releng-anteater':
+            project: 'releng-anteater'
             <<: *master
             <<: *other-receivers
         - 'bottlenecks':
+            project: 'bottlenecks'
             <<: *master
             <<: *other-receivers
         - 'cperf':
+            project: 'cperf'
             <<: *master
             <<: *other-receivers
         - 'dovetail':
+            project: 'dovetail'
             <<: *master
             <<: *other-receivers
         - 'functest':
+            project: 'functest'
             <<: *master
             <<: *functest-receivers
         - 'qtip':
+            project: 'qtip'
             <<: *master
             <<: *other-receivers
-        - 'storperf':
+        - 'storperf-master':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-master'
+            <<: *master
+            <<: *other-receivers
+        - 'storperf-graphite':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-graphite'
+            <<: *master
+            <<: *other-receivers
+        - 'storperf-httpfrontend':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-httpfrontend'
+            <<: *master
+            <<: *other-receivers
+        - 'storperf-reporting':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-reporting'
+            <<: *master
+            <<: *other-receivers
+        - 'storperf-swaggerui':
+            project: 'storperf'
+            dockerdir: 'docker/storperf-swaggerui'
             <<: *master
             <<: *other-receivers
         - 'yardstick':
+            project: 'yardstick'
             <<: *master
             <<: *other-receivers
         # projects with jobs for stable
         - 'bottlenecks':
+            project: 'bottlenecks'
             <<: *danube
             <<: *other-receivers
         - 'functest':
+            project: 'functest'
             <<: *danube
             <<: *functest-receivers
         - 'qtip':
+            project: 'qtip'
             <<: *danube
             <<: *other-receivers
         - 'storperf':
+            project: 'storperf'
             <<: *danube
             <<: *other-receivers
         - 'yardstick':
+            project: 'yardstick'
             <<: *danube
             <<: *other-receivers
 
     jobs:
-        - '{project}-docker-build-push-{stream}'
+        - "{dockerrepo}-docker-build-push-{stream}"
 
 
 - project:
 
     name: opnfv-monitor-docker        # projects which only monitor dedicated file or path
 
+    dockerfile: "Dockerfile"
+    dockerdir: "docker"
+
     project:
         # projects with jobs for master
         - 'daisy':
+            dockerrepo: 'daisy'
             <<: *master
         - 'escalator':
+            dockerrepo: 'escalator'
             <<: *master
 
     jobs:
 # job templates
 ########################
 - job-template:
-    name: '{project}-docker-build-push-{stream}'
+    name: '{dockerrepo}-docker-build-push-{stream}'
 
     disabled: '{obj:disabled}'
 
             description: "To enable/disable pushing the image to Dockerhub."
         - string:
             name: DOCKER_REPO_NAME
-            default: "opnfv/{project}"
+            default: "opnfv/{dockerrepo}"
             description: "Dockerhub repo to be pushed to."
+        - string:
+            name: DOCKER_DIR
+            default: "{dockerdir}"
+            description: "Directory containing files needed by the Dockerfile"
+        - string:
+            name: COMMIT_ID
+            default: ""
+            description: "commit id to make a snapshot docker image"
         - string:
             name: RELEASE_VERSION
             default: ""
             description: "Release version, e.g. 1.0, 2.0, 3.0"
         - string:
             name: DOCKERFILE
-            default: "Dockerfile"
+            default: "{dockerfile}"
             description: "Dockerfile to use for creating the image."
 
     scm:
     publishers:
         - email:
             recipients: '{receivers}'
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: '{project}-docker-build-push-monitor-{stream}'
index 8c231c3..d253da0 100644 (file)
@@ -53,7 +53,7 @@
                     comment-contains-value: 'reverify'
             projects:
               - project-compare-type: 'REG_EXP'
-                project-pattern: 'functest|sdnvpn|qtip|daisy|sfc|escalator|releng|pharos|octopus|securedlab'
+                project-pattern: 'functest|sdnvpn|qtip|daisy|sfc|escalator|releng'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
                     comment-contains-value: 'reverify'
             projects:
               - project-compare-type: 'REG_EXP'
-                project-pattern: ''
+                project-pattern: 'octopus|releng-anteater|pharos'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
index ecc8730..dc9bfd5 100644 (file)
@@ -52,6 +52,7 @@
     publishers:
         - archive-artifacts:
             artifacts: 'job_output/*'
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: 'releng-merge-jjb'
diff --git a/jjb/releng/testapi-docker-deploy.sh b/jjb/releng/testapi-docker-deploy.sh
deleted file mode 100644 (file)
index b4e60b0..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-
-function check() {
-
-    # Verify hosted
-    sleep 5
-    cmd=`curl -s --head  --request GET http://testresults.opnfv.org/test/swagger/spec | grep '200 OK' > /dev/null`
-    rc=$?
-    echo $rc
-
-    if [[ $rc == 0 ]]
-    then
-        return 0
-    else
-        return 1
-    fi
-
-}
-
-echo "Getting contianer Id of the currently running one"
-contId=$(sudo docker ps | grep "opnfv/testapi:latest" | awk '{print $1}')
-
-echo "Pulling the latest image"
-sudo docker pull opnfv/testapi:latest
-
-echo "Deleting old containers of opnfv/testapi:old"
-sudo docker ps -a | grep "opnfv/testapi" | grep "old" | awk '{print $1}' | xargs -r sudo docker rm -f
-
-echo "Deleting old images of opnfv/testapi:latest"
-sudo docker images | grep "opnfv/testapi" | grep "old" | awk '{print $3}' | xargs -r sudo docker rmi -f
-
-
-if [[ -z "$contId" ]]
-then
-    echo "No running testapi container"
-
-    echo "Removing stopped testapi containers in the previous iterations"
-    sudo docker ps -f status=exited | grep "opnfv_testapi" | awk '{print $1}' | xargs -r sudo docker rm -f
-else
-    echo $contId
-
-    echo "Get the image id of the currently running conatiner"
-    currImgId=$(sudo docker ps | grep "$contId" | awk '{print $2}')
-    echo $currImgId
-
-    if [[ -z "$currImgId" ]]
-    then
-        echo "No image id found for the container id"
-        exit 1
-    fi
-
-    echo "Changing current image tag to old"
-    sudo docker tag "$currImgId" opnfv/testapi:old
-
-    echo "Removing stopped testapi containers in the previous iteration"
-    sudo docker ps -f status=exited | grep "opnfv_testapi" | awk '{print $1}' | xargs -r sudo docker rm -f
-
-    echo "Renaming the running container name to opnfv_testapi as to identify it."
-    sudo docker rename $contId opnfv_testapi
-
-    echo "Stop the currently running container"
-    sudo docker stop $contId
-fi
-
-echo "Running a container with the new image"
-sudo docker run -dti -p "8082:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/test" opnfv/testapi:latest
-
-if check; then
-    echo "TestResults Hosted."
-else
-    echo "TestResults Hosting Failed"
-    if [[ $(sudo docker images | grep "opnfv/testapi" | grep "old" | awk '{print $3}') ]]; then
-        echo "Running old Image"
-        sudo docker run -dti -p "8082:8000" -e "mongodb_url=mongodb://172.17.0.1:27017" -e "swagger_url=http://testresults.opnfv.org/test" opnfv/testapi:old
-        exit 1
-    fi
-fi
-
-# Echo Images and Containers
-sudo docker images
-sudo docker ps -a
diff --git a/jjb/releng/testapi-docker-update.sh b/jjb/releng/testapi-docker-update.sh
deleted file mode 100644 (file)
index 84f5c32..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -o errexit
-set -o nounset
-
-cd $WORKSPACE/utils/test/testapi/docker/
-
-# Remove previous containers
-docker ps -a | grep "opnfv/testapi" | awk '{ print $1 }' | xargs -r docker rm -f
-
-# Remove previous images
-docker images | grep "opnfv/testapi" | awk '{ print $3 }' | xargs -r docker rmi -f
-
-# Start build
-docker build --no-cache -t opnfv/testapi:$DOCKER_TAG .
-
-# Push Image
-docker push opnfv/testapi:$DOCKER_TAG
index 1e85536..430ced5 100644 (file)
                     pattern: '**/*.jinja2'
                   - compare-type: ANT
                     pattern: '**/*.yaml'
+            skip-vote:
+                successful: true
+                failed: true
+                unstable: true
+                notbuilt: true
+
     builders:
         - check-jinja
 
similarity index 81%
rename from jjb/snaps/snaps.yml
rename to jjb/sfc/sfc-project-jobs.yml
index 50b7c30..379fe79 100644 (file)
@@ -3,13 +3,12 @@
 # They will only be enabled on request by projects!
 ###################################################
 - project:
-    name: snaps
+    name: sfc-project-jobs
 
-    project: '{name}'
+    project: 'sfc'
 
     jobs:
-        - 'snaps-verify-{stream}'
-
+        - 'sfc-verify-{stream}'
     stream:
         - master:
             branch: '{stream}'
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 
 - job-template:
-    name: 'snaps-verify-{stream}'
+    name: 'sfc-verify-{stream}'
 
     disabled: '{obj:disabled}'
 
@@ -30,7 +29,6 @@
             project: '{project}'
             branch: '{branch}'
         - 'opnfv-build-ubuntu-defaults'
-
     scm:
         - git-scm-gerrit
 
                   - compare-type: ANT
                     pattern: 'docs/**|.gitignore'
 
+    builders:
+        - sfc-unit-tests
+
+################################
+# job builders
+################################
+
+- builder:
+    name: sfc-unit-tests
     builders:
         - shell: |
-            echo "Nothing to verify!"
+            cd $WORKSPACE && yamllint $(git ls-tree -r HEAD --name-only  | egrep 'yml$|yaml$')
similarity index 75%
rename from jjb/multisite/multisite-verify-jobs.yml
rename to jjb/snaps/snaps-verify-jobs.yml
index 9431e0b..01ea3e4 100644 (file)
@@ -3,37 +3,39 @@
 # They will only be enabled on request by projects!
 ###################################################
 - project:
-    name: multisite
+    name: snaps
 
     project: '{name}'
 
     jobs:
-        - 'multisite-verify-{stream}'
+        - 'snaps-verify-{stream}'
 
     stream:
         - master:
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
-            timed: '@midnight'
-        - danube:
-            branch: 'stable/{stream}'
-            gs-pathname: '/{stream}'
-            disabled: false
-            timed: ''
 
 - job-template:
-    name: 'multisite-verify-{stream}'
+    name: 'snaps-verify-{stream}'
 
     disabled: '{obj:disabled}'
 
-    concurrent: true
+    concurrent: false
 
     parameters:
         - project-parameter:
             project: '{project}'
             branch: '{branch}'
-        - 'opnfv-build-ubuntu-defaults'
+        - string:
+            name: DEPLOYMENT_HOST_IP
+            default: 192.168.122.2
+            description: 'IP of the deployment node'
+        - string:
+            name: CONTROLLER_IP
+            default: 192.168.122.3
+            description: 'IP of the controller node'
+        - 'intel-virtual10-defaults'
 
     scm:
         - git-scm-gerrit
@@ -66,4 +68,5 @@
         - shell: |
             #!/bin/bash
 
-            echo "Hello World"
+            cd $WORKSPACE/ci
+            ./run_tests.sh $DEPLOYMENT_HOST_IP $CONTROLLER_IP
diff --git a/jjb/storperf/storperf-daily-jobs.yml b/jjb/storperf/storperf-daily-jobs.yml
new file mode 100644 (file)
index 0000000..e849e29
--- /dev/null
@@ -0,0 +1,175 @@
+###################################
+# job configuration for storperf
+###################################
+- project:
+    name: storperf-daily
+
+    project: storperf
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+    master: &master
+        stream: master
+        branch: '{stream}'
+        gs-pathname: ''
+        docker-tag: 'latest'
+#--------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#--------------------------------
+#    Installers using labels
+#            CI PODs
+# This section should only contain the installers
+# that have been switched using labels for slaves
+#--------------------------------
+    pod:
+## fuel CI PODs
+#        - baremetal:
+#            slave-label: fuel-baremetal
+#            installer: fuel
+#            <<: *master
+#        - virtual:
+#            slave-label: fuel-virtual
+#            installer: fuel
+#            <<: *master
+## joid CI PODs
+#        - baremetal:
+#            slave-label: joid-baremetal
+#            installer: joid
+#            <<: *master
+#        - virtual:
+#            slave-label: joid-virtual
+#            installer: joid
+#            <<: *master
+## compass CI PODs
+#        - baremetal:
+#            slave-label: compass-baremetal
+#            installer: compass
+#            <<: *master
+#        - virtual:
+#            slave-label: compass-virtual
+#            installer: compass
+#            <<: *master
+## apex CI PODs
+#        - virtual:
+#            slave-label: apex-virtual-master
+#            installer: apex
+#            <<: *master
+        - baremetal:
+            slave-label: apex-baremetal-master
+            installer: apex
+            <<: *master
+## armband CI PODs
+#        - armband-baremetal:
+#            slave-label: armband-baremetal
+#            installer: fuel
+#            <<: *master
+#        - armband-virtual:
+#            slave-label: armband-virtual
+#            installer: fuel
+#            <<: *master
+## daisy CI PODs
+#        - baremetal:
+#            slave-label: daisy-baremetal
+#            installer: daisy
+#            <<: *master
+#        - virtual:
+#            slave-label: daisy-virtual
+#            installer: daisy
+#            <<: *master
+
+    jobs:
+        - 'storperf-{installer}-{pod}-daily-{stream}'
+
+################################
+# job template
+################################
+- job-template:
+    name: 'storperf-{installer}-{pod}-daily-{stream}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - throttle:
+            enabled: true
+            max-per-node: 1
+            option: 'project'
+
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER Scenario: $DEPLOY_SCENARIO'
+        - timeout:
+            timeout: '30'
+            abort: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{slave-label}-defaults'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'os-odl_l2-nofeature-ha'
+        - string:
+            name: DOCKER_TAG
+            default: '{docker-tag}'
+            description: 'Tag to pull docker image'
+        - string:
+            name: CLEAN_DOCKER_IMAGES
+            default: 'false'
+            description: 'Remove downloaded docker images (opnfv/storperf*:*)'
+        - string:
+            name: GS_PATHNAME
+            default: '{gs-pathname}'
+            description: "Version directory where the opnfv documents will be stored in gs repository"
+        - string:
+            name: DISK_TYPE
+            default: 'HDD'
+            description: 'The type of hard disk that Cinder uses'
+        - string:
+            name: VOLUME_SIZE
+            default: '2'
+            description: 'Size of Cinder volume (in GB)'
+        - string:
+            name: WORKLOADS
+            default: 'rw'
+            description: 'Workloads to run'
+        - string:
+            name: BLOCK_SIZES
+            default: '16384'
+            description: 'Block sizes for VM I/O operations'
+        - string:
+            name: QUEUE_DEPTHS
+            default: '4'
+            description: 'Number of simultaneous I/O operations to keep active'
+        - string:
+            name: STEADY_STATE_SAMPLES
+            default: '10'
+            description: 'Number of samples to use (1 per minute) to measure steady state'
+        - string:
+            name: TEST_CASE
+            choices:
+                - 'snia_steady_state'
+            description: 'The test case to run'
+
+    scm:
+        - git-scm
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - 'storperf-daily-builder'
+
+########################
+# builder macros
+########################
+- builder:
+    name: storperf-daily-builder
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            cd $WORKSPACE
+            ./ci/daily.sh
diff --git a/jjb/storperf/storperf-verify-jobs.yml b/jjb/storperf/storperf-verify-jobs.yml
new file mode 100644 (file)
index 0000000..55c4e4c
--- /dev/null
@@ -0,0 +1,190 @@
+- project:
+    name: storperf-verify
+
+    project: 'storperf'
+
+#--------------------------------
+# branches
+#--------------------------------
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+            disabled: false
+            docker-tag: 'latest'
+#--------------------------------
+# patch verification phases
+#--------------------------------
+    phase:
+        - 'unit-test':
+            slave-label: 'opnfv-build-ubuntu'
+        - 'build-x86_64':
+            slave-label: 'opnfv-build-ubuntu'
+        - 'build-aarch64':
+            slave-label: 'opnfv-build-ubuntu-arm'
+#--------------------------------
+# jobs
+#--------------------------------
+    jobs:
+        - 'storperf-verify-{stream}'
+        - 'storperf-verify-{phase}-{stream}'
+#--------------------------------
+# job templates
+#--------------------------------
+- job-template:
+    name: 'storperf-verify-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    project-type: 'multijob'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'opnfv-build-defaults'
+
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            # we do nothing here as the main stuff will be done
+            # in phase jobs
+            echo "Triggering phase jobs!"
+        - multijob:
+            name: 'storperf-build-and-unittest'
+            execution-type: PARALLEL
+            projects:
+                - name: 'storperf-verify-unit-test-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                    GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
+                  git-revision: true
+                  node-parameters: false
+                  kill-phase-on: FAILURE
+                  abort-all-job: false
+                - name: 'storperf-verify-build-x86_64-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                    GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
+                    ARCH=x86_64
+                  git-revision: true
+                  node-parameters: false
+                  kill-phase-on: FAILURE
+                  abort-all-job: false
+                - name: 'storperf-verify-build-aarch64-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                    GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
+                    ARCH=aarch64
+                  git-revision: true
+                  node-parameters: false
+                  kill-phase-on: FAILURE
+                  abort-all-job: false
+
+- job-template:
+    name: 'storperf-verify-{phase}-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    wrappers:
+        - ssh-agent-wrapper
+        - build-timeout:
+            timeout: 30
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - '{slave-label}-defaults'
+
+    scm:
+        - git-scm-gerrit
+
+    builders:
+        - 'storperf-verify-{phase}-builders-macro'
+
+    publishers:
+        - 'storperf-verify-{phase}-publishers-macro'
+#--------------------------------
+# builder macros
+#--------------------------------
+- builder:
+    name: 'storperf-verify-unit-test-builders-macro'
+    builders:
+        - shell: |
+            $WORKSPACE/ci/verify.sh
+- builder:
+    name: 'storperf-verify-build-x86_64-builders-macro'
+    builders:
+        - shell: |
+            $WORKSPACE/ci/verify-build.sh
+- builder:
+    name: 'storperf-verify-build-aarch64-builders-macro'
+    builders:
+        - shell: |
+            $WORKSPACE/ci/verify-build.sh
+#--------------------------------
+# publisher macros
+#--------------------------------
+- publisher:
+    name: 'storperf-verify-unit-test-publishers-macro'
+    publishers:
+        - junit:
+            results: nosetests.xml
+        - cobertura:
+            report-file: "coverage.xml"
+            only-stable: "true"
+            health-auto-update: "true"
+            stability-auto-update: "true"
+            zoom-coverage-chart: "true"
+            targets:
+                - files:
+                    healthy: 10
+                    unhealthy: 20
+                    failing: 30
+                - method:
+                    healthy: 50
+                    unhealthy: 40
+                    failing: 30
+        - email-jenkins-admins-on-failure
+- publisher:
+    name: 'storperf-verify-build-x86_64-publishers-macro'
+    publishers:
+        - email-jenkins-admins-on-failure
+- publisher:
+    name: 'storperf-verify-build-aarch64-publishers-macro'
+    publishers:
+        - email-jenkins-admins-on-failure
index be53b27..307becf 100644 (file)
@@ -4,9 +4,7 @@
     project: '{name}'
 
     jobs:
-        - 'storperf-verify-{stream}'
         - 'storperf-merge-{stream}'
-        - 'storperf-daily-{stream}'
 
     stream:
         - master:
             disabled: false
             docker-tag: 'stable'
 
-- job-template:
-    name: 'storperf-verify-{stream}'
-
-    disabled: '{obj:disabled}'
-
-    node: opnfv-build-ubuntu
-
-    parameters:
-        - project-parameter:
-            project: '{project}'
-            branch: '{branch}'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: "Used for overriding the GIT URL coming from Global Jenkins configuration in case if the stuff is done on none-LF HW."
-
-    scm:
-        - git-scm-gerrit
-
-    triggers:
-        - gerrit:
-            server-name: 'gerrit.opnfv.org'
-            trigger-on:
-                - patchset-created-event:
-                    exclude-drafts: 'false'
-                    exclude-trivial-rebase: 'false'
-                    exclude-no-code-change: 'false'
-                - draft-published-event
-                - comment-added-contains-event:
-                    comment-contains-value: 'recheck'
-                - comment-added-contains-event:
-                    comment-contains-value: 'reverify'
-            projects:
-              - project-compare-type: 'ANT'
-                project-pattern: '{project}'
-                branches:
-                  - branch-compare-type: 'ANT'
-                    branch-pattern: '**/{branch}'
-
-    builders:
-        - shell: |
-            $WORKSPACE/ci/verify.sh
-
-    publishers:
-        - junit:
-            results: nosetests.xml
-        - cobertura:
-            report-file: "coverage.xml"
-            only-stable: "true"
-            health-auto-update: "true"
-            stability-auto-update: "true"
-            zoom-coverage-chart: "true"
-            targets:
-                - files:
-                    healthy: 10
-                    unhealthy: 20
-                    failing: 30
-                - method:
-                    healthy: 50
-                    unhealthy: 40
-                    failing: 30
-
 - job-template:
     name: 'storperf-merge-{stream}'
 
                     healthy: 50
                     unhealthy: 40
                     failing: 30
-
-- job-template:
-    name: 'storperf-daily-{stream}'
-
-    # Job template for daily builders
-    #
-    # Required Variables:
-    #     stream:    branch with - in place of / (eg. stable)
-    #     branch:    branch (eg. stable)
-    disabled: '{obj:disabled}'
-
-    parameters:
-        - project-parameter:
-            project: '{project}'
-            branch: '{branch}'
-        - 'intel-pod9-defaults'
-        - string:
-            name: DEPLOY_SCENARIO
-            default: 'os-nosdn-nofeature-noha'
-        - string:
-            name: DOCKER_TAG
-            default: '{docker-tag}'
-            description: 'Tag to pull docker image'
-        - choice:
-            name: DISK_TYPE
-            choices:
-                - 'SSD'
-                - 'HDD'
-            default: 'HDD'
-            description: 'The type of hard disk that Cinder uses'
-        - string:
-            name: AGENT_COUNT
-            description: 'The number of slave agents to start. Defaults to the cinder node count'
-        - string:
-            name: VOLUME_SIZE
-            default: '4'
-            description: 'Size of Cinder volume (in GB)'
-        - string:
-            name: WORKLOADS
-            default: 'wr,rr,rw'
-            description: 'Workloads to run'
-        - string:
-            name: BLOCK_SIZES
-            default: '2048,16384'
-            description: 'Block sizes for VM I/O operations'
-        - string:
-            name: QUEUE_DEPTHS
-            default: '1,4'
-            description: 'Number of simultaneous I/O operations to keep active'
-        - string:
-            name: STEADY_STATE_SAMPLES
-            default: '10'
-            description: 'Number of samples to use (1 per minute) to measure steady state'
-        - string:
-            name: DEADLINE
-            description: 'Maximum run time in minutes if steady state cannot be found. Defaults to 3 times steady state samples'
-        - choice:
-            name: TEST_CASE
-            choices:
-                - 'snia_steady_state'
-            description: 'The test case to run'
-
-    scm:
-        - git-scm
-
-    triggers:
-        - timed: '0 18 * * *'
-
-    builders:
-        - shell: |
-            $WORKSPACE/ci/daily.sh
+        - email-jenkins-admins-on-failure
 
diff --git a/jjb/test-requirements.txt b/jjb/test-requirements.txt
deleted file mode 100644 (file)
index 6b700dc..0000000
+++ /dev/null
@@ -1 +0,0 @@
-jenkins-job-builder
index d5a444d..6f3f51a 100644 (file)
@@ -15,8 +15,8 @@
             project-repo: 'https://git.openstack.org/openstack/bifrost'
             clone-location: '/opt/bifrost'
         - 'opnfv':
-            project-repo: 'https://gerrit.opnfv.org/gerrit/releng'
-            clone-location: '/opt/releng'
+            project-repo: 'https://gerrit.opnfv.org/gerrit/releng-xci'
+            clone-location: '/opt/releng-xci'
 
 #--------------------------------
 # jobs
@@ -83,6 +83,7 @@
     publishers:
         - email:
             recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+        - email-jenkins-admins-on-failure
 #--------------------------------
 # trigger macros
 #--------------------------------
             silent-start: true
             projects:
               - project-compare-type: 'ANT'
-                project-pattern: 'releng'
+                project-pattern: 'releng-xci'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: ANT
-                    pattern: 'prototypes/bifrost/**'
+                    pattern: 'bifrost/**'
             readable-message: true
index 3e9ff67..7ef11a4 100644 (file)
@@ -1,5 +1,5 @@
 - project:
-    project: 'releng'
+    project: 'releng-xci'
 
     name: 'bifrost-periodic'
 #--------------------------------
     # trigger is disabled until we know which jobs we will have
     # and adjust stuff accordingly
     triggers:
-        - timed: '#@midnight'
+        - timed: ''  # '@midnight'
 
     builders:
         - description-setter:
index b37da90..4d646a6 100755 (executable)
@@ -38,7 +38,7 @@ if [[ ! "$DISTRO" =~ (xenial|centos7|suse) ]]; then
 fi
 
 # remove previously cloned repos
-sudo /bin/rm -rf /opt/bifrost /opt/openstack-ansible /opt/releng /opt/functest
+sudo /bin/rm -rf /opt/bifrost /opt/openstack-ansible /opt/releng-xci /opt/functest
 
 # Fix up permissions
 fix_ownership
@@ -65,16 +65,16 @@ cd /opt/bifrost && sudo git checkout --quiet $OPENSTACK_BIFROST_VERSION
 echo "xci: using bifrost commit"
 git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>'
 
-sudo git clone --quiet https://gerrit.opnfv.org/gerrit/releng /opt/releng
-cd /opt/releng && sudo git checkout --quiet $OPNFV_RELENG_VERSION
+sudo git clone --quiet https://gerrit.opnfv.org/gerrit/releng-xci /opt/releng-xci
+cd /opt/releng-xci && sudo git checkout --quiet $OPNFV_RELENG_VERSION
 echo "xci: using releng commit"
 git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>'
 
 # source flavor vars
-source "$WORKSPACE/prototypes/xci/config/${XCI_FLAVOR}-vars"
+source "$WORKSPACE/xci/config/${XCI_FLAVOR}-vars"
 
 # combine opnfv and upstream scripts/playbooks
-sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
+sudo /bin/cp -rf /opt/releng-xci/bifrost/* /opt/bifrost/
 
 # cleanup remnants of previous deployment
 cd /opt/bifrost
index 319f8eb..ef604fc 100644 (file)
@@ -14,8 +14,8 @@
             project-repo: 'https://git.openstack.org/openstack/bifrost'
             clone-location: '$WORKSPACE/bifrost'
         - 'opnfv':
-            project-repo: 'https://gerrit.opnfv.org/gerrit/releng'
-            clone-location: '$WORKSPACE/releng'
+            project-repo: 'https://gerrit.opnfv.org/gerrit/releng-xci'
+            clone-location: '$WORKSPACE/releng-xci'
 #--------------------------------
 # distros
 #--------------------------------
@@ -29,7 +29,7 @@
         - 'centos7':
             disabled: false
             dib-os-release: '7'
-            dib-os-element: 'centos7'
+            dib-os-element: 'centos-minimal'
             dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
             extra-dib-elements: 'openssh-server'
         - 'suse':
 
     wrappers:
         - fix-workspace-permissions
+        - build-timeout:
+            timeout: 90
 
     publishers:
         - email:
             recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com
+        - email-jenkins-admins-on-failure
 #--------------------------------
 # trigger macros
 #--------------------------------
             custom-url: '* $JOB_NAME $BIFROST_LOG_URL/index.html'
             projects:
               - project-compare-type: 'ANT'
-                project-pattern: 'releng'
+                project-pattern: 'releng-xci'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
                 file-paths:
                   - compare-type: ANT
-                    pattern: 'prototypes/bifrost/**'
+                    pattern: 'bifrost/**'
             readable-message: true
 
 #---------------------------
index b522b89..03d9afc 100755 (executable)
@@ -57,16 +57,17 @@ EOF
 </html>
 EOF
 
+    # Upload landing page
+    echo "Uploading the landing page"
+    gsutil -q cp ${WORKSPACE}/index.html ${BIFROST_GS_URL}/index.html
+    rm -f ${WORKSPACE}/index.html
+
     # Finally, download and upload the entire build log so we can retain
     # as much build information as possible
     echo "Uploading the final console output"
     curl -s -L ${BIFROST_CONSOLE_LOG} > ${WORKSPACE}/build_log.txt
     gsutil -q cp -Z ${WORKSPACE}/build_log.txt ${BIFROST_GS_URL}/build_log.txt
-    rm ${WORKSPACE}/build_log.txt
-
-    # Upload landing page
-    gsutil -q cp ${WORKSPACE}/index.html ${BIFROST_GS_URL}/index.html
-    rm ${WORKSPACE}/index.html
+    rm -f ${WORKSPACE}/build_log.txt
 }
 
 function fix_ownership() {
@@ -83,6 +84,9 @@ function fix_ownership() {
 
 function cleanup_and_upload() {
     original_exit=$?
+    echo "Job exit code: $original_exit"
+    # Turn off errexit
+    set +o errexit
     fix_ownership
     upload_logs
     exit $original_exit
@@ -95,21 +99,21 @@ if [[ ! "$DISTRO" =~ (xenial|centos7|suse) ]]; then
 fi
 
 # remove previously cloned repos
-/bin/rm -rf $WORKSPACE/bifrost $WORKSPACE/releng
+/bin/rm -rf $WORKSPACE/bifrost $WORKSPACE/releng-xci
 
 # Fix up permissions
 fix_ownership
 
 # clone all the repos first and checkout the patch afterwards
 git clone https://git.openstack.org/openstack/bifrost $WORKSPACE/bifrost
-git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng
+git clone https://gerrit.opnfv.org/gerrit/releng-xci $WORKSPACE/releng-xci
 
 # checkout the patch
 cd $CLONE_LOCATION
 git fetch $PROJECT_REPO $GERRIT_REFSPEC && sudo git checkout FETCH_HEAD
 
 # combine opnfv and upstream scripts/playbooks
-/bin/cp -rf $WORKSPACE/releng/prototypes/bifrost/* $WORKSPACE/bifrost/
+/bin/cp -rf $WORKSPACE/releng-xci/bifrost/* $WORKSPACE/bifrost/
 
 # cleanup remnants of previous deployment
 cd $WORKSPACE/bifrost
index 56a4b18..8335750 100644 (file)
 - project:
-    project: 'releng'
+    name: 'opnfv-osa-periodic'
 
-    name: 'os-periodic'
+    project: 'releng-xci'
 #--------------------------------
-# Branch Anchors
+# branches
 #--------------------------------
-# the versions stated here default to branches which then later
-# on used for checking out the branches, pulling in head of the branch.
-    master: &master
-        stream: master
-        openstack-osa-version: '{stream}'
-        opnfv-releng-version: 'master'
-        gs-pathname: ''
-    ocata: &ocata
-        stream: ocata
-        openstack-osa-version: 'stable/{stream}'
-        opnfv-releng-version: 'master'
-        gs-pathname: '/{stream}'
+    stream:
+        - master:
+            branch: '{stream}'
 #--------------------------------
-#        XCI PODs
-#--------------------------------
-    pod:
-        - virtual:
-            <<: *master
-        - virtual:
-            <<: *ocata
-#--------------------------------
-# Supported Distros
+# distros
 #--------------------------------
     distro:
         - 'xenial':
             disabled: false
-            slave-label: xci-xenial-virtual
-            dib-os-release: 'xenial'
-            dib-os-element: 'ubuntu-minimal'
-            dib-os-packages: 'vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables'
-            extra-dib-elements: 'openssh-server'
         - 'centos7':
             disabled: true
-            slave-label: xci-centos7-virtual
-            dib-os-release: '7'
-            dib-os-element: 'centos7'
-            dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
-            extra-dib-elements: 'openssh-server'
         - 'suse':
             disabled: true
-            slave-label: xci-suse-virtual
-            dib-os-release: '42.2'
-            dib-os-element: 'opensuse-minimal'
-            dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
-            extra-dib-elements: 'openssh-server'
-
+#--------------------------------
+# type
+#--------------------------------
+    type:
+        - virtual
+#--------------------------------
+# phases
+#--------------------------------
+    phase:
+        - 'deploy'
+        - 'healthcheck'
 #--------------------------------
 # jobs
 #--------------------------------
     jobs:
-        - 'osa-deploy-{pod}-{distro}-periodic-{stream}'
-
+        - 'osa-periodic-{distro}-{type}-{stream}'
+        - 'osa-periodic-{phase}-{type}-{stream}'
 #--------------------------------
 # job templates
 #--------------------------------
 - job-template:
-    name: 'osa-deploy-{pod}-{distro}-periodic-{stream}'
+    name: 'osa-periodic-{distro}-{type}-{stream}'
+
+    project-type: multijob
 
     disabled: '{obj:disabled}'
 
     concurrent: false
 
     properties:
+        - logrotate-default
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - '^xci-os.*'
-                - '^xci-deploy.*'
-                - '^xci-functest.*'
-                - '^bifrost-.*periodic.*'
-                - '^osa-.*periodic.*'
+                - 'xci-verify-.*'
+                - 'bifrost-verify-.*'
+                - 'bifrost-periodic-.*'
+                - 'osa-verify-.*'
+                - 'osa-periodic-.*'
             block-level: 'NODE'
+
+    wrappers:
+        - ssh-agent-wrapper
+        - build-timeout:
+            timeout: 240
+        - fix-workspace-permissions
+
+    scm:
+        - git-scm-osa
+
+    triggers:
+        - pollscm:
+            cron: "@midnight"
+            ignore-post-commit-hooks: True
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - label:
+            name: SLAVE_LABEL
+            default: 'xci-virtual-{distro}'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'osa-periodic-deploy-{type}-{stream}'
+                  current-parameters: true
+                  predefined-parameters: |
+                    DISTRO={distro}
+                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+                  git-revision: true
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: healthcheck
+            condition: SUCCESSFUL
+            projects:
+                - name: 'osa-periodic-healthcheck-{type}-{stream}'
+                  current-parameters: true
+                  predefined-parameters: |
+                    DISTRO={distro}
+                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+                    FUNCTEST_SUITE_NAME=healthcheck
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: false
+
+- job-template:
+    name: 'osa-periodic-{phase}-{type}-{stream}'
+
+    disabled: false
+
+    concurrent: true
+
+    properties:
         - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'xci-verify-deploy-.*'
+                - 'xci-verify-healthcheck-.*'
+                - 'bifrost-verify-.*'
+                - 'bifrost-periodic-.*'
+                - 'osa-verify-.*'
+                - 'osa-periodic-.*'
+            block-level: 'NODE'
 
     parameters:
         - project-parameter:
             project: '{project}'
-            branch: '{opnfv-releng-version}'
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-        - string:
-            name: XCI_FLAVOR
-            default: 'ha'
+            branch: '{branch}'
+        - label:
+            name: SLAVE_LABEL
+            default: 'xci-virtual-{distro}'
         - string:
             name: OPENSTACK_OSA_VERSION
-            default: '{openstack-osa-version}'
-        - string:
-            name: OPNFV_RELENG_VERSION
-            default: '{opnfv-releng-version}'
+            default: 'master'
         - string:
             name: DISTRO
-            default: '{distro}'
+            default: 'xenial'
         - string:
-            name: DIB_OS_RELEASE
-            default: '{dib-os-release}'
+            name: DEPLOY_SCENARIO
+            default: 'os-nosdn-nofeature-noha'
         - string:
-            name: DIB_OS_ELEMENT
-            default: '{dib-os-element}'
+            name: XCI_FLAVOR
+            default: 'mini'
         - string:
-            name: DIB_OS_PACKAGES
-            default: '{dib-os-packages}'
+            name: XCI_LOOP
+            default: 'periodic'
         - string:
-            name: EXTRA_DIB_ELEMENTS
-            default: '{extra-dib-elements}'
+            name: OPNFV_RELENG_DEV_PATH
+            default: $WORKSPACE/releng-xci
         - string:
-            name: CLEAN_DIB_IMAGES
-            default: 'true'
-        - label:
-            name: SLAVE_LABEL
-            default: '{slave-label}'
+            name: FUNCTEST_SUITE_NAME
+            default: 'healthcheck'
         - string:
             name: ANSIBLE_VERBOSITY
-            default: ''
+            default: '-vvvv'
         - string:
-            name: XCI_LOOP
-            default: 'periodic'
-
-    wrappers:
-        - fix-workspace-permissions
+            name: FORCE_MASTER
+            default: 'true'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
 
     scm:
-        - git-scm
+        - git-scm-osa
 
-    # trigger is disabled until we know which jobs we will have
-    # and adjust stuff accordingly
-    triggers:
-        - timed: '#@midnight'
+    wrappers:
+        - ssh-agent-wrapper
+        - build-timeout:
+            timeout: 240
+        - fix-workspace-permissions
 
     builders:
         - description-setter:
-            description: "Built on $NODE_NAME - Scenario: $DEPLOY_SCENARIO"
-        - 'osa-deploy-builder'
+            description: "Built on $NODE_NAME"
+        - 'osa-periodic-{phase}-macro'
 
-#---------------------------
+#--------------------------------
 # builder macros
-#---------------------------
+#--------------------------------
 - builder:
-    name: osa-deploy-builder
+    name: 'osa-periodic-deploy-macro'
     builders:
-        - shell:
-            !include-raw: ./xci-deploy.sh
+        - shell: |
+            #!/bin/bash
+
+            # here we will
+            # - clone releng-xci repo as the jobs are running against openstack gerrit
+            #   and we need to clone releng-xci ourselves to $OPNFV_RELENG_DEV_PATH
+            # - run sources-branch-updater.sh from osa to update/pin the role versions
+            #   at the time this job gets triggered against osa master in case if the
+            #   deployment succeeds and we decide to bump version used by xci
+            # - copy generated role versions into $OPNFV_RELENG_DEV_PATH/xci/file
+            # - start the deployment by executing xci-deploy.sh as usual
+            #
+            # we might also need to pin versions of openstack services as well.
+
+            echo "Hello World!"
+
+- builder:
+    name: 'osa-periodic-healthcheck-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "Hello World!"
+#--------------------------------
+# scm macro
+#--------------------------------
+- scm:
+    name: git-scm-osa
+    scm:
+        - git:
+            url: https://review.openstack.org/p/openstack/openstack-ansible.git
+            branches:
+                - master
+            timeout: 15
index 64e13d3..42079b2 100644 (file)
@@ -6,7 +6,7 @@
 # are checked out based on what is configured.
 #--------------------------------
 - project:
-    project: 'releng'
+    project: 'releng-xci'
 
     name: 'xci-daily'
 #--------------------------------
     publishers:
         - email:
             recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com
+        - email-jenkins-admins-on-failure
 
 - job-template:
     name: 'xci-{phase}-{pod}-{distro}-daily-{stream}'
index 8ad6378..211d282 100755 (executable)
@@ -11,7 +11,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-cd $WORKSPACE/prototypes/xci
+cd $WORKSPACE/xci
 
 # for daily jobs, we want to use working versions
 # for periodic jobs, we will use whatever is set in the job, probably master
@@ -53,7 +53,7 @@ if [[ "$XCI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
 fi
 
 # proceed with the deployment
-cd $WORKSPACE/prototypes/xci
+cd $WORKSPACE/xci
 ./xci-deploy.sh
 
 if [[ "$JOB_NAME" =~ "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
diff --git a/jjb/xci/xci-verify-jobs.yml b/jjb/xci/xci-verify-jobs.yml
new file mode 100644 (file)
index 0000000..8d1ee55
--- /dev/null
@@ -0,0 +1,226 @@
+- project:
+    name: 'opnfv-xci-verify'
+
+    project: releng-xci
+#--------------------------------
+# branches
+#--------------------------------
+    stream:
+        - master:
+            branch: '{stream}'
+#--------------------------------
+# distros
+#--------------------------------
+    distro:
+        - 'xenial':
+            disabled: false
+        - 'centos7':
+            disabled: true
+        - 'suse':
+            disabled: true
+#--------------------------------
+# type
+#--------------------------------
+    type:
+        - virtual
+#--------------------------------
+# patch verification phases
+#--------------------------------
+    phase:
+        - 'deploy'
+        - 'healthcheck'
+#--------------------------------
+# jobs
+#--------------------------------
+    jobs:
+        - 'xci-verify-{distro}-{type}-{stream}'
+        - 'xci-verify-{phase}-{type}-{stream}'
+#--------------------------------
+# job templates
+#--------------------------------
+- job-template:
+    name: 'xci-verify-{distro}-{type}-{stream}'
+
+    project-type: multijob
+
+    disabled: '{obj:disabled}'
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'xci-verify-.*'
+                - 'bifrost-verify-.*'
+                - 'bifrost-periodic-.*'
+                - 'osa-verify-.*'
+                - 'osa-periodic-.*'
+            block-level: 'NODE'
+
+    wrappers:
+        - ssh-agent-wrapper
+        - build-timeout:
+            timeout: 240
+        - fix-workspace-permissions
+
+    scm:
+        - git-scm-gerrit
+
+    triggers:
+        - gerrit:
+            server-name: 'gerrit.opnfv.org'
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+                file-paths:
+                  - compare-type: ANT
+                    pattern: 'xci/**'
+                disable-strict-forbidden-file-verification: 'true'
+                forbidden-file-paths:
+                  - compare-type: ANT
+                    pattern: 'bifrost/**'
+                  - compare-type: ANT
+                    pattern: 'openstack-ansible/**'
+                  - compare-type: ANT
+                    pattern: 'puppet-infracloud/**'
+                  - compare-type: ANT
+                    pattern: 'README.rst'
+            readable-message: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - label:
+            name: SLAVE_LABEL
+            default: 'xci-virtual-{distro}'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - multijob:
+            name: deploy
+            condition: SUCCESSFUL
+            projects:
+                - name: 'xci-verify-deploy-{type}-{stream}'
+                  current-parameters: true
+                  predefined-parameters: |
+                    DISTRO={distro}
+                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+                  node-parameters: true
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
+        - multijob:
+            name: healthcheck
+            condition: SUCCESSFUL
+            projects:
+                - name: 'xci-verify-healthcheck-{type}-{stream}'
+                  current-parameters: true
+                  predefined-parameters: |
+                    DISTRO={distro}
+                    DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+                    FUNCTEST_SUITE_NAME=healthcheck
+                  node-parameters: true
+                  kill-phase-on: NEVER
+                  abort-all-job: true
+
+- job-template:
+    name: 'xci-verify-{phase}-{type}-{stream}'
+
+    disabled: false
+
+    concurrent: true
+
+    properties:
+        - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'xci-verify-deploy-.*'
+                - 'xci-verify-healthcheck-.*'
+                - 'bifrost-verify-.*'
+                - 'bifrost-periodic-.*'
+                - 'osa-verify-.*'
+                - 'osa-periodic-.*'
+            block-level: 'NODE'
+
+    parameters:
+        - string:
+            name: DISTRO
+            default: 'xenial'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'os-nosdn-nofeature-noha'
+        - string:
+            name: FUNCTEST_SUITE_NAME
+            default: 'healthcheck'
+        - string:
+            name: XCI_FLAVOR
+            default: 'mini'
+        - string:
+            name: OPNFV_RELENG_DEV_PATH
+            default: $WORKSPACE/
+        - string:
+            name: ANSIBLE_VERBOSITY
+            default: '-vvvv'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+
+    wrappers:
+        - ssh-agent-wrapper
+        - build-timeout:
+            timeout: 240
+        - fix-workspace-permissions
+
+    scm:
+        - git-scm-gerrit
+
+    builders:
+        - description-setter:
+            description: "Built on $NODE_NAME"
+        - 'xci-verify-{phase}-macro'
+
+#--------------------------------
+# builder macros
+#--------------------------------
+- builder:
+    name: 'xci-verify-deploy-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            # for some reason, the PATH is not set correctly
+            # setting PATH for ansible stuff
+            export PATH=/home/jenkins/.local/bin:$PATH
+
+            cd $WORKSPACE/xci
+            ./xci-deploy.sh
+
+- builder:
+    name: 'xci-verify-healthcheck-macro'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            echo "Hello World!"
index 5ff36f8..4536543 100644 (file)
 # that have been switched using labels for slaves
 #--------------------------------
     pod:
+# apex CI PODs
+        - virtual:
+            slave-label: apex-virtual-master
+            installer: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - baremetal:
+            slave-label: apex-baremetal-master
+            installer: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - virtual:
+            slave-label: apex-virtual-danube
+            installer: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
+        - baremetal:
+            slave-label: apex-baremetal-danube
+            installer: apex
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *danube
 # fuel CI PODs
         - baremetal:
             slave-label: fuel-baremetal
             installer: joid
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
-
 # compass CI PODs
         - baremetal:
             slave-label: compass-baremetal
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
 #--------------------------------
-#    Installers not using labels
-#            CI PODs
-# This section should only contain the installers
-# that have not been switched using labels for slaves
-#--------------------------------
-        - lf-pod1:
-            slave-label: '{pod}'
-            installer: apex
-            auto-trigger-name: 'daily-trigger-disabled'
-            <<: *master
-        - lf-pod1:
-            slave-label: '{pod}'
-            installer: apex
-            auto-trigger-name: 'daily-trigger-disabled'
-            <<: *danube
-#--------------------------------
 #        None-CI PODs
 #--------------------------------
         - orange-pod1:
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
-        - arm-pod3:
+        - arm-pod5:
             slave-label: '{pod}'
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
-        - arm-pod3:
+        - arm-pod5:
             slave-label: '{pod}'
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *danube
-        - arm-virtual1:
+        - arm-virtual2:
             slave-label: '{pod}'
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
-        - arm-virtual1:
+        - arm-virtual2:
             slave-label: '{pod}'
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
         - description-setter:
             description: "POD: $NODE_NAME"
         - 'yardstick-cleanup'
-        #- 'yardstick-fetch-os-creds'
+        - 'yardstick-fetch-os-creds'
+        - 'yardstick-fetch-k8s-conf'
         - 'yardstick-{testsuite}'
         - 'yardstick-store-results'
 
     publishers:
         - email:
-            recipients: jean.gaoliang@huawei.com limingjiang@huawei.com
+            recipients: jean.gaoliang@huawei.com limingjiang@huawei.com ross.b.brattain@intel.com
+        - email-jenkins-admins-on-failure
 
 ########################
 # builder macros
         - shell:
             !include-raw: ../../utils/fetch_os_creds.sh
 
+- builder:
+    name: yardstick-fetch-k8s-conf
+    builders:
+        - shell:
+            !include-raw: ./yardstick-get-k8s-conf.sh
+
 - builder:
     name: yardstick-store-results
     builders:
 # parameter macros
 ########################
 - parameter:
-    name: 'yardstick-params-fuel-baremetal'
+    name: 'yardstick-params-apex-virtual-master'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-fuel-virtual'
+    name: 'yardstick-params-apex-baremetal-master'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-armband-baremetal'
+    name: 'yardstick-params-apex-virtual-danube'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-armband-virtual'
+    name: 'yardstick-params-apex-baremetal-danube'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-arm-virtual1'
+    name: 'yardstick-params-fuel-baremetal'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-joid-baremetal'
+    name: 'yardstick-params-fuel-virtual'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-joid-virtual'
+    name: 'yardstick-params-armband-baremetal'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-intel-pod8'
+    name: 'yardstick-params-armband-virtual'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
-    name: 'yardstick-params-lf-pod1'
+    name: 'yardstick-params-arm-virtual2'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
-
 - parameter:
-    name: 'yardstick-params-lf-pod2'
+    name: 'yardstick-params-joid-baremetal'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+    name: 'yardstick-params-joid-virtual'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+    name: 'yardstick-params-intel-pod8'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
-
 - parameter:
     name: 'yardstick-params-compass-baremetal'
     parameters:
             description: 'Arguments to use in order to choose the backend DB'
 
 - parameter:
-    name: 'yardstick-params-arm-pod3'
+    name: 'yardstick-params-arm-pod5'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
 - trigger:
     name: 'yardstick-daily-huawei-pod4-trigger'
     triggers:
-        - timed: '0 1 * * *'
+        - timed: ''
index 973f83a..ccee971 100755 (executable)
@@ -2,9 +2,10 @@
 set -e
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
-# labconfig is used only for joid
-labconfig=""
+rc_file_vol=""
+cacert_file_vol=""
 sshkey=""
+
 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
     instack_mac=$(sudo virsh domiflist undercloud | grep default | \
                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
@@ -15,12 +16,24 @@ if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
         sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
         sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
     fi
-elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
-    # If production lab then creds may be retrieved dynamically
-    # creds are on the jumphost, always in the same folder
-    labconfig="-v $LAB_CONFIG/admin-openrc:/etc/yardstick/openstack.creds"
-    # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
-    # replace the default one by the customized one provided by jenkins config
+fi
+
+if [[ ${INSTALLER_TYPE} == 'joid' ]]; then
+    if [[ "${DEPLOY_SCENARIO:0:2}" == "k8" ]];then
+        rc_file_vol="-v $LAB_CONFIG/admin.conf:/etc/yardstick/admin.conf"
+    else
+        # If production lab then creds may be retrieved dynamically
+        # creds are on the jumphost, always in the same folder
+        rc_file_vol="-v $LAB_CONFIG/admin-openrc:/etc/yardstick/openstack.creds"
+        # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
+        # replace the default one by the customized one provided by jenkins config
+    fi
+elif [[ ${INSTALLER_TYPE} == 'compass' && ${BRANCH} == 'master' ]]; then
+    cacert_file_vol="-v ${HOME}/os_cacert:/etc/yardstick/os_cacert"
+    echo "export OS_CACERT=/etc/yardstick/os_cacert" >> ${HOME}/opnfv-openrc.sh
+    rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/etc/yardstick/openstack.creds"
+else
+    rc_file_vol="-v ${HOME}/opnfv-openrc.sh:/etc/yardstick/openstack.creds"
 fi
 
 # Set iptables rule to allow forwarding return traffic for container
@@ -31,7 +44,8 @@ fi
 opts="--privileged=true --rm"
 envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NETWORK=${EXTERNAL_NETWORK} \
-    -e YARDSTICK_BRANCH=${BRANCH} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
+    -e YARDSTICK_BRANCH=${BRANCH} -e BRANCH=${BRANCH} \
+    -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
 
 # Pull the image with correct tag
 echo "Yardstick: Pulling image opnfv/yardstick:${DOCKER_TAG}"
@@ -45,7 +59,7 @@ sudo rm -rf ${dir_result}/*
 map_log_dir="-v ${dir_result}:/tmp/yardstick"
 
 # Run docker
-cmd="sudo docker run ${opts} ${envs} ${labconfig} ${map_log_dir} ${sshkey} opnfv/yardstick:${DOCKER_TAG} \
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} ${map_log_dir} ${sshkey} opnfv/yardstick:${DOCKER_TAG} \
     exec_tests.sh ${YARDSTICK_DB_BACKEND} ${YARDSTICK_SCENARIO_SUITE_NAME}"
 echo "Yardstick: Running docker cmd: ${cmd}"
 ${cmd}
diff --git a/jjb/yardstick/yardstick-get-k8s-conf.sh b/jjb/yardstick/yardstick-get-k8s-conf.sh
new file mode 100755 (executable)
index 0000000..e93367f
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+
+dest_path="$HOME/admin.conf"
+
+if [[ "${DEPLOY_SCENARIO:0:2}" == "k8" ]];then
+    juju scp kubernetes-master/0:config "${dest_path}"
+fi
diff --git a/modules/tox.ini b/modules/tox.ini
new file mode 100644 (file)
index 0000000..835cb6b
--- /dev/null
@@ -0,0 +1,28 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+skipsdist = True
+
+[testenv]
+usedevelop = True
+setenv=
+  HOME = {envtmpdir}
+  PYTHONPATH = {toxinidir}
+
+[testenv:modules]
+deps=
+  -rrequirements.txt
+  -rtest-requirements.txt
+commands =
+  nosetests \
+  --with-xunit \
+  --xunit-file=nosetests.xml \
+  --cover-package=opnfv \
+  --with-coverage \
+  --cover-xml \
+  --cover-html \
+  tests/unit
diff --git a/prototypes/bifrost/README.md b/prototypes/bifrost/README.md
deleted file mode 100644 (file)
index dc1417a..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-=====================
-How to deploy bifrost
-=====================
-The scripts and playbooks defined on this repo, need to be combined with proper `Bifrost <http://git.openstack.org/cgit/openstack/bifrost>`_ code.
-
-Please follow that steps:
-
-1. Clone bifrost::
-
-    sudo git clone https://git.openstack.org/openstack/bifrost /opt/bifrost
-
-2. Clone releng::
-
-    sudo git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
-
-3. Clone infracloud::
-
-    sudo git clone https://git.openstack.org/openstack-infra/puppet-infracloud /opt/puppet-infracloud
-
-4. Combine releng scripts and playbooks with bifrost::
-
-    sudo cp -R /opt/releng/prototypes/bifrost/* /opt/bifrost/
-
-5. Copy /opt/puppet-infracloud/templates/bifrost/create_bridge.py.erb to /opt/puppet-infracloud/files/elements/infra-cloud-bridge/static/opt/create_bridge.py,
-   and replace tag <%= @bridge_name -%> with br_opnfv
-
-6. If you are on a RHEL/CentOS box, ensure that selinux is disabled
-
-7. Run destroy script if you need to cleanup previous environment::
-
-    cd /opt/bifrost
-    sudo ./scripts/destroy-env.sh
-
-8. Run deployment script to spin up 3 vms with bifrost: xcimaster, controller and compute::
-
-    cd /opt/bifrost
-    sudo ./scripts/test-bifrost-deployment.sh
-
-It is likely that the script will show some errors due to timeout. Please ignore the errors, and wait until the vms are completely bootstrapped. To verify it you can check with ironic::
-
-    cd /opt/bifrost
-    source env-vars
-    ironic node-list
-
-And wait until all the vms are in **active** Provisioning State.
-
-9. Check the IPs assigned to each of the VMS. You can check it by looking at inventory:
-
-    cat /tmp/baremetal.csv
-
-10. You can enter into the vms with devuser login/pass:
-
-    ssh devuser@192.168.122.2
diff --git a/prototypes/bifrost/playbooks/opnfv-virtual.yaml b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
deleted file mode 100644 (file)
index 94de628..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 RedHat and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- hosts: localhost
-  connection: local
-  name: "Setting pre-test conditions"
-  become: yes
-  ignore_errors: yes
-  tasks:
-  - name: Remove pre-existing leases file
-    file: path=/var/lib/misc/dnsmasq.leases state=absent
-- hosts: localhost
-  connection: local
-  name: "Executes install, enrollment, and testing in one playbook"
-  become: no
-  gather_facts: yes
-  pre_tasks:
-    - name: "Override the ipv4_gateway setting"
-      set_fact:
-         ipv4_gateway: "192.168.122.1"
-  roles:
-    - { role: bifrost-prep-for-install, when: skip_install is not defined }
-  environment:
-    http_proxy: "{{ lookup('env','http_proxy') }}"
-    https_proxy: "{{ lookup('env','https_proxy') }}"
-- hosts: localhost
-  connection: local
-  name: "Executes install, enrollment, and testing in one playbook"
-  become: yes
-  gather_facts: yes
-  roles:
-    - role: bifrost-keystone-install
-    - role: bifrost-ironic-install
-      cleaning: false
-      testing: true
-    # NOTE(TheJulia): While the next step creates a ramdisk, some elements
-    # do not support ramdisk-image-create as they invoke steps to cleanup
-    # the ramdisk which causes ramdisk-image-create to believe it failed.
-    - role: bifrost-create-dib-image
-      dib_imagename: "{{ http_boot_folder }}/ipa"
-      build_ramdisk: false
-      dib_os_element: "{{ ipa_dib_os_element|default('debian') }}"
-      dib_os_release: "jessie"
-      dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}"
-      when: create_ipa_image | bool == true
-    - role: bifrost-create-dib-image
-      dib_imagetype: "qcow2"
-      dib_imagename: "{{deploy_image}}"
-      dib_os_element: "{{ lookup('env','DIB_OS_ELEMENT') }}"
-      dib_os_release: "{{ lookup('env', 'DIB_OS_RELEASE') }}"
-      extra_dib_elements: "{{ lookup('env', 'EXTRA_DIB_ELEMENTS') | default('') }}"
-      dib_elements: "vm enable-serial-console simple-init devuser growroot {{ extra_dib_elements }}"
-      dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
-      when: create_image_via_dib | bool == true and transform_boot_image | bool == false
-    - role: bifrost-keystone-client-config
-      user: "{{ ansible_env.SUDO_USER }}"
-      clouds:
-        bifrost:
-          config_username: "{{ ironic.keystone.default_username }}"
-          config_password: "{{ ironic.keystone.default_password }}"
-          config_project_name: "baremetal"
-          config_region_name: "{{ keystone.bootstrap.region_name }}"
-          config_auth_url: "{{ keystone.bootstrap.public_url }}"
-  environment:
-    http_proxy: "{{ lookup('env','http_proxy') }}"
-    https_proxy: "{{ lookup('env','https_proxy') }}"
-- hosts: baremetal
-  name: "Enroll node with Ironic"
-  become: no
-  connection: local
-  roles:
-    - role: ironic-enroll-dynamic
-    - { role: ironic-inspect-node, when: inspect_nodes | default('false') | bool == true }
-- hosts: baremetal
-  name: "Create configuration drive files and deploy machines"
-  vars:
-    multinode_testing: "{{ inventory_dhcp | bool == true }}"
-  become: no
-  connection: local
-  roles:
-    - role: bifrost-configdrives-dynamic
-    - role: bifrost-deploy-nodes-dynamic
-- hosts: baremetal
-  name: "Deploy machines."
-  become: no
-  connection: local
-  serial: 1
-  roles:
-    - role: bifrost-prepare-for-test-dynamic
diff --git a/prototypes/bifrost/scripts/bifrost-provision.sh b/prototypes/bifrost/scripts/bifrost-provision.sh
deleted file mode 100755 (executable)
index 2b90215..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -eux
-set -o pipefail
-
-export PYTHONUNBUFFERED=1
-SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"
-BIFROST_HOME=$SCRIPT_HOME/..
-ANSIBLE_INSTALL_ROOT=${ANSIBLE_INSTALL_ROOT:-/opt/stack}
-ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY-"-vvvv"}
-ENABLE_VENV="false"
-USE_DHCP="false"
-USE_VENV="false"
-BUILD_IMAGE=true
-PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
-
-# Ensure the right inventory files is used based on branch
-CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-if [ $CURRENT_BIFROST_BRANCH = "master" ]; then
-    BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
-    INVENTORY_FILE_FORMAT="baremetal_json_file"
-else
-    BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.csv'}
-    INVENTORY_FILE_FORMAT="baremetal_csv_file"
-fi
-export BIFROST_INVENTORY_SOURCE=$BAREMETAL_DATA_FILE
-
-# Default settings for VMs
-export TEST_VM_NUM_NODES=${TEST_VM_NUM_NODES:-3}
-export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"opnfv controller00 compute00"}
-export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
-export VM_CPU=${VM_CPU:-4}
-export VM_DISK=${VM_DISK:-100}
-export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192}
-export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe}
-
-# Settings for bifrost
-TEST_PLAYBOOK="opnfv-virtual.yaml"
-USE_INSPECTOR=true
-USE_CIRROS=false
-TESTING_USER=root
-DOWNLOAD_IPA=true
-CREATE_IPA_IMAGE=false
-INSPECT_NODES=true
-INVENTORY_DHCP=false
-INVENTORY_DHCP_STATIC_IP=false
-WRITE_INTERFACES_FILE=true
-
-# Settings for console access
-export DIB_DEV_USER_PWDLESS_SUDO=yes
-export DIB_DEV_USER_PASSWORD=devuser
-
-# Settings for distro: xenial/ubuntu-minimal, 7/centos7, 42.2/suse
-export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial}
-export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
-
-# DIB OS packages
-export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl"}
-
-# Additional dib elements
-export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
-
-# Source Ansible
-set +x +o nounset
-$SCRIPT_HOME/env-setup.sh
-ANSIBLE=$(which ansible-playbook)
-set -x -o nounset
-
-logs_on_exit() {
-    $SCRIPT_HOME/collect-test-info.sh
-}
-trap logs_on_exit EXIT
-
-# Change working directory
-cd $BIFROST_HOME/playbooks
-
-# Syntax check of dynamic inventory test path
-for task in syntax-check list-tasks; do
-    ${ANSIBLE} ${ANSIBLE_VERBOSITY} \
-           -i inventory/localhost \
-           test-bifrost-create-vm.yaml \
-           --${task}
-    ${ANSIBLE} ${ANSIBLE_VERBOSITY} \
-           -i inventory/localhost \
-           ${TEST_PLAYBOOK} \
-           --${task} \
-           -e testing_user=${TESTING_USER}
-done
-
-# Create the VMS
-${ANSIBLE} ${ANSIBLE_VERBOSITY} \
-       -i inventory/localhost \
-       test-bifrost-create-vm.yaml \
-       -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
-       -e test_vm_memory_size=${VM_MEMORY_SIZE} \
-       -e enable_venv=${ENABLE_VENV} \
-       -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
-       -e ${INVENTORY_FILE_FORMAT}=${BAREMETAL_DATA_FILE}
-
-# Execute the installation and VM startup test
-${ANSIBLE} ${ANSIBLE_VERBOSITY} \
-    -i inventory/bifrost_inventory.py \
-    ${TEST_PLAYBOOK} \
-    -e use_cirros=${USE_CIRROS} \
-    -e testing_user=${TESTING_USER} \
-    -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
-    -e inventory_dhcp=${INVENTORY_DHCP} \
-    -e inventory_dhcp_static_ip=${INVENTORY_DHCP_STATIC_IP} \
-    -e enable_venv=${ENABLE_VENV} \
-    -e enable_inspector=${USE_INSPECTOR} \
-    -e inspect_nodes=${INSPECT_NODES} \
-    -e download_ipa=${DOWNLOAD_IPA} \
-    -e create_ipa_image=${CREATE_IPA_IMAGE} \
-    -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
-    -e ipv4_gateway=192.168.122.1 \
-    -e wait_timeout=${PROVISION_WAIT_TIMEOUT} \
-    -e enable_keystone=false
-EXITCODE=$?
-
-if [ $EXITCODE != 0 ]; then
-    echo "************************************"
-    echo "Provisioning failed. See logs folder"
-    echo "************************************"
-fi
-
-exit $EXITCODE
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
deleted file mode 100755 (executable)
index c75e814..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 RedHat and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# We need to execute everything as root
-if [[ $(whoami) != "root" ]]; then
-    echo "Error: This script must be run as root!"
-    exit 1
-fi
-
-# Start fresh
-rm -rf /opt/stack
-# HOME is normally set by sudo -H
-rm -rf ${HOME}/.config/openstack
-
-# Delete all libvirt VMs and hosts from vbmc (look for a port number)
-for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
-    virsh destroy $vm || true
-    virsh undefine $vm || true
-    vbmc delete $vm
-done
-
-service ironic-conductor stop || true
-
-echo "removing inventory files created by previous builds"
-rm -rf /tmp/baremetal.*
-
-echo "removing ironic database"
-if $(which mysql &> /dev/null); then
-    mysql -u root ironic --execute "drop database ironic;"
-fi
-echo "removing leases"
-[[ -e /var/lib/misc/dnsmasq/dnsmasq.leases ]] && > /var/lib/misc/dnsmasq/dnsmasq.leases
-echo "removing logs"
-rm -rf /var/log/libvirt/baremetal_logs/*
-
-# clean up dib images only if requested explicitly
-CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
-
-if [ $CLEAN_DIB_IMAGES = "true" ]; then
-    rm -rf /httpboot /tftpboot
-    mkdir /httpboot /tftpboot
-    chmod -R 755 /httpboot /tftpboot
-fi
-
-# remove VM disk images
-rm -rf /var/lib/libvirt/images/*.qcow2
-
-echo "restarting services"
-service dnsmasq restart || true
-service libvirtd restart
-service ironic-api restart || true
-service ironic-conductor start || true
-service ironic-inspector restart || true
diff --git a/prototypes/openstack-ansible/README.md b/prototypes/openstack-ansible/README.md
deleted file mode 100644 (file)
index 34c1d0d..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-===============================
-How to deploy OpenStack-Ansible
-===============================
-The script and playbooks defined on this repo will deploy an OpenStack
-cloud based on OpenStack-Ansible.
-It needs to be combined with Bifrost. You need use Bifrost to provide six VMs.
-To learn about how to use Bifrost, you can read the document on
-[/opt/releng/prototypes/bifrost/README.md].
-
-Minimal requirements:
-1. You will need to have a least 150G free space for the partition on where
-   "/var/lib/libvirt/images/" lives.
-2. each vm needs to have at least 8 vCPU, 12 GB RAM, 60 GB HDD.
-
-After provisioning the six VMs please follow that steps:
-
-1.Run the script to deploy OpenStack
-  cd /opt/releng/prototypes/openstack-ansible/scripts/
-  sudo ./osa_deploy.sh
-It will take a lot of time. When the deploy is successful, you will see the
-message "OpenStack deployed successfully".
-
-2.To verify the OpenStack operation
-  2.1 ssh into the controller::
-      ssh 192.168.122.3
-  2.2 Enter into the lxc container::
-      lxcname=$(lxc-ls | grep utility)
-      lxc-attach -n $lxcname
-  2.3 Verify the OpenStack API::
-      source /root/openrc
-      openstack user list
-
-This will show the following output::
-+----------------------------------+--------------------+
-| ID                               | Name               |
-+----------------------------------+--------------------+
-| 056f8fe41336435991fd80872731cada | aodh               |
-| 308f6436e68f40b49d3b8e7ce5c5be1e | glance             |
-| 351b71b43a66412d83f9b3cd75485875 | nova               |
-| 511129e053394aea825cce13b9f28504 | ceilometer         |
-| 5596f71319d44c8991fdc65f3927b62e | gnocchi            |
-| 586f49e3398a4c47a2f6fe50135d4941 | stack_domain_admin |
-| 601b329e6b1d427f9a1e05ed28753497 | heat               |
-| 67fe383b94964a4781345fbcc30ae434 | cinder             |
-| 729bb08351264d729506dad84ed3ccf0 | admin              |
-| 9f2beb2b270940048fe6844f0b16281e | neutron            |
-| fa68f86dd1de4ddbbb7415b4d9a54121 | keystone           |
-+----------------------------------+--------------------+
diff --git a/prototypes/openstack-ansible/file/cinder.yml b/prototypes/openstack-ansible/file/cinder.yml
deleted file mode 100644 (file)
index e40b392..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# This file contains an example to show how to set
-# the cinder-volume service to run in a container.
-#
-# Important note:
-# When using LVM or any iSCSI-based cinder backends, such as NetApp with
-# iSCSI protocol, the cinder-volume service *must* run on metal.
-# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
-
-container_skel:
-  cinder_volumes_container:
-    properties:
-      is_metal: false
diff --git a/prototypes/openstack-ansible/file/exports b/prototypes/openstack-ansible/file/exports
deleted file mode 100644 (file)
index 315f79d..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-# /etc/exports: the access control list for filesystems which may be exported
-#               to NFS clients.  See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
-#
-/images         *(rw,sync,no_subtree_check,no_root_squash)
-
diff --git a/prototypes/openstack-ansible/file/modules b/prototypes/openstack-ansible/file/modules
deleted file mode 100644 (file)
index 60a517f..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-bonding
-8021q
diff --git a/prototypes/openstack-ansible/file/openstack_user_config.yml b/prototypes/openstack-ansible/file/openstack_user_config.yml
deleted file mode 100644 (file)
index 43e88c0..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
----
-cidr_networks:
-  container: 172.29.236.0/22
-  tunnel: 172.29.240.0/22
-  storage: 172.29.244.0/22
-
-used_ips:
-  - "172.29.236.1,172.29.236.50"
-  - "172.29.240.1,172.29.240.50"
-  - "172.29.244.1,172.29.244.50"
-  - "172.29.248.1,172.29.248.50"
-
-global_overrides:
-  internal_lb_vip_address: 172.29.236.222
-  external_lb_vip_address: 192.168.122.220
-  tunnel_bridge: "br-vxlan"
-  management_bridge: "br-mgmt"
-  provider_networks:
-    - network:
-        container_bridge: "br-mgmt"
-        container_type: "veth"
-        container_interface: "eth1"
-        ip_from_q: "container"
-        type: "raw"
-        group_binds:
-          - all_containers
-          - hosts
-        is_container_address: true
-        is_ssh_address: true
-    - network:
-        container_bridge: "br-vxlan"
-        container_type: "veth"
-        container_interface: "eth10"
-        ip_from_q: "tunnel"
-        type: "vxlan"
-        range: "1:1000"
-        net_name: "vxlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth12"
-        host_bind_override: "eth12"
-        type: "flat"
-        net_name: "flat"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth11"
-        type: "vlan"
-        range: "1:1"
-        net_name: "vlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-storage"
-        container_type: "veth"
-        container_interface: "eth2"
-        ip_from_q: "storage"
-        type: "raw"
-        group_binds:
-          - glance_api
-          - cinder_api
-          - cinder_volume
-          - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-#  ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.15"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-  controller01:
-    ip: 172.29.236.12
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.15"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-  controller02:
-    ip: 172.29.236.13
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.15"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# ceilometer (telemetry API)
-metering-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# aodh (telemetry alarm service)
-metering-alarm_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# gnocchi (telemetry metrics storage)
-metrics_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
-  compute00:
-    ip: 172.29.236.14
-  compute01:
-    ip: 172.29.236.15
-
-# ceilometer compute agent (telemetry)
-metering-compute_hosts:
-  compute00:
-    ip: 172.29.236.14
-  compute01:
-    ip: 172.29.236.15
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.11"
-  controller01:
-    ip: 172.29.236.12
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.12"
-  controller02:
-    ip: 172.29.236.13
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.13"
diff --git a/prototypes/openstack-ansible/file/opnfv-setup-openstack.yml b/prototypes/openstack-ansible/file/opnfv-setup-openstack.yml
deleted file mode 100644 (file)
index aacdeff..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- include: os-keystone-install.yml
-- include: os-glance-install.yml
-- include: os-cinder-install.yml
-- include: os-nova-install.yml
-- include: os-neutron-install.yml
-- include: os-heat-install.yml
-- include: os-horizon-install.yml
-- include: os-ceilometer-install.yml
-- include: os-aodh-install.yml
-#NOTE(stevelle) Ensure Gnocchi identities exist before Swift
-- include: os-gnocchi-install.yml
-  when:
-    - gnocchi_storage_driver is defined
-    - gnocchi_storage_driver == 'swift'
-  vars:
-    gnocchi_identity_only: True
-- include: os-swift-install.yml
-- include: os-gnocchi-install.yml
-- include: os-ironic-install.yml
diff --git a/prototypes/openstack-ansible/file/user_variables.yml b/prototypes/openstack-ansible/file/user_variables.yml
deleted file mode 100644 (file)
index 65cbcc1..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
diff --git a/prototypes/openstack-ansible/playbooks/configure-targethosts.yml b/prototypes/openstack-ansible/playbooks/configure-targethosts.yml
deleted file mode 100644 (file)
index 538fe17..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  vars_files:
-    - ../var/ubuntu.yml
-  tasks:
-    - name: add public key to host
-      copy:
-        src: ../file/authorized_keys
-        dest: /root/.ssh/authorized_keys
-    - name: configure modules
-      copy:
-        src: ../file/modules
-        dest: /etc/modules
-
-- hosts: controller
-  remote_user: root
-  vars_files:
-    - ../var/ubuntu.yml
-  tasks:
-    - name: configure network
-      template:
-        src: ../template/bifrost/controller.interface.j2
-        dest: /etc/network/interfaces
-      notify:
-        - restart network service
-  handlers:
-    - name: restart network service
-      shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
-
-- hosts: compute
-  remote_user: root
-  vars_files:
-    - ../var/ubuntu.yml
-  tasks:
-    - name: configure network
-      template:
-        src: ../template/bifrost/compute.interface.j2
-        dest: /etc/network/interfaces
-      notify:
-        - restart network service
-  handlers:
-    - name: restart network service
-      shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
-
-- hosts: compute01
-  remote_user: root
-  tasks:
-    - name: make nfs dir
-      file: "dest=/images mode=0777 state=directory"
-    - name: configure sdrvice
-      shell: "echo 'nfs        2049/tcp' >>  /etc/services && echo 'nfs        2049/udp' >>  /etc/services"
-    - name: configure NFS
-      copy:
-        src: ../file/exports
-        dest: /etc/exports
-      notify:
-        - restart nfs service
-  handlers:
-    - name: restart nfs service
-      service: name=nfs-kernel-server state=restarted
diff --git a/prototypes/openstack-ansible/playbooks/configure-xcimaster.yml b/prototypes/openstack-ansible/playbooks/configure-xcimaster.yml
deleted file mode 100644 (file)
index fbbde64..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- hosts: xcimaster
-  remote_user: root
-  vars_files:
-    - ../var/ubuntu.yml
-  tasks:
-    - name: generate SSH keys
-      shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
-      args:
-        creates: /root/.ssh/id_rsa
-    - name: fetch public key
-      fetch: src="/root/.ssh/id_rsa.pub" dest="/"
-    - name: remove openstack-ansible directories
-      file:
-        path={{ item }}
-        state=absent
-        recurse=no
-      with_items:
-        - "{{OSA_PATH}}"
-        - "{{OSA_ETC_PATH}}"
-    - name: clone openstack-ansible
-      git:
-        repo: "{{OSA_URL}}"
-        dest: "{{OSA_PATH}}"
-        version: "{{OPENSTACK_OSA_VERSION}}"
-    - name: copy opnfv-setup-openstack.yml to /opt/openstack-ansible/playbooks
-      copy:
-        src: ../file/opnfv-setup-openstack.yml
-        dest: "{{OSA_PATH}}/playbooks/opnfv-setup-openstack.yml"
-    - name: copy /opt/openstack-ansible/etc/openstack_deploy to /etc/openstack_deploy
-      shell: "/bin/cp -rf {{OSA_PATH}}/etc/openstack_deploy {{OSA_ETC_PATH}}"
-    - name: bootstrap
-      command: "/bin/bash ./scripts/bootstrap-ansible.sh"
-      args:
-        chdir: "{{OSA_PATH}}"
-    - name: generate password token
-      command: "python pw-token-gen.py --file /etc/openstack_deploy/user_secrets.yml"
-      args:
-        chdir: /opt/openstack-ansible/scripts/
-    - name: copy openstack_user_config.yml to /etc/openstack_deploy
-      copy:
-        src: ../file/openstack_user_config.yml
-        dest: "{{OSA_ETC_PATH}}/openstack_user_config.yml"
-    - name: copy cinder.yml to /etc/openstack_deploy/env.d
-      copy:
-        src: ../file/cinder.yml
-        dest: "{{OSA_ETC_PATH}}/env.d/cinder.yml"
-    - name: copy user_variables.yml to /etc/openstack_deploy/
-      copy:
-        src: ../file/user_variables.yml
-        dest: "{{OSA_ETC_PATH}}/user_variables.yml"
-    - name: configure network
-      template:
-        src: ../template/bifrost/controller.interface.j2
-        dest: /etc/network/interfaces
-      notify:
-        - restart network service
-  handlers:
-    - name: restart network service
-      shell: "/sbin/ifconfig ens3 0 &&/sbin/ifdown -a && /sbin/ifup -a"
-
-- hosts: localhost
-  remote_user: root
-  tasks:
-    - name: Generate authorized_keys
-      shell: "/bin/cat /xcimaster/root/.ssh/id_rsa.pub >> ../file/authorized_keys"
diff --git a/prototypes/openstack-ansible/playbooks/inventory b/prototypes/openstack-ansible/playbooks/inventory
deleted file mode 100644 (file)
index d3768f5..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-[xcimaster]
-xcimaster ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
diff --git a/prototypes/openstack-ansible/scripts/osa-deploy.sh b/prototypes/openstack-ansible/scripts/osa-deploy.sh
deleted file mode 100755 (executable)
index ec60744..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -o errexit
-set -o nounset
-set -o pipefail
-
-export OSA_PATH=/opt/openstack-ansible
-export LOG_PATH=$OSA_PATH/log
-export PLAYBOOK_PATH=$OSA_PATH/playbooks
-export OSA_BRANCH=${OSA_BRANCH:-"master"}
-XCIMASTER_IP="192.168.122.2"
-
-sudo /bin/rm -rf $LOG_PATH
-sudo /bin/mkdir -p $LOG_PATH
-sudo /bin/cp /root/.ssh/id_rsa.pub ../file/authorized_keys
-echo -e '\n' | sudo tee --append ../file/authorized_keys
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "*                                                                     *"
-echo "*                        Configure XCI Master                         *"
-echo "*                                                                     *"
-echo "*  Bootstrap xci-master, configure network, clone openstack-ansible   *"
-echo "*                Playbooks: configure-xcimaster.yml                   *"
-echo "*                                                                     *"
-echo "***********************************************************************"
-echo -e "\n"
-
-cd ../playbooks/
-# this will prepare the jump host
-# git clone the Openstack-Ansible, bootstrap and configure network
-echo "xci: running ansible playbook configure-xcimaster.yml"
-sudo -E ansible-playbook -i inventory configure-xcimaster.yml
-
-echo "XCI Master is configured successfully!"
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "*                                                                     *"
-echo "*                          Configure Nodes                            *"
-echo "*                                                                     *"
-echo "*       Configure network on OpenStack Nodes, configure NFS           *"
-echo "*                Playbooks: configure-targethosts.yml                 *"
-echo "*                                                                     *"
-echo "***********************************************************************"
-echo -e "\n"
-
-# this will prepare the target host
-# such as configure network and NFS
-echo "xci: running ansible playbook configure-targethosts.yml"
-sudo -E ansible-playbook -i inventory configure-targethosts.yml
-
-echo "Nodes are configured successfully!"
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "*                                                                     *"
-echo "*                       Set Up OpenStack Nodes                        *"
-echo "*                                                                     *"
-echo "*            Set up OpenStack Nodes using openstack-ansible           *"
-echo "*         Playbooks: setup-hosts.yml, setup-infrastructure.yml        *"
-echo "*                                                                     *"
-echo "***********************************************************************"
-echo -e "\n"
-
-# using OpenStack-Ansible deploy the OpenStack
-echo "xci: running ansible playbook setup-hosts.yml"
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP openstack-ansible \
-     $PLAYBOOK_PATH/setup-hosts.yml" | \
-     tee $LOG_PATH/setup-hosts.log
-
-# check the result of openstack-ansible setup-hosts.yml
-# if failed, exit with exit code 1
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-hosts.log; then
-    echo "OpenStack node setup failed!"
-    exit 1
-fi
-
-echo "xci: running ansible playbook setup-infrastructure.yml"
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP openstack-ansible \
-     $PLAYBOOK_PATH/setup-infrastructure.yml" | \
-     tee $LOG_PATH/setup-infrastructure.log
-
-# check the result of openstack-ansible setup-infrastructure.yml
-# if failed, exit with exit code 1
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
-    echo "OpenStack node setup failed!"
-    exit 1
-fi
-
-echo "OpenStack nodes are setup successfully!"
-
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP ansible -i $PLAYBOOK_PATH/inventory/ \
-           galera_container -m shell \
-           -a "mysql -h localhost -e 'show status like \"%wsrep_cluster_%\";'"" \
-           | tee $LOG_PATH/galera.log
-
-if grep -q 'FAILED' $LOG_PATH/galera.log; then
-    echo "Database cluster verification failed!"
-    exit 1
-else
-    echo "Database cluster verification successful!"
-fi
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "*                                                                     *"
-echo "*                           Install OpenStack                         *"
-echo "*                 Playbooks: opnfv-setup-openstack.yml                *"
-echo "*                                                                     *"
-echo "***********************************************************************"
-echo -e "\n"
-
-echo "xci: running ansible playbook opnfv-setup-openstack.yml"
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP openstack-ansible \
-     $PLAYBOOK_PATH/opnfv-setup-openstack.yml" | \
-     tee $LOG_PATH/opnfv-setup-openstack.log
-
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
-   echo "OpenStack installation failed!"
-   exit 1
-else
-   echo "OpenStack installation is successfully completed!"
-   exit 0
-fi
diff --git a/prototypes/openstack-ansible/template/bifrost/compute.interface.j2 b/prototypes/openstack-ansible/template/bifrost/compute.interface.j2
deleted file mode 100644 (file)
index 1719f6a..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-
-# Physical interface
-auto ens3
-iface ens3 inet manual
-
-# Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
-    vlan-raw-device ens3
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
-    vlan-raw-device ens3
-
-# Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
-    vlan-raw-device ens3
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3.10
-    address {{host_info[inventory_hostname].MGMT_IP}}
-    netmask 255.255.252.0
-
-# compute1 VXLAN (tunnel/overlay) bridge config
-auto br-vxlan
-iface br-vxlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3.30
-    address {{host_info[inventory_hostname].VXLAN_IP}}
-    netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3
-    address {{host_info[inventory_hostname].VLAN_IP}}
-    netmask 255.255.255.0
-    gateway 192.168.122.1
-    offload-sg off
-    # Create veth pair, don't bomb if already exists
-    pre-up ip link add br-vlan-veth type veth peer name eth12 || true
-    # Set both ends UP
-    pre-up ip link set br-vlan-veth up
-    pre-up ip link set eth12 up
-    # Delete veth pair on DOWN
-    post-down ip link del br-vlan-veth || true
-    bridge_ports br-vlan-veth
-
-# Add an additional address to br-vlan
-iface br-vlan inet static
-    # Flat network default gateway
-    # -- This needs to exist somewhere for network reachability
-    # -- from the router namespace for floating IP paths.
-    # -- Putting this here is primarily for tempest to work.
-    address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
-    netmask 255.255.252.0
-    dns-nameserver 8.8.8.8 8.8.4.4
-
-# compute1 Storage bridge
-auto br-storage
-iface br-storage inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3.20
-    address {{host_info[inventory_hostname].STORAGE_IP}}
-    netmask 255.255.252.0
diff --git a/prototypes/openstack-ansible/template/bifrost/controller.interface.j2 b/prototypes/openstack-ansible/template/bifrost/controller.interface.j2
deleted file mode 100644 (file)
index 74aeea9..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto ens3
-iface ens3 inet manual
-
-# Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
-    vlan-raw-device ens3
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
-    vlan-raw-device ens3
-
-# Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
-    vlan-raw-device ens3
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3.10
-    address {{host_info[inventory_hostname].MGMT_IP}}
-    netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
-auto br-vxlan
-iface br-vxlan inet manual
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3.30
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3
-    address {{host_info[inventory_hostname].VLAN_IP}}
-    netmask 255.255.255.0
-    gateway 192.168.122.1
-    dns-nameserver 8.8.8.8 8.8.4.4
-
-# compute1 Storage bridge
-auto br-storage
-iface br-storage inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports ens3.20
-    address {{host_info[inventory_hostname].STORAGE_IP}}
-    netmask 255.255.252.0
diff --git a/prototypes/openstack-ansible/var/ubuntu.yml b/prototypes/openstack-ansible/var/ubuntu.yml
deleted file mode 100644 (file)
index eb595be..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
----
-OSA_URL: https://git.openstack.org/openstack/openstack-ansible
-OSA_PATH: /opt/openstack-ansible
-OSA_ETC_PATH: /etc/openstack_deploy
-OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
-
-XCIMASTER_IP: 192.168.122.2
-host_info: {'xcimaster':{'MGMT_IP': '172.29.236.10','VLAN_IP': '192.168.122.2', 'STORAGE_IP': '172.29.244.10'},'controller00':{'MGMT_IP': '172.29.236.11','VLAN_IP': '192.168.122.3', 'STORAGE_IP': '172.29.244.11'},'controller01':{'MGMT_IP': '172.29.236.12','VLAN_IP': '192.168.122.4', 'STORAGE_IP': '172.29.244.12'},'controller02':{'MGMT_IP': '172.29.236.13','VLAN_IP': '192.168.122.5', 'STORAGE_IP': '172.29.240.13'},'compute00':{'MGMT_IP': '172.29.236.14','VLAN_IP': '192.168.122.6','VLAN_IP_SECOND': '173.29.241.1','VXLAN_IP': '172.29.240.14', 'STORAGE_IP': '172.29.244.14'},'compute01':{'MGMT_IP': '172.29.236.15','VLAN_IP': '192.168.122.7','VLAN_IP_SECOND': '173.29.241.2','VXLAN_IP': '172.29.240.15', 'STORAGE_IP': '172.29.244.15'}}
diff --git a/prototypes/puppet-infracloud/README.md b/prototypes/puppet-infracloud/README.md
deleted file mode 100644 (file)
index 37d575c..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-===============================
-How to deploy puppet-infracloud
-===============================
-The manifest and mmodules defined on this repo will deploy an OpenStack cloud based on `Infra Cloud <http://docs.openstack.org/infra/system-config/infra-cloud.html>`_ project.
-
-Once all the hardware is provisioned, enter in controller and compute nodes and follow these steps:
-
-1. Clone releng::
-
-    git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
-
-2. Copy hiera to the right place::
-
-    cp /opt/releng/prototypes/puppet-infracloud/hiera/common.yaml /var/lib/hiera
-
-3. Install modules::
-
-    cd /opt/releng/prototypes/puppet-infracloud
-    ./install_modules.sh
-
-4. Apply the infracloud manifest::
-
-    cd /opt/releng/prototypes/puppet-infracloud
-    puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
-
-5. Once you finish this operation on controller and compute nodes, you will have a functional OpenStack cloud.
-
-In jumphost, follow that steps:
-
-1. Clone releng::
-
-    git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
-
-2. Create OpenStack clouds config directory::
-
-    mkdir -p /root/.config/openstack
-
-3. Copy credentials file::
-
-    cp /opt/releng/prototypes/puppet-infracloud/creds/clouds.yaml /root/.config/openstack/
-
-4. Install python-dev package as the installation of python-openstackclient depends on it
-
-    apt-get install -y python-dev
-
-5. Install openstack-client. (version 3.2.0 is known to work)::
-
-    pip install python-openstackclient
-
-6. Update /etc/hosts and add controller00::
-
-    192.168.122.3 controller00
-    192.168.122.3 controller00.opnfvlocal controller00
-
-7. Export the desired cloud::
-
-    export OS_CLOUD=opnfv
-
-8. Start using it::
-
-    openstack service list
diff --git a/prototypes/puppet-infracloud/creds/clouds.yaml b/prototypes/puppet-infracloud/creds/clouds.yaml
deleted file mode 100644 (file)
index cc27da2..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
----
-clouds:
-  opnfv:
-    verify: False
-    auth:
-      auth_url: https://controller00.opnfvlocal:5000
-      project_name: opnfv
-      username: opnfv
-      password: pass
-    identity_api_version: '3'
-    region_name: RegionOne
-    user_domain_name: opnfv
-    project_domain_name: opnfv
diff --git a/prototypes/puppet-infracloud/deploy_on_baremetal.md b/prototypes/puppet-infracloud/deploy_on_baremetal.md
deleted file mode 100644 (file)
index 2bd0a53..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-How to deploy Infra Cloud on baremetal
-==================================
-
-Install bifrost controller
---------------------------
-First step for deploying Infra Cloud is to install the bifrost controller. This can be virtualized, doesn't need to be on baremetal.
-To achieve that, first we can create a virtual machine with libvirt, with the proper network setup. This VM needs to share one physical interface (the PXE boot one), with the servers for the controller and compute nodes.
-Please follow documentation on: [https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md](https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md) to get sample templates and instructions for creating the bifrost VM.
-
-Once the **baremetal** VM is finished, you can login by ssh and start installing bifrost there. To proceed, follow this steps:
-
- 1. Change to root user, install git
- 2. Clone releng project (cd /opt, git clone https://gerrit.opnfv.org/gerrit/releng)
- 3. cd /opt/releng/prototypes/puppet-infracloud
- 4. Copy hiera to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
- 5. Ensure hostname is properly set ( hostnamectl set-hostname baremetal.opnfvlocal , hostname -f )
- 6. Install puppet and modules ( ./install_puppet.sh , ./install_modules.sh )
- 7. Apply puppet to install bifrost (puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules)
-
- With these steps you will have a bifrost controller up and running.
-
-Deploy baremetal servers
---------------------------
-Once you have bifrost controller ready, you need to use it to start deployment of the baremetal servers.
-On the same bifrost VM, follow these steps:
-
- 1. Source bifrost env vars: source /opt/stack/bifrost/env-vars
- 2. Export baremetal servers inventory:  export BIFROST_INVENTORY-SOURCE=/opt/stack/baremetal.json 
- 3. Change active directory: cd /opt/stack/bifrost/playbooks
- 3. Enroll the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
- 4. Deploy the servers:  ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
- 5. Wait until they are on **active** state, check it with: ironic node-list
-
-In case of some server needing to be redeployed, you can reset it and redeploy again with:
-
- 1. ironic node-set-provision-state <name_of_server> deleted
- 2. Wait and check with ironic node-list until the server is on **available** state
- 3. Redeploy again: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
-
-Deploy baremetal servers
---------------------------
-Once all the servers are on **active** state, they can be accessed by ssh and InfraCloud manifests can be deployed on them, to properly deploy a controller and a compute.
-On each of those, follow that steps:
-
- 1. ssh from the bifrost controller to their external ips: ssh root@172.30.13.90
- 2. cd /opt, clone releng project (git clone https://gerrit.opnfv.org/gerrit/releng)
- 3. Copy hiera to the right folder ( cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
- 4. Install modules: ./install_modules.sh
- 5. Apply puppet: puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
-
-Once this has been done on controller and compute, you will have a working cloud. To start working with it, follow that steps:
-
- 1. Ensure that controller00.opnfvlocal resolves properly to the external IP (this is already done in the bifrost controller)
- 2. Copy releng/prototypes/puppet-infracloud/creds/clouds.yaml to $HOME/.config/openstack/clouds.yaml
- 3. Install python-openstackclient
- 4. Specify the cloud you want to use: export OS_CLOUD=opnfvlocal
- 5. Now you can start operating in your cloud with openstack-client: openstack flavor list
-
diff --git a/prototypes/puppet-infracloud/hiera/common.yaml b/prototypes/puppet-infracloud/hiera/common.yaml
deleted file mode 100644 (file)
index 634d96c..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
----
-keystone_rabbit_password: pass
-neutron_rabbit_password: pass
-nova_rabbit_password: pass
-root_mysql_password: pass
-keystone_mysql_password: pass
-glance_mysql_password: pass
-neutron_mysql_password: pass
-nova_mysql_password: pass
-keystone_admin_password: pass
-glance_admin_password: pass
-neutron_admin_password: pass
-nova_admin_password: pass
-keystone_admin_token: token
-ssl_key_file_contents: |
-  -----BEGIN PRIVATE KEY-----
-  MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC0YX6wsA/Jhe3q
-  ByoiLsyagO5rOCIyzDsMTV0YMWVIa/QybvS1vI+pK9FIoYPbqWFGHXmQF0DJYulb
-  GnB6A0GlT3YXuaKPucaaANr5hTjuEBF6LuQeq+OIO5u7+l56HGWbbVeB7+vnIxK9
-  43G545aBZSGlUnVfFg+v+IQtmRr36iEa5UDd4sahDXcp2Dm3zGgkFhFKie6AJ4UU
-  TzrH2SL6Nhl7i+AenuoUEDdgDWfGnCXozLngfmhKDi6lHDmh5zJhFS7cKz14wLgF
-  37fsWxxxEX8a6gtGYEEHqXV3x3AXO+U98pr15/xQM9O2O3mrqc/zkmcCRUwCjEeD
-  jEHey3UJAgMBAAECggEAGqapBEwPGRRbsY87b2+AtXdFQrw5eU3pj4jCr3dk4o1o
-  uCbiqxNgGnup4VRT2hmtkKF8O4jj/p1JozdF1RE0GsuhxCGeXiPxrwFfWSyQ28Ou
-  AWJ6O/njlVZRTTXRzbLyZEOEgWNEdJMfCsVXIUL6EsYxcW68fr8QtExAo0gSzvwe
-  IVyhopBy4A1jr5jWqjjlgJhoTHQCkp1e9pHiaW5WWHtk2DFdy6huw5PoDRppG42P
-  soMzqHy9AIWXrYaTGNjyybdJvbaiF0X5Bkr6k8ZxMlRuEb3Vpyrj7SsBrUifRJM3
-  +yheSq3drdQHlw5VrukoIgXGYB4zAQq3LndLoL5YTQKBgQDlzz/hB1IuGOKBXRHy
-  p0j+Lyoxt5EiOW2mdEkbTUYyYnD9EDbJ0wdQ5ijtWLw0J3AwhASkH8ZyljOVHKlY
-  Sq2Oo/uroIH4M8cVIBOJQ2/ak98ItLZ1OMMnDxlZva52jBfYwOEkg6OXeLOLmay6
-  ADfxQ56RFqreVHi9J0/jvpn9UwKBgQDI8CZrM4udJTP7gslxeDcRZw6W34CBBFds
-  49d10Tfd05sysOludzWAfGFj27wqIacFcIyYQmnSga9lBhowv+RwdSjcb2QCCjOb
-  b2GdH+qSFU8BTOcd5FscCBV3U8Y1f/iYp0EQ1/GiG2AYcQC67kjWOO4/JZEXsmtq
-  LisFlWTcswKBgQCC/bs/nViuhei2LELKuafVmzTF2giUJX/m3Wm+cjGNDqew18kj
-  CXKmHks93tKIN+KvBNFQa/xF3G/Skt/EP+zl3XravUbYH0tfM0VvfE0JnjgHUlqe
-  PpiebvDYQlJrqDb/ihHLKm3ZLSfKbvIRo4Y/s3dy5CTJTgT0bLAQ9Nf5mQKBgGqb
-  Dqb9d+rtnACqSNnMn9q5xIHDHlhUx1VcJCm70Fn+NG7WcWJMGLSMSNdD8zafGA/I
-  wK7fPWmTqEx+ylJm3HnVjtI0vuheJTcoBq/oCPlsGLhl5pBzYOskVs8yQQyNUoUa
-  52haSTZqM7eD7JFAbqBJIA2cjrf1zwtMZ0LVGegFAoGBAIFSkI+y4tDEEaSsxrMM
-  OBYEZDkffVar6/mDJukvyn0Q584K3I4eXIDoEEfMGgSN2Tza6QamuNFxOPCH+AAv
-  UKvckK4yuYkc7mQIgjCE8N8UF4kgsXjPek61TZT1QVI1aYFb78ZAZ0miudqWkx4t
-  YSNDj7llArylrPGHBLQ38X4/
-  -----END PRIVATE KEY-----
-ssl_cert_file_contents: |
-  -----BEGIN CERTIFICATE-----
-  MIIDcTCCAlmgAwIBAgIJAJsHSxF0u/oaMA0GCSqGSIb3DQEBCwUAME8xCzAJBgNV
-  BAYTAlVTMQ4wDAYDVQQHDAVXb3JsZDEOMAwGA1UECgwFT1BORlYxIDAeBgNVBAMM
-  F2NvbnRyb2xsZXIwMC5vcG5mdmxvY2FsMB4XDTE2MDgxNzE2MzQwOFoXDTE3MDgx
-  NzE2MzQwOFowTzELMAkGA1UEBhMCVVMxDjAMBgNVBAcMBVdvcmxkMQ4wDAYDVQQK
-  DAVPUE5GVjEgMB4GA1UEAwwXY29udHJvbGxlcjAwLm9wbmZ2bG9jYWwwggEiMA0G
-  CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0YX6wsA/Jhe3qByoiLsyagO5rOCIy
-  zDsMTV0YMWVIa/QybvS1vI+pK9FIoYPbqWFGHXmQF0DJYulbGnB6A0GlT3YXuaKP
-  ucaaANr5hTjuEBF6LuQeq+OIO5u7+l56HGWbbVeB7+vnIxK943G545aBZSGlUnVf
-  Fg+v+IQtmRr36iEa5UDd4sahDXcp2Dm3zGgkFhFKie6AJ4UUTzrH2SL6Nhl7i+Ae
-  nuoUEDdgDWfGnCXozLngfmhKDi6lHDmh5zJhFS7cKz14wLgF37fsWxxxEX8a6gtG
-  YEEHqXV3x3AXO+U98pr15/xQM9O2O3mrqc/zkmcCRUwCjEeDjEHey3UJAgMBAAGj
-  UDBOMB0GA1UdDgQWBBQyFVbU5s2ihD0hX3W7GyHiHZGG1TAfBgNVHSMEGDAWgBQy
-  FVbU5s2ihD0hX3W7GyHiHZGG1TAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
-  A4IBAQB+xf7I9RVWzRNjMbWBDE6pBvOWnSksv7Jgr4cREvyOxBDaIoO3uQRDDu6r
-  RCgGs1CuwEaFX1SS/OVrKRFiy9kCU/LBZEFwaHRaL2Kj57Z2yNInPIiKB4h9jen2
-  75fYrpq42XUDSI0NpsqAJpmcQqXOOo8V08FlH0/6h8mWdsfQfbyaf+g73+aRZds8
-  Q4ttmBrqY4Pi5CJW46w7LRCA5o92Di3GI9dAh9MVZ3023cTTjDkW04QbluphuTFj
-  O07Npz162/fHTXut+piV78t+1HlfYWY5TOSQMIVwenftA/Bn8+TQAgnLR+nGo/wu
-  oEaxLtj3Jr07+yIjL88ewT+c3fpq
-  -----END CERTIFICATE-----
-infracloud_mysql_password: pass
-opnfv_password: pass
-
-rabbitmq::package_gpg_key: 'https://www.rabbitmq.com/rabbitmq-release-signing-key.asc'
-rabbitmq::repo::apt::key: '0A9AF2115F4687BD29803A206B73A36E6026DFCA'
-
-hosts:
-  jumphost.opnfvlocal:
-    ip: 192.168.122.2
-  controller00.opnfvlocal:
-    ip: 192.168.122.3
-  compute00.opnfvlocal:
-    ip: 192.168.122.4
-
-bridge_name: br_opnfv
-neutron_subnet_cidr: '192.168.122.0/24'
-neutron_subnet_gateway: '192.168.122.1'
-neutron_subnet_allocation_pools:
-  - 'start=192.168.122.50,end=192.168.122.254'
-virt_type: 'qemu'
diff --git a/prototypes/puppet-infracloud/hiera/common_baremetal.yaml b/prototypes/puppet-infracloud/hiera/common_baremetal.yaml
deleted file mode 100644 (file)
index 015612c..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
----
-keystone_rabbit_password: pass
-neutron_rabbit_password: pass
-nova_rabbit_password: pass
-root_mysql_password: pass
-keystone_mysql_password: pass
-glance_mysql_password: pass
-neutron_mysql_password: pass
-nova_mysql_password: pass
-keystone_admin_password: pass
-glance_admin_password: pass
-neutron_admin_password: pass
-nova_admin_password: pass
-keystone_admin_token: token
-ssl_key_file_contents: |
-  -----BEGIN PRIVATE KEY-----
-  MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC0YX6wsA/Jhe3q
-  ByoiLsyagO5rOCIyzDsMTV0YMWVIa/QybvS1vI+pK9FIoYPbqWFGHXmQF0DJYulb
-  GnB6A0GlT3YXuaKPucaaANr5hTjuEBF6LuQeq+OIO5u7+l56HGWbbVeB7+vnIxK9
-  43G545aBZSGlUnVfFg+v+IQtmRr36iEa5UDd4sahDXcp2Dm3zGgkFhFKie6AJ4UU
-  TzrH2SL6Nhl7i+AenuoUEDdgDWfGnCXozLngfmhKDi6lHDmh5zJhFS7cKz14wLgF
-  37fsWxxxEX8a6gtGYEEHqXV3x3AXO+U98pr15/xQM9O2O3mrqc/zkmcCRUwCjEeD
-  jEHey3UJAgMBAAECggEAGqapBEwPGRRbsY87b2+AtXdFQrw5eU3pj4jCr3dk4o1o
-  uCbiqxNgGnup4VRT2hmtkKF8O4jj/p1JozdF1RE0GsuhxCGeXiPxrwFfWSyQ28Ou
-  AWJ6O/njlVZRTTXRzbLyZEOEgWNEdJMfCsVXIUL6EsYxcW68fr8QtExAo0gSzvwe
-  IVyhopBy4A1jr5jWqjjlgJhoTHQCkp1e9pHiaW5WWHtk2DFdy6huw5PoDRppG42P
-  soMzqHy9AIWXrYaTGNjyybdJvbaiF0X5Bkr6k8ZxMlRuEb3Vpyrj7SsBrUifRJM3
-  +yheSq3drdQHlw5VrukoIgXGYB4zAQq3LndLoL5YTQKBgQDlzz/hB1IuGOKBXRHy
-  p0j+Lyoxt5EiOW2mdEkbTUYyYnD9EDbJ0wdQ5ijtWLw0J3AwhASkH8ZyljOVHKlY
-  Sq2Oo/uroIH4M8cVIBOJQ2/ak98ItLZ1OMMnDxlZva52jBfYwOEkg6OXeLOLmay6
-  ADfxQ56RFqreVHi9J0/jvpn9UwKBgQDI8CZrM4udJTP7gslxeDcRZw6W34CBBFds
-  49d10Tfd05sysOludzWAfGFj27wqIacFcIyYQmnSga9lBhowv+RwdSjcb2QCCjOb
-  b2GdH+qSFU8BTOcd5FscCBV3U8Y1f/iYp0EQ1/GiG2AYcQC67kjWOO4/JZEXsmtq
-  LisFlWTcswKBgQCC/bs/nViuhei2LELKuafVmzTF2giUJX/m3Wm+cjGNDqew18kj
-  CXKmHks93tKIN+KvBNFQa/xF3G/Skt/EP+zl3XravUbYH0tfM0VvfE0JnjgHUlqe
-  PpiebvDYQlJrqDb/ihHLKm3ZLSfKbvIRo4Y/s3dy5CTJTgT0bLAQ9Nf5mQKBgGqb
-  Dqb9d+rtnACqSNnMn9q5xIHDHlhUx1VcJCm70Fn+NG7WcWJMGLSMSNdD8zafGA/I
-  wK7fPWmTqEx+ylJm3HnVjtI0vuheJTcoBq/oCPlsGLhl5pBzYOskVs8yQQyNUoUa
-  52haSTZqM7eD7JFAbqBJIA2cjrf1zwtMZ0LVGegFAoGBAIFSkI+y4tDEEaSsxrMM
-  OBYEZDkffVar6/mDJukvyn0Q584K3I4eXIDoEEfMGgSN2Tza6QamuNFxOPCH+AAv
-  UKvckK4yuYkc7mQIgjCE8N8UF4kgsXjPek61TZT1QVI1aYFb78ZAZ0miudqWkx4t
-  YSNDj7llArylrPGHBLQ38X4/
-  -----END PRIVATE KEY-----
-ssl_cert_file_contents: |
-  -----BEGIN CERTIFICATE-----
-  MIIDcTCCAlmgAwIBAgIJAJsHSxF0u/oaMA0GCSqGSIb3DQEBCwUAME8xCzAJBgNV
-  BAYTAlVTMQ4wDAYDVQQHDAVXb3JsZDEOMAwGA1UECgwFT1BORlYxIDAeBgNVBAMM
-  F2NvbnRyb2xsZXIwMC5vcG5mdmxvY2FsMB4XDTE2MDgxNzE2MzQwOFoXDTE3MDgx
-  NzE2MzQwOFowTzELMAkGA1UEBhMCVVMxDjAMBgNVBAcMBVdvcmxkMQ4wDAYDVQQK
-  DAVPUE5GVjEgMB4GA1UEAwwXY29udHJvbGxlcjAwLm9wbmZ2bG9jYWwwggEiMA0G
-  CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0YX6wsA/Jhe3qByoiLsyagO5rOCIy
-  zDsMTV0YMWVIa/QybvS1vI+pK9FIoYPbqWFGHXmQF0DJYulbGnB6A0GlT3YXuaKP
-  ucaaANr5hTjuEBF6LuQeq+OIO5u7+l56HGWbbVeB7+vnIxK943G545aBZSGlUnVf
-  Fg+v+IQtmRr36iEa5UDd4sahDXcp2Dm3zGgkFhFKie6AJ4UUTzrH2SL6Nhl7i+Ae
-  nuoUEDdgDWfGnCXozLngfmhKDi6lHDmh5zJhFS7cKz14wLgF37fsWxxxEX8a6gtG
-  YEEHqXV3x3AXO+U98pr15/xQM9O2O3mrqc/zkmcCRUwCjEeDjEHey3UJAgMBAAGj
-  UDBOMB0GA1UdDgQWBBQyFVbU5s2ihD0hX3W7GyHiHZGG1TAfBgNVHSMEGDAWgBQy
-  FVbU5s2ihD0hX3W7GyHiHZGG1TAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
-  A4IBAQB+xf7I9RVWzRNjMbWBDE6pBvOWnSksv7Jgr4cREvyOxBDaIoO3uQRDDu6r
-  RCgGs1CuwEaFX1SS/OVrKRFiy9kCU/LBZEFwaHRaL2Kj57Z2yNInPIiKB4h9jen2
-  75fYrpq42XUDSI0NpsqAJpmcQqXOOo8V08FlH0/6h8mWdsfQfbyaf+g73+aRZds8
-  Q4ttmBrqY4Pi5CJW46w7LRCA5o92Di3GI9dAh9MVZ3023cTTjDkW04QbluphuTFj
-  O07Npz162/fHTXut+piV78t+1HlfYWY5TOSQMIVwenftA/Bn8+TQAgnLR+nGo/wu
-  oEaxLtj3Jr07+yIjL88ewT+c3fpq
-  -----END CERTIFICATE-----
-infracloud_mysql_password: pass
-opnfv_password: pass
-
-rabbitmq::package_gpg_key: 'https://www.rabbitmq.com/rabbitmq-release-signing-key.asc'
-rabbitmq::repo::apt::key: '0A9AF2115F4687BD29803A206B73A36E6026DFCA'
-
-hosts:
-  jumphost.opnfvlocal:
-    ip: 172.30.13.89
-  controller00.opnfvlocal:
-    ip: 172.30.13.90
-  compute00.opnfvlocal:
-    ip: 172.30.13.91
-
-# settings for bifrost
-bridge_name: br_opnfv
-ironic_db_password: pass
-bifrost_mysql_password: pass
-bifrost_ssh_private_key: |
-  -----BEGIN RSA PRIVATE KEY-----
-  MIIEowIBAAKCAQEAvwr2LbfJQuKZDOQse+DQHX84c9LCHvQfy0pu15JkiLM5dUtx
-  hLr/5fxSzblubS4WkNZVsGTtUp51f8yoQyltqquGlVfUf0GO+PCLaRp0arhli0Rl
-  sAGatI12amnrVap82jINiKQRO+UnF97z2hiB35Zxko4jSaPOOiL48DEKowZHL2Ja
-  jjUt6dXcaNotXNaKZpcxz92gdZhFOPU8BrJ/mI9k9u6QI/4qLG/WzW4frHLigA1t
-  OrZ3Nnu3tloWNsS1lh71KRfEv46VD8tCAZfXqJtjdH4Z4AUO++CLF/K4zXhIoFqU
-  Wf8aS64YzoaAfnJ+jUwKs92dVjuFtbEk+t2YLQIDAQABAoIBAQCAr++YaD6oUV9r
-  caANaiiGVhY+3u9oTmXEWMVFbRVPh/riaglzsUuDLm7QqWIbJXqJ4fcitTmv95GK
-  nt+RLizzVEt5+gnoFs8qHU6rY+ibos6z+0TMRKhjiw8DK4oc0JT9nc3EB1CcmgW1
-  bLeyZ+PEKuEiKaDXkAHw43HwyfgyS3Lc90TSaLj3P7egsBuhx1Yy+wgyiPQ/bF0b
-  OBLHHK+nwYLGAq25n/+zA7XAndc2OQd4KzUJcvjyND+IMYnzEbeFH36UcFqbvgGu
-  nR55yIrCxsxcJhhT2slMNtg/xCmo3Jzz1kNBtwbNBik4/5Lkckny0xhQl+h7vz9U
-  +cKjwfK5AoGBAPSy/JHMeQ5/rzbA5LAZhVa/Yc4B5datkwLNg6mh4CzMabJs8AKd
-  de05XB/Nq6Hfp8Aa7zLt2GIb3iqF6w/y+j8YAXS2KQD8/HDs2/9Oxr512kfssk5D
-  dcpTqeIFetzM9pqnctVXBGlbz0QLeL+lT3kXY00+CBm6LjEv8dsPxZr3AoGBAMfd
-  nDnTjUVZ+sRpTBDM3MhKLMETxNWNDaozL+SgpYQwtKlSTfQVdFcM66a8qCFjQFsc
-  /6AjL0bjCA5u859IoQ4ValD0vgkyLHdEN0P1Grf3MK8kjOW1A1s1i2FY6U0z9AM2
-  zsUCA9bB5A9wwxwofoa8VkaDpVSMITbakVoNxJj7AoGAImcft2fmBTHScoJAJLoR
-  0xZpK8t8gug4aQZ34luN5v5+RcWnINb+g3GzEA2cec+2B/5BbwmdiH2eiJ/3YnCo
-  2kIHwl7x+N+Ypk/GxmhO7Owo2j/e+b3mS6HjmpFmqrBuY2PzcyceyalMxKZQPbGC
-  MOYm4e88uFFCuUuiV0gqYhUCgYBmSFhCE6yxeCnoSEbgNicq7SLYMIjEDOqYVpfE
-  9h2ed9qM6IzyQ+SFBBy4+MVGSOfPeRis2DTCnz8pO8i7lEyvy2/cPFPgmue8pZFu
-  2smwqfUlPJxKlgdArzdEO18x3kubNXo9whk614EiEcAX8fVGeK3iak665Pe+fb5z
-  Cqa47wKBgDp3/dgtMneoePKNefy4a9vp5y4XKviC6GOrr0xpEM2ptZ+I7mUJcACN
-  KbaW0dPgtS1cApelmF73IAJRYbKMW7lQzql61IoGw4pGTIMPKerqRs/hTWYPZiSG
-  QHWf3iTV5uQr6cSRoUgkAUHVw2KTGad41RAhDp352iakZuNNBFga
-  -----END RSA PRIVATE KEY-----
-bifrost_ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/CvYtt8lC4pkM5Cx74NAdfzhz0sIe9B/LSm7XkmSIszl1S3GEuv/l/FLNuW5tLhaQ1lWwZO1SnnV/zKhDKW2qq4aVV9R/QY748ItpGnRquGWLRGWwAZq0jXZqaetVqnzaMg2IpBE75ScX3vPaGIHflnGSjiNJo846IvjwMQqjBkcvYlqONS3p1dxo2i1c1opmlzHP3aB1mEU49TwGsn+Yj2T27pAj/iosb9bNbh+scuKADW06tnc2e7e2WhY2xLWWHvUpF8S/jpUPy0IBl9eom2N0fhngBQ774IsX8rjNeEigWpRZ/xpLrhjOhoB+cn6NTAqz3Z1WO4W1sST63Zgt yolanda@trasto
-infracloud_vlan: 415
-infracloud_gateway_ip: 172.30.13.1
-default_network_interface: eno3
-dhcp_static_mask: 255.255.255.128
-dhcp_pool_start: 10.20.0.130
-dhcp_pool_end: 10.20.0.254
-network_interface: eth1
-ipv4_nameserver: 8.8.8.8
-ipv4_subnet_mask: 255.255.255.0
-ipv4_gateway: 172.30.13.1
-ironic_inventory:
-  controller00.opnfvlocal:
-    driver: agent_ipmitool
-    driver_info:
-      power:
-        ipmi_address: 172.30.8.90
-        ipmi_username: admin
-    provisioning_ipv4_address: 10.20.0.130
-    ipv4_address: 172.30.13.90
-    ansible_ssh_host: 172.30.13.90
-    ipv4_gateway: 172.30.13.1
-    ipv4_interface_mac: 00:1e:67:f6:9b:35
-    ipv4_subnet_mask: 255.255.255.192
-    name: controller00.opnfvlocal
-    nics:
-      - mac: a4:bf:01:01:a9:fc
-      - mac: 00:1e:67:f6:9b:35
-    properties:
-      cpu_arch: x86_64
-      cpus: '44'
-      disk_size: '1800'
-      ram: '65536'
-    uuid: 00a22849-2442-e511-906e-0012795d96dd
-  compute00.opnfvlocal:
-    driver: agent_ipmitool
-    driver_info:
-      power:
-        ipmi_address: 172.30.8.91
-        ipmi_username: admin
-    provisioning_ipv4_address: 10.20.0.131
-    ipv4_address: 172.30.13.91
-    ansible_ssh_host: 172.30.13.91
-    ipv4_gateway: 172.30.13.1
-    ipv4_interface_mac: 00:1e:67:f6:9b:37
-    ipv4_subnet_mask: 255.255.255.0
-    name: compute00.opnfvlocal
-    nics:
-      - mac: a4:bf:01:01:a9:d4
-      - mac: 00:1e:67:f6:9b:37
-    properties:
-      cpu_arch: x86_64
-      cpus: '44'
-      disk_size: '1800'
-      ram: '65536'
-    uuid: 0051e926-f242-e511-906e-0012795d96dd
-ipmi_passwords: {'172.30.8.90': 'octopus', '172.30.8.91': 'octopus'}
-neutron_subnet_cidr: '172.30.13.0/24'
-neutron_subnet_gateway: '172.30.13.1'
-neutron_subnet_allocation_pools:
-  - 'start=172.30.13.100,end=172.30.13.254'
-virt_type: 'kvm'
-dib_dev_user_password: devuser
diff --git a/prototypes/puppet-infracloud/install_modules.sh b/prototypes/puppet-infracloud/install_modules.sh
deleted file mode 100755 (executable)
index 5d5acd9..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/bash
-# Copyright 2014 OpenStack Foundation.
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-MODULE_PATH=`puppet config print modulepath | cut -d ':' -f 1`
-SCRIPT_NAME=$(basename $0)
-SCRIPT_DIR=$(readlink -f "$(dirname $0)")
-JUST_CLONED=0
-
-function remove_module {
-    local SHORT_MODULE_NAME=$1
-    if [ -n "$SHORT_MODULE_NAME" ]; then
-        rm -Rf "$MODULE_PATH/$SHORT_MODULE_NAME"
-    else
-        echo "ERROR: remove_module requires a SHORT_MODULE_NAME."
-    fi
-}
-
-function git_clone {
-    local MOD=$1
-    local DEST=$2
-
-    JUST_CLONED=1
-    for attempt in $(seq 0 3); do
-        clone_error=0
-        git clone $MOD $DEST && break || true
-        rm -rf $DEST
-        clone_error=1
-    done
-    return $clone_error
-}
-
-# Array of modules to be installed key:value is module:version.
-declare -A MODULES
-
-# Array of modules to be installed from source and without dependency resolution.
-# key:value is source location, revision to checkout
-declare -A SOURCE_MODULES
-
-# Array of modules to be installed from source and without dependency resolution from openstack git
-# key:value is source location, revision to checkout
-declare -A INTEGRATION_MODULES
-
-# load modules.env to populate MODULES[*] and SOURCE_MODULES[*]
-# for processing.
-MODULE_ENV_FILE=${MODULE_FILE:-modules.env}
-MODULE_ENV_PATH=${MODULE_ENV_PATH:-${SCRIPT_DIR}}
-if [ -f "${MODULE_ENV_PATH}/${MODULE_ENV_FILE}" ] ; then
-    . "${MODULE_ENV_PATH}/${MODULE_ENV_FILE}"
-fi
-
-if [ -z "${!MODULES[*]}" ] && [ -z "${!SOURCE_MODULES[*]}" ] ; then
-    echo ""
-    echo "WARNING: nothing to do, unable to find MODULES or SOURCE_MODULES"
-    echo "  export options, try setting MODULE_ENV_PATH or MODULE_ENV_FILE"
-    echo "  export to the proper location of modules.env file."
-    echo ""
-    exit 0
-fi
-
-MODULE_LIST=`puppet module list --color=false`
-
-# Install modules from source
-for MOD in ${!SOURCE_MODULES[*]} ; do
-    JUST_CLONED=0
-    # get the name of the module directory
-    if [ `echo $MOD | awk -F. '{print $NF}'` = 'git' ]; then
-        echo "Remote repos of the form repo.git are not supported: ${MOD}"
-        exit 1
-    fi
-
-    MODULE_NAME=`echo $MOD | awk -F- '{print $NF}'`
-
-    # set up git base command to use the correct path
-    GIT_CMD_BASE="git --git-dir=${MODULE_PATH}/${MODULE_NAME}/.git --work-tree ${MODULE_PATH}/${MODULE_NAME}"
-    # treat any occurrence of the module as a match
-    if ! echo $MODULE_LIST | grep "${MODULE_NAME}" >/dev/null 2>&1; then
-        # clone modules that are not installed
-        git_clone $MOD "${MODULE_PATH}/${MODULE_NAME}"
-    else
-        if [ ! -d ${MODULE_PATH}/${MODULE_NAME}/.git ]; then
-            echo "Found directory ${MODULE_PATH}/${MODULE_NAME} that is not a git repo, deleting it and reinstalling from source"
-            remove_module $MODULE_NAME
-            git_clone $MOD "${MODULE_PATH}/${MODULE_NAME}"
-        elif [ `${GIT_CMD_BASE} remote show origin | grep 'Fetch URL' | awk -F'URL: ' '{print $2}'` != $MOD ]; then
-            echo "Found remote in ${MODULE_PATH}/${MODULE_NAME} that does not match desired remote ${MOD}, deleting dir and re-cloning"
-            remove_module $MODULE_NAME
-            git_clone $MOD "${MODULE_PATH}/${MODULE_NAME}"
-        fi
-    fi
-
-    # fetch the latest refs from the repo
-    if [[ $JUST_CLONED -eq 0 ]] ; then
-        # If we just cloned the repo, we do not need to remote update
-        for attempt in $(seq 0 3); do
-            clone_error=0
-            $GIT_CMD_BASE remote update && break || true
-            clone_error=1
-        done
-        if [[ $clone_error -ne 0 ]] ; then
-            exit $clone_error
-        fi
-    fi
-    # make sure the correct revision is installed, I have to use rev-list b/c rev-parse does not work with tags
-    if [ `${GIT_CMD_BASE} rev-list HEAD --max-count=1` != `${GIT_CMD_BASE} rev-list ${SOURCE_MODULES[$MOD]} --max-count=1` ]; then
-        # checkout correct revision
-        $GIT_CMD_BASE checkout ${SOURCE_MODULES[$MOD]}
-    fi
-done
diff --git a/prototypes/puppet-infracloud/install_puppet.sh b/prototypes/puppet-infracloud/install_puppet.sh
deleted file mode 100755 (executable)
index ae25944..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-#!/bin/bash -x
-
-# Copyright 2013 OpenStack Foundation.
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-#
-# Distro identification functions
-#  note, can't rely on lsb_release for these as we're bare-bones and
-#  it may not be installed yet)
-
-
-function is_fedora {
-    [ -f /usr/bin/yum ] && cat /etc/*release | grep -q -e "Fedora"
-}
-
-function is_rhel7 {
-    [ -f /usr/bin/yum ] && \
-        cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
-        cat /etc/*release | grep -q 'release 7'
-}
-
-function is_ubuntu {
-    [ -f /usr/bin/apt-get ]
-}
-
-function is_opensuse {
-    [ -f /usr/bin/zypper ] && \
-        cat /etc/os-release | grep -q -e "openSUSE"
-}
-
-function is_gentoo {
-    [ -f /usr/bin/emerge ]
-}
-
-# dnf is a drop-in replacement for yum on Fedora>=22
-YUM=yum
-if is_fedora && [[ $(lsb_release -rs) -ge 22 ]]; then
-    YUM=dnf
-fi
-
-
-#
-# Distro specific puppet installs
-#
-
-function _systemd_update {
-    # there is a bug (rhbz#1261747) where systemd can fail to enable
-    # services due to selinux errors after upgrade.  A work-around is
-    # to install the latest version of selinux and systemd here and
-    # restart the daemon for good measure after it is upgraded.
-    $YUM install -y selinux-policy
-    $YUM install -y systemd
-    systemctl daemon-reload
-}
-
-function setup_puppet_fedora {
-    _systemd_update
-
-    $YUM update -y
-
-    # NOTE: we preinstall lsb_release here to ensure facter sets
-    # lsbdistcodename
-    #
-    # Fedora declares some global hardening flags, which distutils
-    # pick up when building python modules.  redhat-rpm-config
-    # provides the required config options.  Really this should be a
-    # dependency of python-devel (fix in the works, see
-    # https://bugzilla.redhat.com/show_bug.cgi?id=1217376) and can be
-    # removed when that is sorted out.
-
-    $YUM install -y redhat-lsb-core git puppet \
-        redhat-rpm-config
-
-    mkdir -p /etc/puppet/modules/
-
-    # Puppet expects the pip command named as pip-python on
-    # Fedora, as per the packaged command name.  However, we're
-    # installing from get-pip.py so it's just 'pip'.  An easy
-    # work-around is to just symlink pip-python to "fool" it.
-    # See upstream issue:
-    #  https://tickets.puppetlabs.com/browse/PUP-1082
-    ln -fs /usr/bin/pip /usr/bin/pip-python
-    # Wipe out templatedir so we don't get warnings about it
-    sed -i '/templatedir/d' /etc/puppet/puppet.conf
-
-    # upstream is currently looking for /run/systemd files to check
-    # for systemd.  This fails in a chroot where /run isn't mounted
-    # (like when using dib).  Comment out this confine as fedora
-    # always has systemd
-    #  see
-    #   https://github.com/puppetlabs/puppet/pull/4481
-    #   https://bugzilla.redhat.com/show_bug.cgi?id=1254616
-    sudo sed -i.bak  '/^[^#].*/ s|\(^.*confine :exists => \"/run/systemd/system\".*$\)|#\ \1|' \
-        /usr/share/ruby/vendor_ruby/puppet/provider/service/systemd.rb
-
-    # upstream "requests" pip package vendors urllib3 and chardet
-    # packages.  The fedora packages un-vendor this, and symlink those
-    # sub-packages back to packaged versions.  We get into a real mess
-    # of if some of the puppet ends up pulling in "requests" from pip,
-    # and then something like devstack does a "yum install
-    # python-requests" which does a very bad job at overwriting the
-    # pip-installed version (symlinks and existing directories don't
-    # mix).  A solution is to pre-install the python-requests
-    # package; clear it out and re-install from pip.  This way, the
-    # package is installed for dependencies, and we have a pip-managed
-    # requests with correctly vendored sub-packages.
-    sudo ${YUM} install -y python2-requests
-    sudo rm -rf /usr/lib/python2.7/site-packages/requests/*
-    sudo rm -rf /usr/lib/python2.7/site-packages/requests-*.{egg,dist}-info
-    sudo pip install requests
-}
-
-function setup_puppet_rhel7 {
-    local puppet_pkg="https://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm"
-
-    # install a bootstrap epel repo to install latest epel-release
-    # package (which provides correct gpg keys, etc); then remove
-# bootstrap
-    cat > /etc/yum.repos.d/epel-bootstrap.repo <<EOF
-[epel-bootstrap]
-name=Bootstrap EPEL
-mirrorlist=https://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=\$basearch
-failovermethod=priority
-enabled=0
-gpgcheck=0
-EOF
-    yum --enablerepo=epel-bootstrap -y install epel-release
-    rm -f /etc/yum.repos.d/epel-bootstrap.repo
-
-    _systemd_update
-    yum update -y
-
-    # NOTE: we preinstall lsb_release to ensure facter sets lsbdistcodename
-    yum install -y redhat-lsb-core git puppet
-
-    rpm -ivh $puppet_pkg
-
-    # see comments in setup_puppet_fedora
-    ln -s /usr/bin/pip /usr/bin/pip-python
-    # Wipe out templatedir so we don't get warnings about it
-    sed -i '/templatedir/d' /etc/puppet/puppet.conf
-
-    # install RDO repo as well; this covers a few things like
-    # openvswitch that aren't available for EPEL
-    yum install -y https://rdoproject.org/repos/rdo-release.rpm
-}
-
-function setup_puppet_ubuntu {
-    if ! which lsb_release > /dev/null 2<&1 ; then
-        DEBIAN_FRONTEND=noninteractive apt-get --option 'Dpkg::Options::=--force-confold' \
-            --assume-yes install -y --force-yes lsb-release
-    fi
-
-    lsbdistcodename=`lsb_release -c -s`
-    if [ $lsbdistcodename != 'trusty' ] ; then
-        rubypkg=rubygems
-    else
-        rubypkg=ruby
-    fi
-
-
-    PUPPET_VERSION=3.*
-    PUPPETDB_VERSION=2.*
-    FACTER_VERSION=2.*
-
-    cat > /etc/apt/preferences.d/00-puppet.pref <<EOF
-Package: puppet puppet-common puppetmaster puppetmaster-common puppetmaster-passenger
-Pin: version $PUPPET_VERSION
-Pin-Priority: 501
-
-Package: puppetdb puppetdb-terminus
-Pin: version $PUPPETDB_VERSION
-Pin-Priority: 501
-
-Package: facter
-Pin: version $FACTER_VERSION
-Pin-Priority: 501
-EOF
-
-    # NOTE(pabelanger): Puppetlabs does not support ubuntu xenial. Instead use
-    # the version of puppet ship by xenial.
-    if [ $lsbdistcodename != 'xenial' ]; then
-        puppet_deb=puppetlabs-release-${lsbdistcodename}.deb
-        if type curl >/dev/null 2>&1; then
-            curl -O http://apt.puppetlabs.com/$puppet_deb
-        else
-            wget http://apt.puppetlabs.com/$puppet_deb -O $puppet_deb
-        fi
-        dpkg -i $puppet_deb
-        rm $puppet_deb
-    fi;
-
-    apt-get update
-    DEBIAN_FRONTEND=noninteractive apt-get --option 'Dpkg::Options::=--force-confold' \
-        --assume-yes dist-upgrade
-    DEBIAN_FRONTEND=noninteractive apt-get --option 'Dpkg::Options::=--force-confold' \
-        --assume-yes install -y --force-yes puppet git $rubypkg
-    # Wipe out templatedir so we don't get warnings about it
-    sed -i '/templatedir/d' /etc/puppet/puppet.conf
-}
-
-function setup_puppet_opensuse {
-    local version=`grep -e "VERSION_ID" /etc/os-release | tr -d "\"" | cut -d "=" -f2`
-    zypper ar http://download.opensuse.org/repositories/systemsmanagement:/puppet/openSUSE_${version}/systemsmanagement:puppet.repo
-    zypper -v --gpg-auto-import-keys --no-gpg-checks -n ref
-    zypper --non-interactive in --force-resolution puppet
-    # Wipe out templatedir so we don't get warnings about it
-    sed -i '/templatedir/d' /etc/puppet/puppet.conf
-}
-
-function setup_puppet_gentoo {
-    echo yes | emaint sync -a
-    emerge -q --jobs=4 puppet-agent
-    sed -i '/templatedir/d' /etc/puppetlabs/puppet/puppet.conf
-}
-
-#
-# pip setup
-#
-
-function setup_pip {
-    # Install pip using get-pip
-    local get_pip_url=https://bootstrap.pypa.io/get-pip.py
-    local ret=1
-
-    if [ -f ./get-pip.py ]; then
-        ret=0
-    elif type curl >/dev/null 2>&1; then
-        curl -O $get_pip_url
-        ret=$?
-    elif type wget >/dev/null 2>&1; then
-        wget $get_pip_url
-        ret=$?
-    fi
-
-    if [ $ret -ne 0 ]; then
-        echo "Failed to get get-pip.py"
-        exit 1
-    fi
-
-    if is_opensuse; then
-        zypper --non-interactive in --force-resolution python python-xml
-    fi
-
-    python get-pip.py
-    rm get-pip.py
-
-    # we are about to overwrite setuptools, but some packages we
-    # install later might depend on the python-setuptools package.  To
-    # avoid later conflicts, and because distro packages don't include
-    # enough info for pip to certain it can fully uninstall the old
-    # package, for safety we clear it out by hand (this seems to have
-    # been a problem with very old to new updates, e.g. centos6 to
-    # current-era, but less so for smaller jumps).  There is a bit of
-    # chicken-and-egg problem with pip in that it requires setuptools
-    # for some operations, such as wheel creation.  But just
-    # installing setuptools shouldn't require setuptools itself, so we
-    # are safe for this small section.
-    if is_rhel7 || is_fedora; then
-        yum install -y python-setuptools
-        rm -rf /usr/lib/python2.7/site-packages/setuptools*
-    fi
-
-    pip install -U setuptools
-}
-
-setup_pip
-
-if is_fedora; then
-    setup_puppet_fedora
-elif is_rhel7; then
-    setup_puppet_rhel7
-elif is_ubuntu; then
-    setup_puppet_ubuntu
-elif is_opensuse; then
-    setup_puppet_opensuse
-elif is_gentoo; then
-    setup_puppet_gentoo
-else
-    echo "*** Can not setup puppet: distribution not recognized"
-    exit 1
-fi
-
diff --git a/prototypes/puppet-infracloud/manifests/site.pp b/prototypes/puppet-infracloud/manifests/site.pp
deleted file mode 100644 (file)
index 3483b06..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 RedHat and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-node 'controller00.opnfvlocal' {
-  $group = 'infracloud'
-  include ::sudoers
-
-  class { '::opnfv::server':
-    iptables_public_tcp_ports => [80,5000,5671,8774,9292,9696,35357], # logs,keystone,rabbit,nova,glance,neutron,keystone
-    sysadmins                 => hiera('sysadmins', []),
-    enable_unbound            => false,
-    purge_apt_sources         => false,
-  }
-  class { '::opnfv::controller':
-    keystone_rabbit_password         => hiera('keystone_rabbit_password'),
-    neutron_rabbit_password          => hiera('neutron_rabbit_password'),
-    nova_rabbit_password             => hiera('nova_rabbit_password'),
-    root_mysql_password              => hiera('infracloud_mysql_password'),
-    keystone_mysql_password          => hiera('keystone_mysql_password'),
-    glance_mysql_password            => hiera('glance_mysql_password'),
-    neutron_mysql_password           => hiera('neutron_mysql_password'),
-    nova_mysql_password              => hiera('nova_mysql_password'),
-    keystone_admin_password          => hiera('keystone_admin_password'),
-    glance_admin_password            => hiera('glance_admin_password'),
-    neutron_admin_password           => hiera('neutron_admin_password'),
-    nova_admin_password              => hiera('nova_admin_password'),
-    keystone_admin_token             => hiera('keystone_admin_token'),
-    ssl_key_file_contents            => hiera('ssl_key_file_contents'),
-    ssl_cert_file_contents           => hiera('ssl_cert_file_contents'),
-    br_name                          => hiera('bridge_name'),
-    controller_public_address        => $::fqdn,
-    neutron_subnet_cidr              => hiera('neutron_subnet_cidr'),
-    neutron_subnet_gateway           => hiera('neutron_subnet_gateway'),
-    neutron_subnet_allocation_pools  => hiera('neutron_subnet_allocation_pools'),
-    opnfv_password                   => hiera('opnfv_password'),
-    require                          => Class['::opnfv::server'],
-  }
-}
-
-node 'compute00.opnfvlocal' {
-  $group = 'infracloud'
-  include ::sudoers
-
-  class { '::opnfv::server':
-    sysadmins                 => hiera('sysadmins', []),
-    enable_unbound            => false,
-    purge_apt_sources         => false,
-  }
-
-  class { '::opnfv::compute':
-    nova_rabbit_password             => hiera('nova_rabbit_password'),
-    neutron_rabbit_password          => hiera('neutron_rabbit_password'),
-    neutron_admin_password           => hiera('neutron_admin_password'),
-    ssl_cert_file_contents           => hiera('ssl_cert_file_contents'),
-    ssl_key_file_contents            => hiera('ssl_key_file_contents'),
-    br_name                          => hiera('bridge_name'),
-    controller_public_address        => 'controller00.opnfvlocal',
-    virt_type                        => hiera('virt_type'),
-    require                          => Class['::opnfv::server'],
-  }
-}
-
-node 'jumphost.opnfvlocal' {
-  class { '::opnfv::server':
-    sysadmins                 => hiera('sysadmins', []),
-    enable_unbound            => false,
-    purge_apt_sources         => false,
-  }
-}
-
-node 'baremetal.opnfvlocal', 'lfpod5-jumpserver' {
-  class { '::opnfv::server':
-    iptables_public_udp_ports => [67, 69],
-    sysadmins                 => hiera('sysadmins', []),
-    enable_unbound            => false,
-    purge_apt_sources         => false,
-  }
-
-  class { '::infracloud::bifrost':
-    ironic_inventory          => hiera('ironic_inventory', {}),
-    ironic_db_password        => hiera('ironic_db_password'),
-    mysql_password            => hiera('bifrost_mysql_password'),
-    ipmi_passwords            => hiera('ipmi_passwords'),
-    ssh_private_key           => hiera('bifrost_ssh_private_key'),
-    ssh_public_key            => hiera('bifrost_ssh_public_key'),
-    vlan                      => hiera('infracloud_vlan'),
-    gateway_ip                => hiera('infracloud_gateway_ip'),
-    default_network_interface => hiera('default_network_interface'),
-    dhcp_static_mask          => hiera('dhcp_static_mask'),
-    dhcp_pool_start           => hiera('dhcp_pool_start'),
-    dhcp_pool_end             => hiera('dhcp_pool_end'),
-    network_interface         => hiera('network_interface'),
-    ipv4_nameserver           => hiera('ipv4_nameserver'),
-    ipv4_subnet_mask          => hiera('ipv4_subnet_mask'),
-    bridge_name               => hiera('bridge_name'),
-    dib_dev_user_password     => hiera('dib_dev_user_password'),
-    require                   => Class['::opnfv::server'],
-  }
-}
diff --git a/prototypes/puppet-infracloud/modules.env b/prototypes/puppet-infracloud/modules.env
deleted file mode 100644 (file)
index 9c07ec9..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2014 OpenStack Foundation.
-# Copyright 2016 RedHat.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# load additional modules from modules.env
-# modules.env should exist in the same folder as install_modules.sh
-#
-# - use export MODULE_FILE to specify an alternate config
-#   when calling install_modules.sh.
-#   This allows for testing environments that are configured with alternate
-#   module configuration.
-
-# Source modules should use tags, explicit refs or remote branches because
-# we do not update local branches in this script.
-# Keep sorted
-
-OPENSTACK_GIT_ROOT=https://git.openstack.org
-
-# InfraCloud modules
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-cinder"]="origin/stable/mitaka"
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-glance"]="origin/stable/mitaka"
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-ironic"]="origin/stable/mitaka"
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-keystone"]="origin/stable/mitaka"
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-neutron"]="origin/stable/mitaka"
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-nova"]="origin/stable/mitaka"
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-openstack_extras"]="origin/stable/mitaka"
-SOURCE_MODULES["$OPENSTACK_GIT_ROOT/openstack/puppet-openstacklib"]="origin/stable/mitaka"
-
-SOURCE_MODULES["https://git.openstack.org/openstack-infra/puppet-vcsrepo"]="0.0.8"
-SOURCE_MODULES["https://github.com/duritong/puppet-sysctl"]="v0.0.11"
-SOURCE_MODULES["https://github.com/nanliu/puppet-staging"]="1.0.0"
-SOURCE_MODULES["https://github.com/jfryman/puppet-selinux"]="v0.2.5"
-SOURCE_MODULES["https://github.com/maestrodev/puppet-wget"]="v1.6.0"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-apache"]="1.8.1"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-apt"]="2.1.0"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-concat"]="1.2.5"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-firewall"]="1.1.3"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-haproxy"]="1.5.0"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-inifile"]="1.1.3"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-mysql"]="3.6.2"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-ntp"]="3.2.1"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-rabbitmq"]="5.2.3"
-SOURCE_MODULES["https://github.com/puppetlabs/puppetlabs-stdlib"]="4.10.0"
-SOURCE_MODULES["https://github.com/rafaelfelix/puppet-pear"]="1.0.3"
-SOURCE_MODULES["https://github.com/saz/puppet-memcached"]="v2.6.0"
-SOURCE_MODULES["https://github.com/saz/puppet-timezone"]="v3.3.0"
-SOURCE_MODULES["https://github.com/stankevich/puppet-python"]="1.9.4"
-SOURCE_MODULES["https://github.com/vamsee/puppet-solr"]="0.0.8"
-SOURCE_MODULES["https://github.com/voxpupuli/puppet-alternatives"]="0.3.0"
-SOURCE_MODULES["https://github.com/voxpupuli/puppet-archive"]="v0.5.1"
-SOURCE_MODULES["https://github.com/voxpupuli/puppet-git_resource"]="0.3.0"
-SOURCE_MODULES["https://github.com/voxpupuli/puppet-nodejs"]="1.2.0"
-SOURCE_MODULES["https://github.com/voxpupuli/puppet-puppetboard"]="2.4.0"
-
-
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-ansible"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-httpd"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-infracloud"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-iptables"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-logrotate"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-pip"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-snmpd"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-ssh"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-ssl_cert_check"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-sudoers"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-ulimit"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-unattended_upgrades"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-unbound"]="origin/master"
-INTEGRATION_MODULES["$OPENSTACK_GIT_ROOT/openstack-infra/puppet-user"]="origin/master"
-
-for MOD in ${!INTEGRATION_MODULES[*]}; do
- SOURCE_MODULES[$MOD]=${INTEGRATION_MODULES[$MOD]}
-done
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
deleted file mode 100644 (file)
index ca548a5..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-class opnfv::compute (
-  $nova_rabbit_password,
-  $neutron_rabbit_password,
-  $neutron_admin_password,
-  $ssl_cert_file_contents,
-  $ssl_key_file_contents,
-  $br_name,
-  $controller_public_address,
-  $virt_type = 'kvm',
-) {
-  class { '::infracloud::compute':
-    nova_rabbit_password          => $nova_rabbit_password,
-    neutron_rabbit_password       => $neutron_rabbit_password,
-    neutron_admin_password        => $neutron_admin_password,
-    ssl_cert_file_contents        => $ssl_cert_file_contents,
-    ssl_key_file_contents         => $ssl_key_file_contents,
-    br_name                       => $br_name,
-    controller_public_address     => $controller_public_address,
-    virt_type                     => $virt_type,
-  }
-
-}
-
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
deleted file mode 100644 (file)
index 7522692..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 RedHat and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-class opnfv::controller (
-  $keystone_rabbit_password,
-  $neutron_rabbit_password,
-  $nova_rabbit_password,
-  $root_mysql_password,
-  $keystone_mysql_password,
-  $glance_mysql_password,
-  $neutron_mysql_password,
-  $nova_mysql_password,
-  $glance_admin_password,
-  $keystone_admin_password,
-  $neutron_admin_password,
-  $nova_admin_password,
-  $keystone_admin_token,
-  $ssl_key_file_contents,
-  $ssl_cert_file_contents,
-  $br_name,
-  $controller_public_address = $::fqdn,
-  $neutron_subnet_cidr,
-  $neutron_subnet_gateway,
-  $neutron_subnet_allocation_pools,
-  $opnfv_password,
-  $opnfv_email = 'opnfvuser@gmail.com',
-) {
-  class { '::infracloud::controller':
-    keystone_rabbit_password         => $keystone_rabbit_password,
-    neutron_rabbit_password          => $neutron_rabbit_password,
-    nova_rabbit_password             => $nova_rabbit_password,
-    root_mysql_password              => $root_mysql_password,
-    keystone_mysql_password          => $keystone_mysql_password,
-    glance_mysql_password            => $glance_mysql_password,
-    neutron_mysql_password           => $neutron_mysql_password,
-    nova_mysql_password              => $nova_mysql_password,
-    keystone_admin_password          => $keystone_admin_password,
-    glance_admin_password            => $glance_admin_password,
-    neutron_admin_password           => $neutron_admin_password,
-    nova_admin_password              => $nova_admin_password,
-    keystone_admin_token             => $keystone_admin_token,
-    ssl_key_file_contents            => $ssl_key_file_contents,
-    ssl_cert_file_contents           => $ssl_cert_file_contents,
-    br_name                          => $br_name,
-    controller_public_address        => $controller_public_address,
-    neutron_subnet_cidr              => $neutron_subnet_cidr,
-    neutron_subnet_gateway           => $neutron_subnet_gateway,
-    neutron_subnet_allocation_pools  => $neutron_subnet_allocation_pools,
-  }
-
-  # create keystone creds
-  keystone_domain { 'opnfv':
-    ensure  => present,
-    enabled => true,
-  }
-
-  keystone_tenant { 'opnfv':
-    ensure      => present,
-    enabled     => true,
-    description => 'OPNFV cloud',
-    domain      => 'opnfv',
-    require     => Keystone_domain['opnfv'],
-  }
-
-  keystone_user { 'opnfv':
-    ensure   => present,
-    enabled  => true,
-    domain   => 'opnfv',
-    email    => $opnfv_email,
-    password => $opnfv_password,
-    require  => Keystone_tenant['opnfv'],
-  }
-
-  keystone_role { 'user': ensure => present }
-
-  keystone_user_role { 'opnfv::opnfv@opnfv::opnfv':
-    roles => [ 'user', 'admin', ],
-  }
-}
-
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp
deleted file mode 100644 (file)
index d167973..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 RedHat and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-class opnfv::server (
-  $iptables_public_tcp_ports = [],
-  $iptables_public_udp_ports = [],
-  $iptables_rules4           = [],
-  $iptables_rules6           = [],
-  $sysadmins                 = [],
-  $enable_unbound            = true,
-  $purge_apt_sources         = true,
-) {
-  ###########################################################
-  # Classes for all hosts
-
-  include snmpd
-
-  class { 'iptables':
-    public_tcp_ports => $iptables_public_tcp_ports,
-    public_udp_ports => $iptables_public_udp_ports,
-    rules4           => $iptables_rules4,
-    rules6           => $iptables_rules6,
-  }
-
-  class { 'timezone':
-    timezone => 'Etc/UTC',
-  }
-
-  if ($enable_unbound) {
-    class { 'unbound':
-      install_resolv_conf => $install_resolv_conf
-    }
-  }
-
-  if ($::in_chroot) {
-    notify { 'rsyslog in chroot':
-      message => 'rsyslog not refreshed, running in chroot',
-    }
-    $rsyslog_notify = []
-  } else {
-    service { 'rsyslog':
-      ensure     => running,
-      enable     => true,
-      hasrestart => true,
-      require    => Package['rsyslog'],
-    }
-    $rsyslog_notify = [ Service['rsyslog'] ]
-  }
-
-  ###########################################################
-  # System tweaks
-
-  # Increase syslog message size in order to capture
-  # python tracebacks with syslog.
-  file { '/etc/rsyslog.d/99-maxsize.conf':
-    ensure  => present,
-    # Note MaxMessageSize is not a puppet variable.
-    content => '$MaxMessageSize 6k',
-    owner   => 'root',
-    group   => 'root',
-    mode    => '0644',
-    notify  => $rsyslog_notify,
-    require => Package['rsyslog'],
-  }
-
-  # We don't like byobu
-  file { '/etc/profile.d/Z98-byobu.sh':
-    ensure => absent,
-  }
-
-  if $::osfamily == 'Debian' {
-
-    # Ubuntu installs their whoopsie package by default, but it eats through
-    # memory and we don't need it on servers
-    package { 'whoopsie':
-      ensure => absent,
-    }
-
-    package { 'popularity-contest':
-      ensure => absent,
-    }
-  }
-
-  ###########################################################
-  # Package resources for all operating systems
-
-  package { 'at':
-    ensure => present,
-  }
-
-  package { 'lvm2':
-    ensure => present,
-  }
-
-  package { 'strace':
-    ensure => present,
-  }
-
-  package { 'tcpdump':
-    ensure => present,
-  }
-
-  package { 'rsyslog':
-    ensure => present,
-  }
-
-  package { 'git':
-    ensure => present,
-  }
-
-  package { 'rsync':
-    ensure => present,
-  }
-
-  case $::osfamily {
-    'RedHat': {
-      $packages = ['parted', 'puppet', 'wget', 'iputils']
-      $user_packages = ['emacs-nox', 'vim-enhanced']
-      $update_pkg_list_cmd = ''
-    }
-    'Debian': {
-      $packages = ['parted', 'puppet', 'wget', 'iputils-ping']
-      case $::operatingsystemrelease {
-        /^(12|14)\.(04|10)$/: {
-          $user_packages = ['emacs23-nox', 'vim-nox', 'iftop',
-                            'sysstat', 'iotop']
-        }
-        default: {
-          $user_packages = ['emacs-nox', 'vim-nox']
-        }
-      }
-      $update_pkg_list_cmd = 'apt-get update >/dev/null 2>&1;'
-    }
-    default: {
-      fail("Unsupported osfamily: ${::osfamily} The 'openstack_project' module only supports osfamily Debian or RedHat (slaves only).")
-    }
-  }
-  package { $packages:
-    ensure => present
-  }
-
-  ###########################################################
-  # Package resources for specific operating systems
-
-  case $::osfamily {
-    'Debian': {
-      # Purge and augment existing /etc/apt/sources.list if requested, and make
-      # sure apt-get update is run before any packages are installed
-      class { '::apt':
-        purge => { 'sources.list' => $purge_apt_sources }
-      }
-
-      # Make sure dig is installed
-      package { 'dnsutils':
-        ensure => present,
-      }
-    }
-    'RedHat': {
-      # Make sure dig is installed
-      package { 'bind-utils':
-        ensure => present,
-      }
-    }
-  }
-
-  ###########################################################
-  # Manage  ntp
-
-  include '::ntp'
-
-  if ($::osfamily == "RedHat") {
-    # Utils in ntp-perl are included in Debian's ntp package; we
-    # add it here for consistency.  See also
-    # https://tickets.puppetlabs.com/browse/MODULES-3660
-    package { 'ntp-perl':
-      ensure => present
-    }
-    # NOTE(pabelanger): We need to ensure ntpdate service starts on boot for
-    # centos-7.  Currently, ntpd explicitly require ntpdate to be running before
-    # the sync process can happen in ntpd.  As a result, if ntpdate is not
-    # running, ntpd will start but fail to sync because of DNS is not properly
-    # setup.
-    package { 'ntpdate':
-      ensure => present,
-    }
-    service { 'ntpdate':
-      enable => true,
-      require => Package['ntpdate'],
-    }
-  }
-
-  ###########################################################
-  # Manage  python/pip
-
-  $desired_virtualenv = '13.1.0'
-  class { '::pip':
-    optional_settings => {
-      'extra-index-url' => '',
-    },
-    manage_pip_conf => true,
-  }
-
-  if (( versioncmp($::virtualenv_version, $desired_virtualenv) < 0 )) {
-    $virtualenv_ensure = $desired_virtualenv
-  } else {
-    $virtualenv_ensure = present
-  }
-  package { 'virtualenv':
-    ensure   => $virtualenv_ensure,
-    provider => openstack_pip,
-    require  => Class['pip'],
-  }
-
-  # manage root ssh
-  if ! defined(File['/root/.ssh']) {
-    file { '/root/.ssh':
-      ensure => directory,
-      mode   => '0700',
-    }
-  }
-
-  # ensure that we have non-pass sudo, and
-  # not require tty
-  file_line { 'sudo_rule_no_pw':
-    path => '/etc/sudoers',
-    line => '%wheel     ALL=(ALL)       NOPASSWD: ALL',
-  }
-  file_line { 'sudo_rule_notty':
-    path   => '/etc/sudoers',
-    line   => 'Defaults    requiretty',
-    match  => '.*requiretty.*',
-    match_for_absence => true,
-    ensure => absent,
-    multiple => true,
-  }
-
-  # update hosts
-  create_resources('host', hiera_hash('hosts'))
-}
diff --git a/prototypes/xci/README.rst b/prototypes/xci/README.rst
deleted file mode 100644 (file)
index 0d93665..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-###########################
-OPNFV XCI Developer Sandbox
-###########################
-
-The XCI Developer Sandbox is created by the OPNFV community for the OPNFV
-community in order to
-
-- provide means for OPNFV developers to work with OpenStack master branch,
-  cutting the time it takes to develop new features significantly and testing
-  them on OPNFV Infrastructure
-- enable OPNFV developers to identify bugs earlier, issue fixes faster, and
-  get feedback on a daily basis
-- establish mechanisms to run additional testing on OPNFV Infrastructure to
-  provide feedback to OpenStack community
-- make the solutions we put in place available to other LF Networking Projects
-  OPNFV works with closely
-
-More information about OPNFV XCI and the sandbox can be seen on
-`OPNFV Wiki <https://wiki.opnfv.org/pages/viewpage.action?pageId=8687635>`_.
-
-===================================
-Components of XCI Developer Sandbox
-===================================
-
-The sandbox uses OpenStack projects for VM node creation, provisioning
-and OpenStack installation.
-
-- **openstack/bifrost:** Bifrost (pronounced bye-frost) is a set of Ansible
-  playbooks that automates the task of deploying a base image onto a set
-  of known hardware using ironic. It provides modular utility for one-off
-  operating system deployment with as few operational requirements as
-  reasonably possible. Bifrost supports different operating systems such as
-  Ubuntu, CentOS, and openSUSE.
-  More information about this project can be seen on
-  `Bifrost documentation <https://docs.openstack.org/developer/bifrost/>`_.
-
-- **openstack/openstack-ansible:** OpenStack-Ansible is an official OpenStack
-  project which aims to deploy production environments from source in a way
-  that makes it scalable while also being simple to operate, upgrade, and grow.
-  More information about this project can be seen on
-  `OpenStack Ansible documentation <https://docs.openstack.org/developer/openstack-ansible/>`_.
-
-- **opnfv/releng:** OPNFV Releng Project provides additional scripts, Ansible
-  playbooks and configuration options in order for developers to have easy
-  way of using openstack/bifrost and openstack/openstack-ansible by just
-  setting couple of environment variables and executing a single script.
-  More information about this project can be seen on
-  `OPNFV Releng documentation <https://wiki.opnfv.org/display/releng>`_.
-
-==========
-Basic Flow
-==========
-
-Here are the steps that take place upon the execution of the sandbox script
-``xci-deploy.sh``:
-
-1. Sources environment variables in order to set things up properly.
-2. Installs ansible on the host where sandbox script is executed.
-3. Creates and provisions VM nodes based on the flavor chosen by the user.
-4. Configures the host where the sandbox script is executed.
-5. Configures the deployment host which the OpenStack installation will
-   be driven from.
-6. Configures the target hosts where OpenStack will be installed.
-7. Configures the target hosts as controller(s) and compute(s) nodes.
-8. Starts the OpenStack installation.
-
-=====================
-Sandbox Prerequisites
-=====================
-
-In order to use this sandbox, the host must have certain packages installed.
-
-- libvirt
-- python
-- pip
-- git
-- <fix the list with all the dependencies>
-- passwordless sudo
-
-The host must also have enough CPU/RAM/Disk in order to host number of VM
-nodes that will be created based on the chosen flavor. See the details from
-`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-Prerequisites>`_.
-
-===========================
-Flavors Provided by Sandbox
-===========================
-
-OPNFV XCI Sandbox provides different flavors such as all in one (aio) which
-puts much lower requirements on the host machine and full-blown HA.
-
-* aio: Single node which acts as the deployment host, controller and compute.
-* mini: One deployment host, 1 controller node and 1 compute node.
-* noha: One deployment host, 1 controller node and 2 compute nodes.
-* ha: One deployment host, 3 controller nodes and 2 compute nodes.
-
-See the details of the flavors from
-`this link <https://wiki.opnfv.org/display/INF/XCI+Developer+Sandbox#XCIDeveloperSandbox-AvailableFlavors>`_.
-
-==========
-How to Use
-==========
-
-Basic Usage
------------
-
-clone OPNFV Releng repository
-
-    git clone https://gerrit.opnfv.org/gerrit/releng.git
-
-change into directory where the sandbox script is located
-
-    cd releng/prototypes/xci
-
-execute sandbox script
-
-    ./xci-deploy.sh
-
-Issuing the above command will start the aio sandbox deployment and the
-sandbox should be ready between 1.5 and 2 hours depending on the host machine.
-
-Please remember that the user executing the XCI script will need to
-have an ssh key available, and stored in $HOME/.ssh directory.
-You can generate one by executing
-
-    ssh-keygen -t rsa
-
-Advanced Usage
---------------
-
-The flavor to deploy, the versions of upstream components to use can
-be configured by developers by setting certain environment variables.
-Below example deploys noha flavor using the latest of openstack-ansible
-master branch and stores logs in different location than what is configured.
-
-clone OPNFV Releng repository
-
-    git clone https://gerrit.opnfv.org/gerrit/releng.git
-
-change into directory where the sandbox script is located
-
-    cd releng/prototypes/xci
-
-set the sandbox flavor
-
-    export XCI_FLAVOR=noha
-
-set the version to use for openstack-ansible
-
-    export OPENSTACK_OSA_VERSION=master
-
-set where the logs should be stored
-
-    export LOG_PATH=/home/jenkins/xcilogs
-
-execute sandbox script
-
-    ./xci-deploy.sh
-
-===============
-User Variables
-===============
-
-All user variables can be set from command line by exporting them before
-executing the script. The current user variables can be seen from
-``releng/prototypes/xci/config/user-vars``.
-
-The variables can also be set directly within the file before executing
-the sandbox script.
-
-===============
-Pinned Versions
-===============
-
-As explained above, the users can pick and choose which versions to use. If
-you want to be on the safe side, you can use the pinned versions the sandbox
-provides. They can be seen from ``releng/prototypes/xci/config/pinned-versions``.
-
-How Pinned Versions are Determined
-----------------------------------
-
-OPNFV runs periodic jobs against upstream projects openstack/bifrost and
-openstack/ansible using latest on master and stable/ocata branches,
-continuously chasing the HEAD of corresponding branches.
-
-Once a working version is identified, the versions of the upstream components
-are then bumped in releng repo.
-
-===========================================
-Limitations, Known Issues, and Improvements
-===========================================
-
-The list can be seen using `this link <https://jira.opnfv.org/issues/?filter=11616>`_.
-
-=========
-Changelog
-=========
-
-Changelog can be seen using `this link <https://jira.opnfv.org/issues/?filter=11625>`_.
-
-=======
-Testing
-=======
-
-Sandbox is continuously tested by OPNFV CI to ensure the changes do not impact
-users. In fact, OPNFV CI itself uses the sandbox scripts to run daily platform
-verification jobs.
-
-=======
-Support
-=======
-
-OPNFV XCI issues are tracked on the OPNFV JIRA Releng project. If you encounter
-an issue or identify a bug, please submit an issue to JIRA using
-`this link <https://jira.opnfv.org/projects/RELENG>`_.
-
-If you have questions or comments, you can ask them on ``#opnfv-pharos`` IRC
-channel on Freenode.
diff --git a/prototypes/xci/config/aio-vars b/prototypes/xci/config/aio-vars
deleted file mode 100755 (executable)
index f28ecff..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#-------------------------------------------------------------------------------
-# XCI Flavor Configuration
-#-------------------------------------------------------------------------------
-# You are free to modify parts of the configuration to fit into your environment.
-# But before doing that, please ensure you checked other flavors to see if one
-# of them can be used instead, saving you some time.
-#-------------------------------------------------------------------------------
-
-#-------------------------------------------------------------------------------
-# Configure VM Nodes
-#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=1
-export TEST_VM_NODE_NAMES=opnfv
-export VM_DOMAIN_TYPE=kvm
-export VM_CPU=8
-export VM_DISK=80
-export VM_MEMORY_SIZE=8192
-export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/config/env-vars b/prototypes/xci/config/env-vars
deleted file mode 100755 (executable)
index 9d4c782..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#-------------------------------------------------------------------------------
-# !!! Changing or overriding these will most likely break everything altogether !!!
-#    Please do not change these settings if you are not developing for XCI!
-#-------------------------------------------------------------------------------
-export OPNFV_RELENG_GIT_URL=https://gerrit.opnfv.org/gerrit/releng.git
-export OPENSTACK_BIFROST_GIT_URL=https://git.openstack.org/openstack/bifrost
-export OPENSTACK_OSA_GIT_URL=https://git.openstack.org/openstack/openstack-ansible
-export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
-export OPNFV_HOST_IP=192.168.122.2
-export XCI_FLAVOR_ANSIBLE_FILE_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR
-export CI_LOOP=${CI_LOOP:-daily}
-export JOB_NAME=${JOB_NAME:-false}
-# TODO: this currently matches to bifrost ansible version
-# there is perhaps better way to do this
-export XCI_ANSIBLE_PIP_VERSION=2.1.5.0
-export ANSIBLE_HOST_KEY_CHECKING=False
-export DISTRO=${DISTRO:-ubuntu}
-export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial}
-export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
-export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony,iptables"}
-export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
diff --git a/prototypes/xci/config/ha-vars b/prototypes/xci/config/ha-vars
deleted file mode 100755 (executable)
index 1ba4589..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#-------------------------------------------------------------------------------
-# XCI Flavor Configuration
-#-------------------------------------------------------------------------------
-# You are free to modify parts of the configuration to fit into your environment.
-# But before doing that, please ensure you checked other flavors to see if one
-# of them can be used instead, saving you some time.
-#-------------------------------------------------------------------------------
-
-#-------------------------------------------------------------------------------
-# Configure VM Nodes
-#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=6
-export TEST_VM_NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
-export VM_DOMAIN_TYPE=kvm
-export VM_CPU=8
-export VM_DISK=80
-export VM_MEMORY_SIZE=16384
-export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/config/mini-vars b/prototypes/xci/config/mini-vars
deleted file mode 100755 (executable)
index 8f1e83c..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#-------------------------------------------------------------------------------
-# XCI Flavor Configuration
-#-------------------------------------------------------------------------------
-# You are free to modify parts of the configuration to fit into your environment.
-# But before doing that, please ensure you checked other flavors to see if one
-# of them can be used instead, saving you some time.
-#-------------------------------------------------------------------------------
-
-#-------------------------------------------------------------------------------
-# Configure VM Nodes
-#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=3
-export TEST_VM_NODE_NAMES="opnfv controller00 compute00"
-export VM_DOMAIN_TYPE=kvm
-export VM_CPU=8
-export VM_DISK=80
-export VM_MEMORY_SIZE=12288
-export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/config/noha-vars b/prototypes/xci/config/noha-vars
deleted file mode 100755 (executable)
index 935becb..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#-------------------------------------------------------------------------------
-# XCI Flavor Configuration
-#-------------------------------------------------------------------------------
-# You are free to modify parts of the configuration to fit into your environment.
-# But before doing that, please ensure you checked other flavors to see if one
-# of them can be used instead, saving you some time.
-#-------------------------------------------------------------------------------
-
-#-------------------------------------------------------------------------------
-# Configure VM Nodes
-#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=4
-export TEST_VM_NODE_NAMES="opnfv controller00 compute00 compute01"
-export VM_DOMAIN_TYPE=kvm
-export VM_CPU=8
-export VM_DISK=80
-export VM_MEMORY_SIZE=12288
-export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/config/pinned-versions b/prototypes/xci/config/pinned-versions
deleted file mode 100755 (executable)
index e3b49c7..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#-------------------------------------------------------------------------------
-# Pinned Component Versions
-#-------------------------------------------------------------------------------
-# You are free to override these versions in user-vars to experiment with
-# different branches or with different commits but be aware that things might
-# not work as expected. You can set the versions you want to use before running
-# the main script on your shell as shown on the examples below.
-#
-# It is important to be consistent between branches you use for OpenStack
-# projects OPNFV XCI uses.
-#
-# Examples:
-#   export OPENSTACK_BIFROST_VERSION="stable/ocata"
-#   export OPENSTACK_OSA_VERSION="stable/ocata"
-# or
-#   export OPENSTACK_BIFROST_VERSION="master"
-#   export OPENSTACK_OSA_VERSION="master"
-# or
-#   export OPENSTACK_BIFROST_VERSION="a87f7ce6c8725b3bbffec7b2efa1e466796848a9"
-#   export OPENSTACK_OSA_VERSION="4713cf45e11b4ebca9fbed25d1389854602213d8"
-#-------------------------------------------------------------------------------
-# use releng from master until the development work with the sandbox is complete
-export OPNFV_RELENG_VERSION="master"
-# HEAD of "master" as of 04.04.2017
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"6109f824e5510e794dbf1968c3859e8b6356d280"}
-# HEAD of "master" as of 04.04.2017
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"d9e1330c7ff9d72a604b6b4f3af765f66a01b30e"}
diff --git a/prototypes/xci/config/user-vars b/prototypes/xci/config/user-vars
deleted file mode 100755 (executable)
index 5ed5396..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#-------------------------------------------------------------------------------
-# Set Deployment Flavor
-#-------------------------------------------------------------------------------
-# OPNFV XCI currently supports 4 different types of flavors:
-#   - all in one (aio): 1 opnfv VM which acts as controller and compute node
-#   - mini: 3 VMs, 1 opnfv VM deployment host, 1 controller, and 1 compute nodes
-#   - noha: 4 VMs, 1 opnfv VM deployment host, 1 controller, and 2 compute nodes
-#   - ha: 6 VMs, 1 opnfv VM deployment host, 3 controllers, and 2 compute nodes
-#
-# Apart from having different number of nodes, CPU, RAM, and disk allocations
-# also differ from each other. Please take a look at the env-vars files for
-# each of these flavors.
-#
-# Examples:
-#   export XCI_FLAVOR="aio"
-# or
-#   export XCI_FLAVOR="mini"
-# or
-#   export XCI_FLAVOR="noha"
-# or
-#   export XCI_FLAVOR="ha"
-#-------------------------------------------------------------------------------
-export XCI_FLAVOR=${XCI_FLAVOR:-aio}
-
-#-------------------------------------------------------------------------------
-# Set Paths to where git repositories of XCI Components will be cloned
-#-------------------------------------------------------------------------------
-# OPNFV XCI Sandbox is not verified to be used as non-root user as of yet so
-# changing these paths might break things.
-#-------------------------------------------------------------------------------
-export XCI_DEVEL_ROOT=${XCI_DEVEL_ROOT:-"/tmp/.xci-deploy-env"}
-export OPNFV_RELENG_PATH="${XCI_DEVEL_ROOT}/releng"
-export OPENSTACK_BIFROST_PATH="${XCI_DEVEL_ROOT}/bifrost"
-export OPENSTACK_OSA_PATH="${XCI_DEVEL_ROOT}/openstack-ansible"
-export OPNFV_SSH_HOST_KEYS_PATH="${XCI_DEVEL_ROOT}/ssh_host_keys"
-
-#-------------------------------------------------------------------------------
-# Set the playbook to use for OpenStack deployment
-#-------------------------------------------------------------------------------
-# The variable can be overridden in order to install additional OpenStack services
-# supported by OpenStack Ansible or exclude certain OpenStack services.
-#-------------------------------------------------------------------------------
-export OPNFV_OSA_PLAYBOOK=${OPNFV_OSA_PLAYBOOK:-"$OPENSTACK_OSA_PATH/playbooks/setup-openstack.yml"}
-
-#-------------------------------------------------------------------------------
-# Configure some other stuff
-#-------------------------------------------------------------------------------
-# Set the verbosity for ansible
-#
-# Examples:
-#   ANSIBLE_VERBOSITY="-v"
-# or
-#   ANSIBLE_VERBOSITY="-vvvv"
-export ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY-""}
-export LOG_PATH=${LOG_PATH:-${XCI_DEVEL_ROOT}/opnfv/logs}
-export RUN_TEMPEST=${RUN_TEMPEST:-false}
-# Set this to true to force XCI to re-create the target OS images
-export CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
diff --git a/prototypes/xci/docs/developer-guide.rst b/prototypes/xci/docs/developer-guide.rst
deleted file mode 100644 (file)
index 9a07b12..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#########################
-OPNFV XCI Developer Guide
-#########################
-
-This document will contain details about the XCI and how things are put
-together in order to support different flavors and different distros in future.
-
-Document is for anyone who will
-
-- do hands on development with XCI such as new features to XCI itself or
-  bugfixes
-- integrate new features
-- want to know what is going on behind the scenes
-
-It will also have guidance regarding how to develop for the sandbox.
-
-If you are looking for User's Guide, please check README.rst in the root of
-xci folder or take a look at
-`Wiki <https://wiki.opnfv.org/display/INF/OpenStack>`_.
-
-===================================
-Components of XCI Developer Sandbox
-===================================
-
-TBD
-
-=============
-Detailed Flow
-=============
-
-TBD
diff --git a/prototypes/xci/file/aio/configure-opnfvhost.yml b/prototypes/xci/file/aio/configure-opnfvhost.yml
deleted file mode 100644 (file)
index 5c66d40..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- hosts: opnfv
-  remote_user: root
-  vars_files:
-    - ../var/opnfv.yml
-  roles:
-    - role: remove-folders
-    - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" }
-  tasks:
-    - name: bootstrap ansible on opnfv host
-      command: "/bin/bash ./scripts/bootstrap-ansible.sh"
-      args:
-        chdir: "{{OPENSTACK_OSA_PATH}}"
-    - name: bootstrap opnfv host as aio
-      command: "/bin/bash ./scripts/bootstrap-aio.sh"
-      args:
-        chdir: "{{OPENSTACK_OSA_PATH}}"
-    - name: install OpenStack on opnfv host - this command doesn't log anything to console
-      command: "/bin/bash ./scripts/run-playbooks.sh"
-      args:
-        chdir: "{{OPENSTACK_OSA_PATH}}"
diff --git a/prototypes/xci/file/aio/flavor-vars.yml b/prototypes/xci/file/aio/flavor-vars.yml
deleted file mode 100644 (file)
index 6ac1e0f..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# this file is added intentionally in order to simplify putting files in place
-# in future, it might contain vars specific to this flavor
diff --git a/prototypes/xci/file/aio/inventory b/prototypes/xci/file/aio/inventory
deleted file mode 100644 (file)
index 9a3dd9e..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
diff --git a/prototypes/xci/file/ansible-role-requirements.yml b/prototypes/xci/file/ansible-role-requirements.yml
deleted file mode 100644 (file)
index 842bcc4..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# these versions are extracted based on the osa commit d9e1330c7ff9d72a604b6b4f3af765f66a01b30e on 04.04.2017
-# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=d9e1330c7ff9d72a604b6b4f3af765f66a01b30e
-- name: apt_package_pinning
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
-  version: 364fc9fcd8ff652546c13d9c20ac808bc0e35f66
-- name: pip_install
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-pip_install
-  version: 793ae4d01397bd91ebe18e9670e8e27d1ae91960
-- name: galera_client
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-galera_client
-  version: c093c13e01826da545bf9a0259e0be441bc1b5e1
-- name: galera_server
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-galera_server
-  version: fd0a6b104a32badbe7e7594e2c829261a53bfb11
-- name: ceph_client
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
-  version: 9149bfa8e3c4284b656834ba7765ea3aa48bec2e
-- name: haproxy_server
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
-  version: 32415ab81c61083ac5a83b65274703e4a5470e5e
-- name: keepalived
-  scm: git
-  src: https://github.com/evrardjp/ansible-keepalived
-  version: 4f7c8eb16e3cbd8c8748f126c1eea73db5c8efe9
-- name: lxc_container_create
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
-  version: 097da38126d90cfca36cdc3955aaf658a00db599
-- name: lxc_hosts
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
-  version: 2931d0c87a1c592ad7f1f2f83cdcf468e8dea932
-- name: memcached_server
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
-  version: 58e17aa13ebe7b0aa5da7c00afc75d6716d2720d
-- name: openstack-ansible-security
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-security
-  version: 9d745ec4fe8ac3e6d6cbb2412abe5196a9d2dad7
-- name: openstack_hosts
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
-  version: 2076dfddf418b1bdd64d3782346823902aa996bc
-- name: os_keystone
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
-  version: cee7a02143a1826479e6444c6fb5f1c2b6074ab7
-- name: openstack_openrc
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
-  version: fb98ad8d7bfe7fba0c964cb061313f1b8767c4b0
-- name: os_aodh
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
-  version: 9dcacb8fd6feef02e485f99c83535707ae67876b
-- name: os_barbican
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
-  version: bb3f39cb2f3c31c6980aa65c8953ff6293b992c0
-- name: os_ceilometer
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
-  version: 178ad8245fa019f0610c628c58c377997b011e8a
-- name: os_cinder
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
-  version: 1321fd39d8f55d1dc3baf91b4194469b349d7dc4
-- name: os_glance
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_glance
-  version: f39ef212bfa2edff8334bfb632cc463001c77c11
-- name: os_gnocchi
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
-  version: 318bd76e5e72402e8ff5b372b469c27a9395341b
-- name: os_heat
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_heat
-  version: 07d59ddb757b2d2557fba52ac537803e646e65b4
-- name: os_horizon
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
-  version: 69ef49c4f7a42f082f4bcff824d13f57145e2b83
-- name: os_ironic
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
-  version: 57e8a0eaaa2159f33e64a1b037180383196919d1
-- name: os_magnum
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
-  version: 8329c257dff25686827bd1cc904506d76ad1d12f
-- name: os_trove
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_trove
-  version: b948402c76d6188caa7be376098354cdb850d638
-- name: os_neutron
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
-  version: 2a92a4e1857e7457683aefd87ee5a4e751fc701a
-- name: os_nova
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_nova
-  version: 511963b7921ec7c2db24e8ee1d71a940b0aafae4
-- name: os_rally
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_rally
-  version: 96153c5b3285d11d00611a03135c9d8f267e0f52
-- name: os_sahara
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
-  version: 012d3f3530f878e5143d58380f94d1f514baad04
-- name: os_swift
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_swift
-  version: d62d6a23ac0b01d0320dbcb6c710dfd5f3cecfdf
-- name: os_tempest
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
-  version: 9d2bfb09d1ebbc9102329b0d42de33aa321e57b1
-- name: plugins
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-plugins
-  version: 3d2e23bb7e1d6775789d7f65ce8a878a7ee1d3c7
-- name: rabbitmq_server
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
-  version: 9b0ce64fe235705e237bc4b476ecc0ad602d67a8
-- name: repo_build
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-repo_build
-  version: fe3ae20f74a912925d5c78040984957a6d55f9de
-- name: repo_server
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-repo_server
-  version: 7ea0820e0941282cd5c5cc263e939ffbee54ba52
-- name: rsyslog_client
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
-  version: 19615e47137eee46ee92c0308532fe1d2212333c
-- name: rsyslog_server
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
-  version: efd7b21798da49802012e390a0ddf7cc38636eeb
-- name: sshd
-  scm: git
-  src: https://github.com/willshersystems/ansible-sshd
-  version: 426e11c4dffeca09fcc4d16103a91e5e65180040
-- name: bird
-  scm: git
-  src: https://github.com/logan2211/ansible-bird
-  version: 2c4d29560d3617abddf0e63e0c95536364dedd92
-- name: etcd
-  scm: git
-  src: https://github.com/logan2211/ansible-etcd
-  version: ef63b0c5fd352b61084fd5aca286ee7f3fea932b
-- name: unbound
-  scm: git
-  src: https://github.com/logan2211/ansible-unbound
-  version: 5329d03eb9c15373d648a801563087c576bbfcde
-- name: resolvconf
-  scm: git
-  src: https://github.com/logan2211/ansible-resolvconf
-  version: 3b2b7cf2e900b194829565b351bf32bb63954548
-- name: os_designate
-  scm: git
-  src: https://git.openstack.org/openstack/openstack-ansible-os_designate
-  version: b7098a6bdea73c869f45a86e0cc78d21b032161e
-- name: ceph.ceph-common
-  scm: git
-  src: https://github.com/ceph/ansible-ceph-common
-  version: ef149767fa9565ec887f0bdb007ff752bd61e5d5
-- name: ceph.ceph-docker-common
-  scm: git
-  src: https://github.com/ceph/ansible-ceph-docker-common
-  version: ca86fd0ef6d24aa2c750a625acdcb8012c374aa0
-- name: ceph-mon
-  scm: git
-  src: https://github.com/ceph/ansible-ceph-mon
-  version: c5be4d6056dfe6a482ca3fcc483a6050cc8929a1
-- name: ceph-osd
-  scm: git
-  src: https://github.com/ceph/ansible-ceph-osd
-  version: 7bc5a61ceb96e487b7a9fe9643f6dafa6492f2b5
diff --git a/prototypes/xci/file/cinder.yml b/prototypes/xci/file/cinder.yml
deleted file mode 100644 (file)
index e40b392..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# This file contains an example to show how to set
-# the cinder-volume service to run in a container.
-#
-# Important note:
-# When using LVM or any iSCSI-based cinder backends, such as NetApp with
-# iSCSI protocol, the cinder-volume service *must* run on metal.
-# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
-
-container_skel:
-  cinder_volumes_container:
-    properties:
-      is_metal: false
diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml
deleted file mode 100644 (file)
index 167502c..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
----
-host_info: {
-    'opnfv': {
-        'VLAN_IP': '192.168.122.2',
-        'MGMT_IP': '172.29.236.10',
-        'VXLAN_IP': '172.29.240.10',
-        'STORAGE_IP': '172.29.244.10'
-    },
-    'controller00': {
-        'VLAN_IP': '192.168.122.3',
-        'MGMT_IP': '172.29.236.11',
-        'VXLAN_IP': '172.29.240.11',
-        'STORAGE_IP': '172.29.244.11'
-    },
-    'controller01': {
-        'VLAN_IP': '192.168.122.4',
-        'MGMT_IP': '172.29.236.12',
-        'VXLAN_IP': '172.29.240.12',
-        'STORAGE_IP': '172.29.244.12'
-    },
-    'controller02': {
-        'VLAN_IP': '192.168.122.5',
-        'MGMT_IP': '172.29.236.13',
-        'VXLAN_IP': '172.29.240.13',
-        'STORAGE_IP': '172.29.244.13'
-    },
-    'compute00': {
-        'VLAN_IP': '192.168.122.6',
-        'MGMT_IP': '172.29.236.14',
-        'VXLAN_IP': '172.29.240.14',
-        'STORAGE_IP': '172.29.244.14'
-    },
-    'compute01': {
-        'VLAN_IP': '192.168.122.7',
-        'MGMT_IP': '172.29.236.15',
-        'VXLAN_IP': '172.29.240.15',
-        'STORAGE_IP': '172.29.244.15'
-    }
-}
diff --git a/prototypes/xci/file/ha/inventory b/prototypes/xci/file/ha/inventory
deleted file mode 100644 (file)
index 94b1d07..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml
deleted file mode 100644 (file)
index 09fb734..0000000
+++ /dev/null
@@ -1,254 +0,0 @@
----
-cidr_networks:
-  container: 172.29.236.0/22
-  tunnel: 172.29.240.0/22
-  storage: 172.29.244.0/22
-
-used_ips:
-  - "172.29.236.1,172.29.236.50"
-  - "172.29.240.1,172.29.240.50"
-  - "172.29.244.1,172.29.244.50"
-  - "172.29.248.1,172.29.248.50"
-
-global_overrides:
-  internal_lb_vip_address: 172.29.236.222
-  external_lb_vip_address: 192.168.122.220
-  tunnel_bridge: "br-vxlan"
-  management_bridge: "br-mgmt"
-  provider_networks:
-    - network:
-        container_bridge: "br-mgmt"
-        container_type: "veth"
-        container_interface: "eth1"
-        ip_from_q: "container"
-        type: "raw"
-        group_binds:
-          - all_containers
-          - hosts
-        is_container_address: true
-        is_ssh_address: true
-    - network:
-        container_bridge: "br-vxlan"
-        container_type: "veth"
-        container_interface: "eth10"
-        ip_from_q: "tunnel"
-        type: "vxlan"
-        range: "1:1000"
-        net_name: "vxlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth12"
-        host_bind_override: "eth12"
-        type: "flat"
-        net_name: "flat"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth11"
-        type: "vlan"
-        range: "1:1"
-        net_name: "vlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-storage"
-        container_type: "veth"
-        container_interface: "eth2"
-        ip_from_q: "storage"
-        type: "raw"
-        group_binds:
-          - glance_api
-          - cinder_api
-          - cinder_volume
-          - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-#  ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.14"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-  controller01:
-    ip: 172.29.236.12
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.14"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-  controller02:
-    ip: 172.29.236.13
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.14"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
-  controller00:
-    ip: 172.29.236.11
-  controller01:
-    ip: 172.29.236.12
-  controller02:
-    ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
-  compute00:
-    ip: 172.29.236.14
-  compute01:
-    ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        nfs_volume:
-          volume_backend_name: NFS_VOLUME1
-          volume_driver: cinder.volume.drivers.nfs.NfsDriver
-          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
-          nfs_shares_config: /etc/cinder/nfs_shares
-          shares:
-            - ip: "172.29.244.14"
-              share: "/volumes"
-  controller01:
-    ip: 172.29.236.12
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        nfs_volume:
-          volume_backend_name: NFS_VOLUME1
-          volume_driver: cinder.volume.drivers.nfs.NfsDriver
-          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
-          nfs_shares_config: /etc/cinder/nfs_shares
-          shares:
-            - ip: "172.29.244.14"
-              share: "/volumes"
-  controller02:
-    ip: 172.29.236.13
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        nfs_volume:
-          volume_backend_name: NFS_VOLUME1
-          volume_driver: cinder.volume.drivers.nfs.NfsDriver
-          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
-          nfs_shares_config: /etc/cinder/nfs_shares
-          shares:
-            - ip: "172.29.244.14"
-              share: "/volumes"
diff --git a/prototypes/xci/file/ha/user_variables.yml b/prototypes/xci/file/ha/user_variables.yml
deleted file mode 100644 (file)
index 094cc8c..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
diff --git a/prototypes/xci/file/install-ansible.sh b/prototypes/xci/file/install-ansible.sh
deleted file mode 100644 (file)
index 67a49b3..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash
-# NOTE(hwoarang): Most parts of this this file were taken from the
-# bifrost repository (scripts/install-deps.sh). This script contains all
-# the necessary distro specific code to install ansible and it's dependencies.
-
-set -eu
-
-declare -A PKG_MAP
-
-CHECK_CMD_PKGS=(
-    libffi
-    libopenssl
-    net-tools
-    python-devel
-)
-
-# Check zypper before apt-get in case zypper-aptitude
-# is installed
-if [ -x '/usr/bin/zypper' ]; then
-    OS_FAMILY="Suse"
-    INSTALLER_CMD="sudo -H -E zypper install -y"
-    CHECK_CMD="zypper search --match-exact --installed"
-    PKG_MAP=(
-        [gcc]=gcc
-        [git]=git
-        [libffi]=libffi-devel
-        [libopenssl]=libopenssl-devel
-        [net-tools]=net-tools
-        [python]=python
-        [python-devel]=python-devel
-        [venv]=python-virtualenv
-        [wget]=wget
-    )
-    EXTRA_PKG_DEPS=( python-xml )
-    # NOTE (cinerama): we can't install python without removing this package
-    # if it exists
-    if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
-        sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
-    fi
-elif [ -x '/usr/bin/apt-get' ]; then
-    OS_FAMILY="Debian"
-    INSTALLER_CMD="sudo -H -E apt-get -y install"
-    CHECK_CMD="dpkg -l"
-    PKG_MAP=( [gcc]=gcc
-              [git]=git
-              [libffi]=libffi-dev
-              [libopenssl]=libssl-dev
-              [net-tools]=net-tools
-              [python]=python-minimal
-              [python-devel]=libpython-dev
-              [venv]=python-virtualenv
-              [wget]=wget
-            )
-    EXTRA_PKG_DEPS=()
-elif [ -x '/usr/bin/dnf' ] || [ -x '/usr/bin/yum' ]; then
-    OS_FAMILY="RedHat"
-    PKG_MANAGER=$(which dnf || which yum)
-    INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -y install"
-    CHECK_CMD="rpm -q"
-    PKG_MAP=(
-        [gcc]=gcc
-        [git]=git
-        [libffi]=libffi-devel
-        [libopenssl]=openssl-devel
-        [net-tools]=net-tools
-        [python]=python
-        [python-devel]=python-devel
-        [venv]=python-virtualenv
-        [wget]=wget
-    )
-    EXTRA_PKG_DEPS=()
-else
-    echo "ERROR: Supported package manager not found.  Supported: apt,yum,zypper"
-fi
-
-if ! $(python --version &>/dev/null); then
-    ${INSTALLER_CMD} ${PKG_MAP[python]}
-fi
-if ! $(gcc -v &>/dev/null); then
-    ${INSTALLER_CMD} ${PKG_MAP[gcc]}
-fi
-if ! $(git --version &>/dev/null); then
-    ${INSTALLER_CMD} ${PKG_MAP[git]}
-fi
-if ! $(wget --version &>/dev/null); then
-    ${INSTALLER_CMD} ${PKG_MAP[wget]}
-fi
-
-for pkg in ${CHECK_CMD_PKGS[@]}; do
-    if ! $(${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null); then
-        ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
-    fi
-done
-
-if [ -n "${EXTRA_PKG_DEPS-}" ]; then
-    for pkg in ${EXTRA_PKG_DEPS}; do
-        if ! $(${CHECK_CMD} ${pkg} &>/dev/null); then
-            ${INSTALLER_CMD} ${pkg}
-        fi
-    done
-fi
-
-# If we're using a venv, we need to work around sudo not
-# keeping the path even with -E.
-PYTHON=$(which python)
-
-# To install python packages, we need pip.
-#
-# We can't use the apt packaged version of pip since
-# older versions of pip are incompatible with
-# requests, one of our indirect dependencies (bug 1459947).
-#
-# Note(cinerama): We use pip to install an updated pip plus our
-# other python requirements. pip breakages can seriously impact us,
-# so we've chosen to install/upgrade pip here rather than in
-# requirements (which are synced automatically from the global ones)
-# so we can quickly and easily adjust version parameters.
-# See bug 1536627.
-#
-# Note(cinerama): If pip is linked to pip3, the rest of the install
-# won't work. Remove the alternatives. This is due to ansible's
-# python 2.x requirement.
-if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
-    sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
-fi
-
-if ! which pip; then
-    wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
-    sudo -H -E ${PYTHON} /tmp/get-pip.py
-fi
-
-PIP=$(which pip)
-
-${PIP} install --user "pip>6.0"
-
-${PIP} install --user --upgrade ansible==$XCI_ANSIBLE_PIP_VERSION
diff --git a/prototypes/xci/file/mini/flavor-vars.yml b/prototypes/xci/file/mini/flavor-vars.yml
deleted file mode 100644 (file)
index 0d446ba..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
----
-host_info: {
-    'opnfv': {
-        'VLAN_IP': '192.168.122.2',
-        'MGMT_IP': '172.29.236.10',
-        'VXLAN_IP': '172.29.240.10',
-        'STORAGE_IP': '172.29.244.10'
-    },
-    'controller00': {
-        'VLAN_IP': '192.168.122.3',
-        'MGMT_IP': '172.29.236.11',
-        'VXLAN_IP': '172.29.240.11',
-        'STORAGE_IP': '172.29.244.11'
-    },
-    'compute00': {
-        'VLAN_IP': '192.168.122.4',
-        'MGMT_IP': '172.29.236.12',
-        'VXLAN_IP': '172.29.240.12',
-        'STORAGE_IP': '172.29.244.12'
-    },
-}
diff --git a/prototypes/xci/file/mini/inventory b/prototypes/xci/file/mini/inventory
deleted file mode 100644 (file)
index eb73e5e..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
diff --git a/prototypes/xci/file/mini/openstack_user_config.yml b/prototypes/xci/file/mini/openstack_user_config.yml
deleted file mode 100644 (file)
index f9ccee2..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
----
-cidr_networks:
-  container: 172.29.236.0/22
-  tunnel: 172.29.240.0/22
-  storage: 172.29.244.0/22
-
-used_ips:
-  - "172.29.236.1,172.29.236.50"
-  - "172.29.240.1,172.29.240.50"
-  - "172.29.244.1,172.29.244.50"
-  - "172.29.248.1,172.29.248.50"
-
-global_overrides:
-  internal_lb_vip_address: 172.29.236.11
-  external_lb_vip_address: 192.168.122.3
-  tunnel_bridge: "br-vxlan"
-  management_bridge: "br-mgmt"
-  provider_networks:
-    - network:
-        container_bridge: "br-mgmt"
-        container_type: "veth"
-        container_interface: "eth1"
-        ip_from_q: "container"
-        type: "raw"
-        group_binds:
-          - all_containers
-          - hosts
-        is_container_address: true
-        is_ssh_address: true
-    - network:
-        container_bridge: "br-vxlan"
-        container_type: "veth"
-        container_interface: "eth10"
-        ip_from_q: "tunnel"
-        type: "vxlan"
-        range: "1:1000"
-        net_name: "vxlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth12"
-        host_bind_override: "eth12"
-        type: "flat"
-        net_name: "flat"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth11"
-        type: "vlan"
-        range: "1:1"
-        net_name: "vlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-storage"
-        container_type: "veth"
-        container_interface: "eth2"
-        ip_from_q: "storage"
-        type: "raw"
-        group_binds:
-          - glance_api
-          - cinder_api
-          - cinder_volume
-          - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-#  ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.12"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
-  compute00:
-    ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        nfs_volume:
-          volume_backend_name: NFS_VOLUME1
-          volume_driver: cinder.volume.drivers.nfs.NfsDriver
-          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
-          nfs_shares_config: /etc/cinder/nfs_shares
-          shares:
-            - ip: "172.29.244.12"
-              share: "/volumes"
diff --git a/prototypes/xci/file/mini/user_variables.yml b/prototypes/xci/file/mini/user_variables.yml
deleted file mode 100644 (file)
index 7a0b806..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
diff --git a/prototypes/xci/file/noha/flavor-vars.yml b/prototypes/xci/file/noha/flavor-vars.yml
deleted file mode 100644 (file)
index 3c69a34..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
----
-host_info: {
-    'opnfv': {
-        'VLAN_IP': '192.168.122.2',
-        'MGMT_IP': '172.29.236.10',
-        'VXLAN_IP': '172.29.240.10',
-        'STORAGE_IP': '172.29.244.10'
-    },
-    'controller00': {
-        'VLAN_IP': '192.168.122.3',
-        'MGMT_IP': '172.29.236.11',
-        'VXLAN_IP': '172.29.240.11',
-        'STORAGE_IP': '172.29.244.11'
-    },
-    'compute00': {
-        'VLAN_IP': '192.168.122.4',
-        'MGMT_IP': '172.29.236.12',
-        'VXLAN_IP': '172.29.240.12',
-        'STORAGE_IP': '172.29.244.12'
-    },
-    'compute01': {
-        'VLAN_IP': '192.168.122.5',
-        'MGMT_IP': '172.29.236.13',
-        'VXLAN_IP': '172.29.240.13',
-        'STORAGE_IP': '172.29.244.13'
-    }
-}
diff --git a/prototypes/xci/file/noha/inventory b/prototypes/xci/file/noha/inventory
deleted file mode 100644 (file)
index b4f9f6d..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-compute01 ansible_ssh_host=192.168.122.5
diff --git a/prototypes/xci/file/noha/openstack_user_config.yml b/prototypes/xci/file/noha/openstack_user_config.yml
deleted file mode 100644 (file)
index fb12655..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
----
-cidr_networks:
-  container: 172.29.236.0/22
-  tunnel: 172.29.240.0/22
-  storage: 172.29.244.0/22
-
-used_ips:
-  - "172.29.236.1,172.29.236.50"
-  - "172.29.240.1,172.29.240.50"
-  - "172.29.244.1,172.29.244.50"
-  - "172.29.248.1,172.29.248.50"
-
-global_overrides:
-  internal_lb_vip_address: 172.29.236.11
-  external_lb_vip_address: 192.168.122.3
-  tunnel_bridge: "br-vxlan"
-  management_bridge: "br-mgmt"
-  provider_networks:
-    - network:
-        container_bridge: "br-mgmt"
-        container_type: "veth"
-        container_interface: "eth1"
-        ip_from_q: "container"
-        type: "raw"
-        group_binds:
-          - all_containers
-          - hosts
-        is_container_address: true
-        is_ssh_address: true
-    - network:
-        container_bridge: "br-vxlan"
-        container_type: "veth"
-        container_interface: "eth10"
-        ip_from_q: "tunnel"
-        type: "vxlan"
-        range: "1:1000"
-        net_name: "vxlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth12"
-        host_bind_override: "eth12"
-        type: "flat"
-        net_name: "flat"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth11"
-        type: "vlan"
-        range: "1:1"
-        net_name: "vlan"
-        group_binds:
-          - neutron_linuxbridge_agent
-    - network:
-        container_bridge: "br-storage"
-        container_type: "veth"
-        container_interface: "eth2"
-        ip_from_q: "storage"
-        type: "raw"
-        group_binds:
-          - glance_api
-          - cinder_api
-          - cinder_volume
-          - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-#  ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      limit_container_types: glance
-      glance_nfs_client:
-        - server: "172.29.244.12"
-          remote_path: "/images"
-          local_path: "/var/lib/glance/images"
-          type: "nfs"
-          options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
-  controller00:
-    ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
-  compute00:
-    ip: 172.29.236.12
-  compute01:
-    ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
-  controller00:
-    ip: 172.29.236.11
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        nfs_volume:
-          volume_backend_name: NFS_VOLUME1
-          volume_driver: cinder.volume.drivers.nfs.NfsDriver
-          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
-          nfs_shares_config: /etc/cinder/nfs_shares
-          shares:
-            - ip: "172.29.244.12"
-              share: "/volumes"
diff --git a/prototypes/xci/file/noha/user_variables.yml b/prototypes/xci/file/noha/user_variables.yml
deleted file mode 100644 (file)
index 7a0b806..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
diff --git a/prototypes/xci/file/setup-openstack.yml b/prototypes/xci/file/setup-openstack.yml
deleted file mode 100644 (file)
index 415c489..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- include: os-keystone-install.yml
-- include: os-glance-install.yml
-- include: os-cinder-install.yml
-- include: os-nova-install.yml
-- include: os-neutron-install.yml
-- include: os-heat-install.yml
-- include: os-horizon-install.yml
-- include: os-swift-install.yml
-- include: os-ironic-install.yml
-- include: os-tempest-install.yml
diff --git a/prototypes/xci/playbooks/configure-localhost.yml b/prototypes/xci/playbooks/configure-localhost.yml
deleted file mode 100644 (file)
index b6d0fcc..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: localhost
-  connection: local
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  roles:
-    - role: remove-folders
-    - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
-
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  tasks:
-    - name: Synchronize local development releng repository to XCI paths
-      synchronize:
-        src: "{{ OPNFV_RELENG_DEV_PATH }}"
-        dest: "{{ OPNFV_RELENG_PATH }}"
-        recursive: yes
-        delete: yes
-      when:
-        - OPNFV_RELENG_DEV_PATH != ""
-
-- hosts: localhost
-  connection: local
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  tasks:
-    - name:  create log directory {{LOG_PATH}}
-      file:
-        path: "{{LOG_PATH}}"
-        state: directory
-        recurse: no
-    # when the deployment is aio, we overwrite and use playbook, configure-opnfvhost.yml, since everything gets installed on opnfv host
-    - name: copy aio playbook
-      copy:
-        src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-opnfvhost.yml"
-        dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
-      when: XCI_FLAVOR == "aio"
-    - name: copy flavor inventory
-      copy:
-        src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/inventory"
-        dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
-    - name: copy flavor vars
-      copy:
-        src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/flavor-vars.yml"
-        dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/var"
diff --git a/prototypes/xci/playbooks/configure-opnfvhost.yml b/prototypes/xci/playbooks/configure-opnfvhost.yml
deleted file mode 100644 (file)
index 8656ff9..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: opnfv
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-    - ../var/opnfv.yml
-  roles:
-    - role: remove-folders
-    - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
-    - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" }
-
-- hosts: opnfv
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  tasks:
-    - name: Synchronize local development releng repository to XCI paths
-      synchronize:
-        src: "{{ OPNFV_RELENG_DEV_PATH }}"
-        dest: "{{ OPNFV_RELENG_PATH }}"
-        recursive: yes
-        delete: yes
-      when:
-        - OPNFV_RELENG_DEV_PATH != ""
-    - name: Synchronize local development openstack-ansible repository to XCI paths
-      synchronize:
-        src: "{{ OPENSTACK_OSA_DEV_PATH }}"
-        dest: "{{ OPENSTACK_OSA_PATH }}"
-        recursive: yes
-        delete: yes
-      when:
-        - OPENSTACK_OSA_DEV_PATH != ""
-
-- hosts: opnfv
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-    - ../var/opnfv.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/opnfv.interface.j2", dest: "/etc/network/interfaces" }
-  tasks:
-    - name: generate SSH keys
-      shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
-      args:
-        creates: /root/.ssh/id_rsa
-    - name: ensure ssh key storage directory exists
-      file:
-        path: "{{ OPNFV_SSH_HOST_KEYS_PATH }}"
-        state: directory
-    - name: fetch public key
-      fetch: src="/root/.ssh/id_rsa.pub" dest="{{ OPNFV_SSH_HOST_KEYS_PATH }}"
-    - name: copy flavor inventory
-      shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/inventory {{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
-    - name: copy flavor vars
-      shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/flavor-vars.yml {{OPNFV_RELENG_PATH}}/prototypes/xci/var"
-    - name: copy openstack_deploy
-      shell: "/bin/cp -rf {{OPENSTACK_OSA_PATH}}/etc/openstack_deploy {{OPENSTACK_OSA_ETC_PATH}}"
-    - name: copy openstack_user_config.yml
-      shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/openstack_user_config.yml {{OPENSTACK_OSA_ETC_PATH}}"
-    - name: copy user_variables.yml
-      shell: "/bin/cp -rf {{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}"
-    - name: copy cinder.yml
-      shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d"
-    # TODO: We need to get rid of this as soon as the issue is fixed upstream
-    - name: change the haproxy state from disable to enable
-      replace:
-        dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-keystone-install.yml"
-        regexp: '(\s+)haproxy_state: disabled'
-        replace: '\1haproxy_state: enabled'
-    - name: copy OPNFV OpenStack playbook
-      shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
-    - name: copy OPNFV role requirements
-      shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
-    - name: bootstrap ansible on opnfv host
-      command: "/bin/bash ./scripts/bootstrap-ansible.sh"
-      args:
-        chdir: "{{OPENSTACK_OSA_PATH}}"
-    - name: generate password token
-      command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
-      args:
-        chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
-- hosts: localhost
-  remote_user: root
-  vars_files:
-    - ../var/opnfv.yml
-  tasks:
-    - name: Generate authorized_keys
-      shell: "/bin/cat {{ OPNFV_SSH_HOST_KEYS_PATH }}/opnfv/root/.ssh/id_rsa.pub >> ../file/authorized_keys"
-    - name: Append public keys to authorized_keys
-      shell: "/bin/cat {{ ansible_env.HOME }}/.ssh/id_rsa.pub >> ../file/authorized_keys"
diff --git a/prototypes/xci/playbooks/configure-targethosts.yml b/prototypes/xci/playbooks/configure-targethosts.yml
deleted file mode 100644 (file)
index 50da1f2..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  tasks:
-    - name: add public key to host
-      copy:
-        src: ../file/authorized_keys
-        dest: /root/.ssh/authorized_keys
-
-- hosts: controller
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-    # we need to force sync time with ntp or the nodes will be out of sync timewise
-    - role: synchronize-time
-
-- hosts: compute
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
-    # we need to force sync time with ntp or the nodes will be out of sync timewise
-    - role: synchronize-time
-
-- hosts: compute00
-  remote_user: root
-  # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
-  roles:
-    - role: configure-nfs
diff --git a/prototypes/xci/playbooks/inventory b/prototypes/xci/playbooks/inventory
deleted file mode 100644 (file)
index fd9af90..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
diff --git a/prototypes/xci/playbooks/provision-vm-nodes.yml b/prototypes/xci/playbooks/provision-vm-nodes.yml
deleted file mode 100644 (file)
index 8be36c7..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: localhost
-  connection: local
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  roles:
-    # using these roles here ensures that we can reuse this playbook in different context
-    - role: remove-folders
-    - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
-    - { role: clone-repository, project: "opnfv/bifrost", repo: "{{ OPENSTACK_BIFROST_GIT_URL }}", dest: "{{ OPENSTACK_BIFROST_PATH }}", version: "{{ OPENSTACK_BIFROST_VERSION }}" }
-
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  tasks:
-    - name: Synchronize local development bifrost repository to XCI paths
-      # command module is much faster than the copy module
-      synchronize:
-        src: "{{ OPENSTACK_BIFROST_DEV_PATH }}"
-        dest: "{{ OPENSTACK_BIFROST_PATH }}"
-        recursive: yes
-        delete: yes
-      when:
-        - OPENSTACK_BIFROST_DEV_PATH != ""
-    - name: Synchronize local development releng repository to XCI paths
-      synchronize:
-        src: "{{ OPNFV_RELENG_DEV_PATH }}"
-        dest: "{{ OPNFV_RELENG_PATH }}"
-        recursive: yes
-        delete: yes
-      when:
-        - OPNFV_RELENG_DEV_PATH != ""
-
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  tasks:
-    - name: combine opnfv/releng and openstack/bifrost scripts/playbooks
-      copy:
-        src: "{{ OPNFV_RELENG_PATH }}/prototypes/bifrost/"
-        dest: "{{ OPENSTACK_BIFROST_PATH }}"
-
-- hosts: localhost
-  connection: local
-  become: yes
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  tasks:
-    - name: destroy VM nodes created by previous deployment
-      command: "/bin/bash ./scripts/destroy-env.sh"
-      args:
-        chdir: "{{ OPENSTACK_BIFROST_PATH }}"
-
-- hosts: localhost
-  connection: local
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/opnfv.yml
-  tasks:
-    - name: create and provision VM nodes for the flavor {{ XCI_FLAVOR }}
-      command: "/bin/bash ./scripts/bifrost-provision.sh"
-      args:
-        chdir: "{{ OPENSTACK_BIFROST_PATH }}"
diff --git a/prototypes/xci/playbooks/roles/clone-repository/tasks/main.yml b/prototypes/xci/playbooks/roles/clone-repository/tasks/main.yml
deleted file mode 100644 (file)
index 3f7e091..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: clone "{{ project }}" and checkout "{{ version }}"
-  git:
-    repo: "{{ repo }}"
-    dest: "{{ dest }}"
-    version: "{{ version }}"
diff --git a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
deleted file mode 100644 (file)
index aafadf7..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# TODO: this role needs to be adjusted for different distros
-- block:
-    - name: configure modules
-      lineinfile:
-        dest: /etc/modules
-        state: present
-        create: yes
-        line: "8021q"
-    - name: add modules
-      modprobe:
-        name: 8021q
-        state: present
-    - name: ensure glean rules are removed
-      file:
-        path: "/etc/udev/rules.d/99-glean.rules"
-        state: absent
-    - name: ensure interfaces.d folder is empty
-      shell: "/bin/rm -rf /etc/network/interfaces.d/*"
-    - name: ensure interfaces file is updated
-      template:
-        src: "{{ src }}"
-        dest: "{{ dest }}"
-    - name: restart network service
-      shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
-  when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
deleted file mode 100644 (file)
index c52da0b..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# TODO: this is for xenial and needs to be adjusted for different distros
-- block:
-    - name: make NFS directories
-      file:
-        dest: "{{ item }}"
-        mode: 0777
-        state: directory
-      with_items:
-        - "/images"
-        - "/volumes"
-    - name: configure NFS service
-      lineinfile:
-        dest: /etc/services
-        state: present
-        create: yes
-        line: "{{ item }}"
-      with_items:
-        - "nfs        2049/tcp"
-        - "nfs        2049/udp"
-    - name: configure NFS exports
-      lineinfile:
-        dest: /etc/exports
-        state: present
-        create: yes
-        line: "{{ item }}"
-      with_items:
-        - "/images         *(rw,sync,no_subtree_check,no_root_squash)"
-        - "/volumes        *(rw,sync,no_subtree_check,no_root_squash)"
-    # TODO: the service name might be different on other distros and needs to be adjusted
-    - name: restart ubuntu xenial NFS service
-      service:
-        name: nfs-kernel-server
-        state: restarted
-  when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml b/prototypes/xci/playbooks/roles/remove-folders/tasks/main.yml
deleted file mode 100644 (file)
index 425b8db..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: cleanup leftovers of previous deployment
-  file:
-    path: "{{ item }}"
-    state: absent
-    recurse: no
-  with_items:
-    - "{{ OPNFV_RELENG_PATH }}"
-    - "{{ OPENSTACK_BIFROST_PATH }}"
-    - "{{ OPENSTACK_OSA_PATH }}"
-    - "{{ OPENSTACK_OSA_ETC_PATH }}"
-    - "{{ LOG_PATH }} "
-    - "{{ OPNFV_SSH_HOST_KEYS_PATH }}"
diff --git a/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml
deleted file mode 100644 (file)
index 5c39d89..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# TODO: this role needs to be adjusted for different distros
-- block:
-    - name: restart chrony
-      service:
-        name: chrony
-        state: restarted
-    - name: synchronize time
-      shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
-  when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/template/compute.interface.j2 b/prototypes/xci/template/compute.interface.j2
deleted file mode 100644 (file)
index 094544c..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
-    vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
-    vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
-    vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.10
-    address {{host_info[inventory_hostname].MGMT_IP}}
-    netmask 255.255.252.0
-
-# compute1 VXLAN (tunnel/overlay) bridge config
-auto br-vxlan
-iface br-vxlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.30
-    address {{host_info[inventory_hostname].VXLAN_IP}}
-    netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}
-    address {{host_info[inventory_hostname].VLAN_IP}}
-    netmask 255.255.255.0
-    gateway 192.168.122.1
-    dns-nameserver 8.8.8.8 8.8.4.4
-    offload-sg off
-    # Create veth pair, don't bomb if already exists
-    pre-up ip link add br-vlan-veth type veth peer name eth12 || true
-    # Set both ends UP
-    pre-up ip link set br-vlan-veth up
-    pre-up ip link set eth12 up
-    # Delete veth pair on DOWN
-    post-down ip link del br-vlan-veth || true
-    bridge_ports br-vlan-veth
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.20
-    address {{host_info[inventory_hostname].STORAGE_IP}}
-    netmask 255.255.252.0
diff --git a/prototypes/xci/template/controller.interface.j2 b/prototypes/xci/template/controller.interface.j2
deleted file mode 100644 (file)
index 638e78e..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
-    vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
-    vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
-    vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.10
-    address {{host_info[inventory_hostname].MGMT_IP}}
-    netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.30
-    address {{host_info[inventory_hostname].VXLAN_IP}}
-    netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}
-    address {{host_info[inventory_hostname].VLAN_IP}}
-    netmask 255.255.255.0
-    gateway 192.168.122.1
-    dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.20
-    address {{host_info[inventory_hostname].STORAGE_IP}}
-    netmask 255.255.252.0
diff --git a/prototypes/xci/template/opnfv.interface.j2 b/prototypes/xci/template/opnfv.interface.j2
deleted file mode 100644 (file)
index e9f8649..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
-    vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
-    vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
-    vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.10
-    address {{host_info[inventory_hostname].MGMT_IP}}
-    netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.30
-    address {{ host_info[inventory_hostname].VXLAN_IP }}
-    netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}
-    address {{host_info[inventory_hostname].VLAN_IP}}
-    netmask 255.255.255.0
-    gateway 192.168.122.1
-    dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
-    bridge_stp off
-    bridge_waitport 0
-    bridge_fd 0
-    bridge_ports {{ interface }}.20
-    address {{host_info[inventory_hostname].STORAGE_IP}}
-    netmask 255.255.252.0
diff --git a/prototypes/xci/var/Debian.yml b/prototypes/xci/var/Debian.yml
deleted file mode 100644 (file)
index d13d080..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# this is the interface the VM nodes are connected to libvirt network "default"
-interface: "ens3"
diff --git a/prototypes/xci/var/RedHat.yml b/prototypes/xci/var/RedHat.yml
deleted file mode 100644 (file)
index 6d03e0f..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# this is placeholder and left blank intentionally to complete later on
diff --git a/prototypes/xci/var/Suse.yml b/prototypes/xci/var/Suse.yml
deleted file mode 100644 (file)
index 6d03e0f..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# this is placeholder and left blank intentionally to complete later on
diff --git a/prototypes/xci/var/opnfv.yml b/prototypes/xci/var/opnfv.yml
deleted file mode 100644 (file)
index 85f532a..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-OPNFV_RELENG_GIT_URL: "{{ lookup('env','OPNFV_RELENG_GIT_URL') }}"
-OPNFV_RELENG_PATH: "{{ lookup('env','OPNFV_RELENG_PATH') }}"
-OPNFV_RELENG_DEV_PATH: "{{ lookup('env','OPNFV_RELENG_DEV_PATH') }}"
-OPNFV_RELENG_VERSION: "{{ lookup('env','OPNFV_RELENG_VERSION') }}"
-OPENSTACK_BIFROST_GIT_URL: "{{ lookup('env','OPENSTACK_BIFROST_GIT_URL') }}"
-OPENSTACK_BIFROST_PATH: "{{ lookup('env','OPENSTACK_BIFROST_PATH') }}"
-OPENSTACK_BIFROST_DEV_PATH: "{{ lookup('env','OPENSTACK_BIFROST_DEV_PATH') }}"
-OPENSTACK_BIFROST_VERSION: "{{ lookup('env','OPENSTACK_BIFROST_VERSION') }}"
-OPENSTACK_OSA_GIT_URL: "{{ lookup('env','OPENSTACK_OSA_GIT_URL') }}"
-OPENSTACK_OSA_PATH: "{{ lookup('env','OPENSTACK_OSA_PATH') }}"
-OPENSTACK_OSA_DEV_PATH: "{{ lookup('env','OPENSTACK_OSA_DEV_PATH') }}"
-OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
-OPENSTACK_OSA_ETC_PATH: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}"
-XCI_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_ANSIBLE_PIP_VERSION') }}"
-XCI_FLAVOR: "{{ lookup('env','XCI_FLAVOR') }}"
-XCI_FLAVOR_ANSIBLE_FILE_PATH: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}"
-XCI_LOOP: "{{ lookup('env','XCI_LOOP') }}"
-LOG_PATH: "{{ lookup('env','LOG_PATH') }}"
-OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}"
-OPNFV_SSH_HOST_KEYS_PATH: "{{ lookup('env', 'OPNFV_SSH_HOST_KEYS_PATH') }}"
diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh
deleted file mode 100755 (executable)
index 3a65983..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/bash
-set -o errexit
-set -o nounset
-set -o pipefail
-
-#-------------------------------------------------------------------------------
-# This script should not be run as root
-#-------------------------------------------------------------------------------
-if [[ $(whoami) == "root" ]]; then
-    echo "WARNING: This script should not be run as root!"
-    echo "Elevated privileges are aquired automatically when necessary"
-    echo "Waiting 10s to give you a chance to stop the script (Ctrl-C)"
-    for x in $(seq 10 -1 1); do echo -n "$x..."; sleep 1; done
-fi
-
-#-------------------------------------------------------------------------------
-# Set environment variables
-#-------------------------------------------------------------------------------
-# The order of sourcing the variable files is significant so please do not
-# change it or things might stop working.
-# - user-vars: variables that can be configured or overriden by user.
-# - pinned-versions: versions to checkout. These can be overriden if you want to
-#   use different/more recent versions of the tools but you might end up using
-#   something that is not verified by OPNFV XCI.
-# - flavor-vars: settings for VM nodes for the chosen flavor.
-# - env-vars: variables for the xci itself and you should not need to change or
-#   override any of them.
-#-------------------------------------------------------------------------------
-# find where are we
-XCI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-# source user vars
-source $XCI_PATH/config/user-vars
-# source pinned versions
-source $XCI_PATH/config/pinned-versions
-# source flavor configuration
-source "$XCI_PATH/config/${XCI_FLAVOR}-vars"
-# source xci configuration
-source $XCI_PATH/config/env-vars
-
-#-------------------------------------------------------------------------------
-# Log info to console
-#-------------------------------------------------------------------------------
-echo "Info: Starting XCI Deployment"
-echo "Info: Deployment parameters"
-echo "-------------------------------------------------------------------------"
-echo "xci flavor: $XCI_FLAVOR"
-echo "opnfv/releng version: $OPNFV_RELENG_VERSION"
-echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
-echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
-echo "-------------------------------------------------------------------------"
-
-#-------------------------------------------------------------------------------
-# Install ansible on localhost
-#-------------------------------------------------------------------------------
-source file/install-ansible.sh
-
-# TODO: The xci playbooks can be put into a playbook which will be done later.
-
-#-------------------------------------------------------------------------------
-# Start provisioning VM nodes
-#-------------------------------------------------------------------------------
-# This playbook
-# - removes directories that were created by the previous xci run
-# - clones opnfv/releng and openstack/bifrost repositories
-# - combines opnfv/releng and openstack/bifrost scripts/playbooks
-# - destorys VMs, removes ironic db, leases, logs
-# - creates and provisions VMs for the chosen flavor
-#-------------------------------------------------------------------------------
-echo "Info: Starting provisining VM nodes using openstack/bifrost"
-echo "-------------------------------------------------------------------------"
-cd $XCI_PATH/playbooks
-ansible-playbook $ANSIBLE_VERBOSITY -i inventory provision-vm-nodes.yml
-echo "-----------------------------------------------------------------------"
-echo "Info: VM nodes are provisioned!"
-source $OPENSTACK_BIFROST_PATH/env-vars
-ironic node-list
-echo
-#-------------------------------------------------------------------------------
-# Configure localhost
-#-------------------------------------------------------------------------------
-# This playbook
-# - removes directories that were created by the previous xci run
-# - clones opnfv/releng repository
-# - creates log directory
-# - copies flavor files such as playbook, inventory, and var file
-#-------------------------------------------------------------------------------
-echo "Info: Configuring localhost for openstack-ansible"
-echo "-----------------------------------------------------------------------"
-cd $XCI_PATH/playbooks
-ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-localhost.yml
-echo "-----------------------------------------------------------------------"
-echo "Info: Configured localhost host for openstack-ansible"
-
-#-------------------------------------------------------------------------------
-# Configure openstack-ansible deployment host, opnfv
-#-------------------------------------------------------------------------------
-# This playbook
-# - removes directories that were created by the previous xci run
-# - clones opnfv/releng and openstack/openstack-ansible repositories
-# - configures network
-# - generates/prepares ssh keys
-# - bootstraps ansible
-# - copies flavor files to be used by openstack-ansible
-#-------------------------------------------------------------------------------
-echo "Info: Configuring opnfv deployment host for openstack-ansible"
-echo "-----------------------------------------------------------------------"
-cd $OPNFV_RELENG_PATH/prototypes/xci/playbooks
-ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-opnfvhost.yml
-echo "-----------------------------------------------------------------------"
-echo "Info: Configured opnfv deployment host for openstack-ansible"
-
-#-------------------------------------------------------------------------------
-# Skip the rest if the flavor is aio since the target host for aio is opnfv
-#-------------------------------------------------------------------------------
-if [[ $XCI_FLAVOR == "aio" ]]; then
-    echo "xci: aio has been installed"
-    exit 0
-fi
-
-#-------------------------------------------------------------------------------
-# Configure target hosts for openstack-ansible
-#-------------------------------------------------------------------------------
-# This playbook
-# - adds public keys to target hosts
-# - configures network
-# - configures nfs
-#-------------------------------------------------------------------------------
-echo "Info: Configuring target hosts for openstack-ansible"
-echo "-----------------------------------------------------------------------"
-cd $OPNFV_RELENG_PATH/prototypes/xci/playbooks
-ansible-playbook $ANSIBLE_VERBOSITY -i inventory configure-targethosts.yml
-echo "-----------------------------------------------------------------------"
-echo "Info: Configured target hosts"
-
-#-------------------------------------------------------------------------------
-# Set up target hosts for openstack-ansible
-#-------------------------------------------------------------------------------
-# This is openstack-ansible playbook. Check upstream documentation for details.
-#-------------------------------------------------------------------------------
-echo "Info: Setting up target hosts for openstack-ansible"
-echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "openstack-ansible \
-     $OPENSTACK_OSA_PATH/playbooks/setup-hosts.yml" | \
-     tee $LOG_PATH/setup-hosts.log
-echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-hosts.log; then
-    echo "Error: OpenStack node setup failed!"
-    exit 1
-fi
-echo "Info: Set up target hosts for openstack-ansible successfuly"
-
-#-------------------------------------------------------------------------------
-# Set up infrastructure
-#-------------------------------------------------------------------------------
-# This is openstack-ansible playbook. Check upstream documentation for details.
-#-------------------------------------------------------------------------------
-echo "Info: Setting up infrastructure"
-echo "-----------------------------------------------------------------------"
-echo "xci: running ansible playbook setup-infrastructure.yml"
-ssh root@$OPNFV_HOST_IP "openstack-ansible \
-     $OPENSTACK_OSA_PATH/playbooks//setup-infrastructure.yml" | \
-     tee $LOG_PATH/setup-infrastructure.log
-echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
-    echo "Error: OpenStack node setup failed!"
-    exit 1
-fi
-
-#-------------------------------------------------------------------------------
-# Verify database cluster
-#-------------------------------------------------------------------------------
-echo "Info: Verifying database cluster"
-echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "ansible -i $OPENSTACK_OSA_PATH/playbooks/inventory/ \
-           galera_container -m shell \
-           -a "mysql -h localhost -e 'show status like \"%wsrep_cluster_%\";'"" \
-           | tee $LOG_PATH/galera.log
-echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'FAILED' $LOG_PATH/galera.log; then
-    echo "Error: Database cluster verification failed!"
-    exit 1
-fi
-echo "Info: Database cluster verification successful!"
-
-#-------------------------------------------------------------------------------
-# Install OpenStack
-#-------------------------------------------------------------------------------
-# This is openstack-ansible playbook. Check upstream documentation for details.
-#-------------------------------------------------------------------------------
-echo "Info: Installing OpenStack on target hosts"
-echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "openstack-ansible \
-     $OPENSTACK_OSA_PATH/playbooks/setup-openstack.yml" | \
-     tee $LOG_PATH/opnfv-setup-openstack.log
-echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
-   echo "Error: OpenStack installation failed!"
-   exit 1
-fi
-echo "Info: OpenStack installation is successfully completed!"
diff --git a/setup.py b/setup.py
deleted file mode 100644 (file)
index 3c93408..0000000
--- a/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-from setuptools import setup
-
-setup(
-    name="releng",
-    version="master",
-    url="https://www.opnfv.org",
-)
diff --git a/tox.ini b/tox.ini
index e9f5fbb..d3489e5 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -8,27 +8,13 @@ envlist = py27
 skipsdist = True
 
 [testenv]
-usedevelop = True
+usedevelop = False
 setenv=
   HOME = {envtmpdir}
   PYTHONPATH = {toxinidir}
 
 [testenv:jjb]
 deps =
-       -rjjb/test-requirements.txt
+  jenkins-job-builder==1.6.1
 commands=
-       jenkins-jobs test -o job_output -r jjb/
-
-[testenv:modules]
-deps=
-       -rmodules/requirements.txt
-       -rmodules/test-requirements.txt
-commands =
-       nosetests -w modules \
-       --with-xunit \
-       --xunit-file=modules/nosetests.xml \
-       --cover-package=opnfv \
-       --with-coverage \
-       --cover-xml \
-       --cover-html \
-       tests/unit
+  jenkins-jobs test -o job_output -r jjb/
index 197e493..def5ecc 100644 (file)
@@ -12,6 +12,8 @@ parser.add_argument("-u", "--user", help="Give username of this pod")
 parser.add_argument("-k", "--key", help="Give key file of the user")
 parser.add_argument("-p", "--password", help="Give password of the user")
 parser.add_argument("-f", "--filepath", help="Give dest path of output file")
+parser.add_argument("-s", "--sshkey", default="/root/.ssh/id_rsa",
+                    help="Give the path for ssh key")
 args = parser.parse_args()
 
 
@@ -49,7 +51,7 @@ def get_with_passwd():
                                        args.user, installer_pwd=args.password)
 
 
-def create_file(handler):
+def create_file(handler, INSTALLER_TYPE):
     """
     Create the yaml file of nodes info.
     As Yardstick required, node name must be node1, node2, ... and node1 must
@@ -62,34 +64,37 @@ def create_file(handler):
     nodes = handler.nodes
     node_list = []
     index = 1
+    user = 'root'
+    if INSTALLER_TYPE == 'apex':
+        user = 'heat-admin'
     for node in nodes:
         try:
             if node.roles[0].lower() == "controller":
                 node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                             'ip': node.ip, 'user': 'root'}
+                             'ip': node.ip, 'user': user}
                 node_list.append(node_info)
                 index += 1
         except Exception:
             node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
-                         'user': 'root'}
+                         'user': user}
             node_list.append(node_info)
     for node in nodes:
         try:
             if node.roles[0].lower() == "compute":
                 node_info = {'name': "node%s" % index, 'role': node.roles[0],
-                             'ip': node.ip, 'user': 'root'}
+                             'ip': node.ip, 'user': user}
                 node_list.append(node_info)
                 index += 1
         except Exception:
             node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
-                         'user': 'root'}
+                         'user': user}
             node_list.append(node_info)
     if args.INSTALLER_TYPE == 'compass':
         for item in node_list:
             item['password'] = 'root'
     else:
         for item in node_list:
-            item['key_filename'] = '/root/.ssh/id_rsa'
+            item['key_filename'] = args.sshkey
     data = {'nodes': node_list}
     with open(args.filepath, "w") as fw:
         yaml.dump(data, fw)
@@ -105,7 +110,7 @@ def main():
     if not handler:
         print("Error: failed to get the node's handler.")
         return 1
-    create_file(handler)
+    create_file(handler, args.INSTALLER_TYPE)
 
 
 if __name__ == '__main__':
index 458bbda..377930d 100755 (executable)
@@ -12,8 +12,9 @@ set -o nounset
 set -o pipefail
 
 usage() {
-    echo "usage: $0 [-v] -d <destination> -i <installer_type> -a <installer_ip>" >&2
+    echo "usage: $0 [-v] -d <destination> -i <installer_type> -a <installer_ip> [-o <os_cacert>] [-s <ssh_key>]" >&2
     echo "[-v] Virtualized deployment" >&2
+    echo "[-s <ssh_key>] Path to ssh key. For MCP deployments only" >&2
 }
 
 info ()  {
@@ -53,11 +54,13 @@ swap_to_public() {
 : ${DEPLOY_TYPE:=''}
 
 #Get options
-while getopts ":d:i:a:h:v" optchar; do
+while getopts ":d:i:a:h:s:o:v" optchar; do
     case "${optchar}" in
         d) dest_path=${OPTARG} ;;
         i) installer_type=${OPTARG} ;;
         a) installer_ip=${OPTARG} ;;
+        s) ssh_key=${OPTARG} ;;
+        o) os_cacert=${OPTARG} ;;
         v) DEPLOY_TYPE="virt" ;;
         *) echo "Non-option argument: '-${OPTARG}'" >&2
            usage
@@ -68,8 +71,12 @@ done
 
 # set vars from env if not provided by user as options
 dest_path=${dest_path:-$HOME/opnfv-openrc.sh}
+os_cacert=${os_cacert:-$HOME/os_cacert}
 installer_type=${installer_type:-$INSTALLER_TYPE}
 installer_ip=${installer_ip:-$INSTALLER_IP}
+if [ "${installer_type}" == "fuel" ] && [ "${BRANCH}" == "master" ]; then
+    installer_ip=${SALT_MASTER_IP}
+fi
 
 if [ -z $dest_path ] || [ -z $installer_type ] || [ -z $installer_ip ]; then
     usage
@@ -89,40 +96,49 @@ ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
 # Start fetching the files
 if [ "$installer_type" == "fuel" ]; then
-    #ip_fuel="10.20.0.2"
     verify_connectivity $installer_ip
+    if [ "${BRANCH}" == "master" ]; then
+        ssh_key=${ssh_key:-$SSH_KEY}
+        if [ -z $ssh_key ] || [ ! -f $ssh_key ]; then
+            error "Please provide path to existing ssh key for mcp deployment."
+            exit 2
+        fi
+        ssh_options+=" -i ${ssh_key}"
 
-    env=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        'fuel env'|grep operational|head -1|awk '{print $1}') &> /dev/null
-    if [ -z $env ]; then
-        error "No operational environment detected in Fuel"
-    fi
-    env_id="${FUEL_ENV:-$env}"
+        # retrieving controller vip
+        controller_ip=$(ssh 2>/dev/null ${ssh_options} ubuntu@${installer_ip} \
+            "sudo salt --out txt 'ctl*' pillar.get _param:openstack_control_address | awk '{print \$2; exit}'" | \
+            sed 's/ //g') &> /dev/null
 
-    # Check if controller is alive (online='True')
-    controller_ip=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        "fuel node --env ${env_id} | grep controller | grep 'True\|  1' | awk -F\| '{print \$5}' | head -1" | \
-        sed 's/ //g') &> /dev/null
+        info "Fetching rc file from controller $controller_ip..."
+        ssh ${ssh_options} ubuntu@${controller_ip} "sudo cat /root/keystonercv3" > $dest_path
 
-    if [ -z $controller_ip ]; then
-        error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
-    fi
+        if [[ $BUILD_TAG =~ "baremetal" ]]; then
+            ssh ${ssh_options} ubuntu@${installer_ip} "cat /etc/ssl/certs/os_cacert" > $os_cacert
+        fi
+    else
+        #ip_fuel="10.20.0.2"
+        env=$(sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+            'fuel env'|grep operational|head -1|awk '{print $1}') &> /dev/null
+        if [ -z $env ]; then
+            error "No operational environment detected in Fuel"
+        fi
+        env_id="${FUEL_ENV:-$env}"
 
-    info "Fetching rc file from controller $controller_ip..."
-    sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        "scp $ssh_options ${controller_ip}:/root/openrc ." &> /dev/null
-    sshpass -p r00tme scp 2>/dev/null $ssh_options root@${installer_ip}:~/openrc $dest_path &> /dev/null
+        # Check if controller is alive (online='True')
+        controller_ip=$(sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+            "fuel node --env ${env_id} | grep controller | grep 'True\|  1' | awk -F\| '{print \$5}' | head -1" | \
+            sed 's/ //g') &> /dev/null
 
-    #This file contains the mgmt keystone API, we need the public one for our rc file
-    admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//')
-    public_ip=$(sshpass -p r00tme ssh $ssh_options root@${installer_ip} \
-        "ssh ${controller_ip} 'source openrc; openstack endpoint list'" \
-        | grep keystone | grep public | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
-        #| grep http | head -1 | cut -d '|' -f 4 | sed 's/v1\/.*/v1\//' | sed 's/ //g') &> /dev/null
-    #NOTE: this is super ugly sed 's/v1\/.*/v1\//'OS_AUTH_URL
-    # but sometimes the output of endpoint-list is like this: http://172.30.9.70:8004/v1/%(tenant_id)s
-    # Fuel virtual need a fix
+        if [ -z $controller_ip ]; then
+            error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
+        fi
 
+        info "Fetching rc file from controller $controller_ip..."
+        sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+            "scp ${ssh_options} ${controller_ip}:/root/openrc ." &> /dev/null
+        sshpass -p r00tme scp 2>/dev/null ${ssh_options} root@${installer_ip}:~/openrc $dest_path &> /dev/null
+    fi
     #convert to v3 URL
     auth_url=$(cat $dest_path|grep AUTH_URL)
     if [[ -z `echo $auth_url |grep v3` ]]; then
@@ -143,36 +159,41 @@ elif [ "$installer_type" == "apex" ]; then
     sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc.v3 $dest_path
 
 elif [ "$installer_type" == "compass" ]; then
-    verify_connectivity $installer_ip
-    controller_ip=$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        'mysql -ucompass -pcompass -Dcompass -e"select *  from cluster;"' \
-        | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"127.0.0.1\"/) {print $(i+2);break;}}'  \
-        | grep -oP "\d+.\d+.\d+.\d+")
-
-    if [ -z $controller_ip ]; then
-        error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
-    fi
-
-    info "Fetching rc file from controller $controller_ip..."
-    sshpass -p root ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        "scp $ssh_options ${controller_ip}:/opt/admin-openrc.sh ." &> /dev/null
-    sshpass -p root scp 2>/dev/null $ssh_options root@${installer_ip}:~/admin-openrc.sh $dest_path &> /dev/null
-
-    info "This file contains the mgmt keystone API, we need the public one for our rc file"
-
-    if grep "OS_AUTH_URL.*v2" $dest_path > /dev/null 2>&1 ; then
-        public_ip=$(sshpass -p root ssh $ssh_options root@${installer_ip} \
-            "ssh ${controller_ip} 'source /opt/admin-openrc.sh; openstack endpoint show identity '" \
-            | grep publicurl | awk '{print $4}')
+    if [ "${BRANCH}" == "master" ]; then
+        sudo docker cp compass-tasks:/opt/openrc $dest_path &> /dev/null
+        sudo chown $(whoami):$(whoami) $dest_path
+        sudo docker cp compass-tasks:/opt/os_cacert $os_cacert &> /dev/null
     else
-        public_ip=$(sshpass -p root ssh $ssh_options root@${installer_ip} \
-            "ssh ${controller_ip} 'source /opt/admin-openrc.sh; \
-                 openstack endpoint list --interface public --service identity '" \
-            | grep identity | awk '{print $14}')
-    fi
-    info "public_ip: $public_ip"
-    swap_to_public $public_ip
+        verify_connectivity $installer_ip
+        controller_ip=$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
+            'mysql -ucompass -pcompass -Dcompass -e"select *  from cluster;"' \
+            | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"127.0.0.1\"/) {print $(i+2);break;}}'  \
+            | grep -oP "\d+.\d+.\d+.\d+")
+
+        if [ -z $controller_ip ]; then
+            error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
+        fi
 
+        info "Fetching rc file from controller $controller_ip..."
+        sshpass -p root ssh 2>/dev/null $ssh_options root@${installer_ip} \
+            "scp $ssh_options ${controller_ip}:/opt/admin-openrc.sh ." &> /dev/null
+        sshpass -p root scp 2>/dev/null $ssh_options root@${installer_ip}:~/admin-openrc.sh $dest_path &> /dev/null
+
+        info "This file contains the mgmt keystone API, we need the public one for our rc file"
+
+        if grep "OS_AUTH_URL.*v2" $dest_path > /dev/null 2>&1 ; then
+            public_ip=$(sshpass -p root ssh $ssh_options root@${installer_ip} \
+                "ssh ${controller_ip} 'source /opt/admin-openrc.sh; openstack endpoint show identity '" \
+                | grep publicurl | awk '{print $4}')
+        else
+            public_ip=$(sshpass -p root ssh $ssh_options root@${installer_ip} \
+                "ssh ${controller_ip} 'source /opt/admin-openrc.sh; \
+                     openstack endpoint list --interface public --service identity '" \
+                | grep identity | awk '{print $14}')
+        fi
+        info "public_ip: $public_ip"
+        swap_to_public $public_ip
+    fi
 
 elif [ "$installer_type" == "joid" ]; then
     # do nothing...for the moment
index 8fce2e0..c46ca89 100755 (executable)
@@ -61,8 +61,8 @@ main () {
     #make pid dir
     pidfile="/var/run/$jenkinsuser/jenkins_jnlp_pid"
     if ! [ -d /var/run/$jenkinsuser/ ]; then
-        mkdir /var/run/$jenkinsuser/
-        chown $jenkinsuser:$jenkinsuser /var/run/$jenkinsuser/
+        sudo mkdir /var/run/$jenkinsuser/
+        sudo chown $jenkinsuser:$jenkinsuser /var/run/$jenkinsuser/
     fi
 
     if [[ $skip_monit != true ]]; then
index f0c488a..518d20a 100644 (file)
@@ -27,10 +27,13 @@ node_list=(\
 'ericsson-pod1' 'ericsson-pod2' \
 'ericsson-virtual1' 'ericsson-virtual2'  'ericsson-virtual3' \
 'ericsson-virtual4' 'ericsson-virtual5' 'ericsson-virtual12' \
-'arm-pod1' 'arm-pod3' \
+'arm-pod1' 'arm-pod5' \
 'huawei-pod1' 'huawei-pod2' 'huawei-pod3' 'huawei-pod4' 'huawei-pod5' \
 'huawei-pod6' 'huawei-pod7' 'huawei-pod12' \
-'huawei-virtual1' 'huawei-virtual2' 'huawei-virtual3' 'huawei-virtual4')
+'huawei-virtual1' 'huawei-virtual2' 'huawei-virtual3' 'huawei-virtual4' \
+'huawei-virtual5' 'huawei-virtual8' 'huawei-virtual9' \
+'zte-pod2' \
+'zte-virtual1')
 
 
 if [[ ! " ${node_list[@]} " =~ " ${testbed} " ]]; then
@@ -7,6 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import requests
+import time
 
 from tornado.escape import json_encode
 from tornado.escape import json_decode
@@ -24,7 +25,7 @@ class FiltersHandler(BaseHandler):
                 'status': ['success', 'warning', 'danger'],
                 'projects': ['functest', 'yardstick'],
                 'installers': ['apex', 'compass', 'fuel', 'joid'],
-                'version': ['colorado', 'master'],
+                'version': ['master', 'colorado', 'danube'],
                 'loops': ['daily', 'weekly', 'monthly'],
                 'time': ['10 days', '30 days']
             }
@@ -53,27 +54,27 @@ class ScenariosHandler(BaseHandler):
     def _get_scenario_result(self, scenario, data, args):
         result = {
             'status': data.get('status'),
-            'installers': self._get_installers_result(data['installers'], args)
+            'installers': self._get_installers_result(data, args)
         }
         return result
 
     def _get_installers_result(self, data, args):
         func = self._get_installer_result
-        return {k: func(k, data.get(k, {}), args) for k in args['installers']}
+        return {k: func(data.get(k, {}), args) for k in args['installers']}
 
-    def _get_installer_result(self, installer, data, args):
-        projects = data.get(args['version'], [])
-        return [self._get_project_data(projects, p) for p in args['projects']]
+    def _get_installer_result(self, data, args):
+        return self._get_version_data(data.get(args['version'], {}), args)
 
-    def _get_project_data(self, projects, project):
+    def _get_version_data(self, data, args):
+        return {k: self._get_project_data(data.get(k, {}))
+                for k in args['projects']}
+
+    def _get_project_data(self, data):
         atom = {
-            'project': project,
-            'score': None,
-            'status': None
+            'score': data.get('score', ''),
+            'status': data.get('status', '')
         }
-        for p in projects:
-            if p['project'] == project:
-                return p
+
         return atom
 
     def _get_scenarios(self):
@@ -88,41 +89,42 @@ class ScenariosHandler(BaseHandler):
                                                                     [])
                                                               ) for a in data}
         scenario = {
-            'status': self._get_status(),
-            'installers': installers
+            'status': self._get_status()
         }
+        scenario.update(installers)
+
         return scenario
 
     def _get_status(self):
         return 'success'
 
     def _get_installer(self, data):
-        return {a.get('version'): self._get_version(a) for a in data}
+        return {a.get('version'): self._get_version(a.get('projects'))
+                for a in data}
 
     def _get_version(self, data):
+        return {a.get('project'): self._get_project(a) for a in data}
+
+    def _get_project(self, data):
+        scores = data.get('scores', [])
+        trusts = data.get('trust_indicators', [])
+
         try:
-            scores = data.get('score', {}).get('projects')[0]
-            trusts = data.get('trust_indicator', {}).get('projects')[0]
-        except (TypeError, IndexError):
-            return []
-        else:
-            scores = {key: [dict(date=a.get('date')[:10],
-                                 score=a.get('score')
-                                 ) for a in scores[key]] for key in scores}
-            trusts = {key: [dict(date=a.get('date')[:10],
-                                 status=a.get('status')
-                                 ) for a in trusts[key]] for key in trusts}
-            atom = self._get_atom(scores, trusts)
-            return [dict(project=k,
-                         score=sorted(atom[k], reverse=True)[0].get('score'),
-                         status=sorted(atom[k], reverse=True)[0].get('status')
-                         ) for k in atom if atom[k]]
-
-    def _get_atom(self, scores, trusts):
-        s = {k: {a['date']: a['score'] for a in scores[k]} for k in scores}
-        t = {k: {a['date']: a['status'] for a in trusts[k]} for k in trusts}
-        return {k: [dict(score=s[k][a], status=t[k][a], data=a
-                         ) for a in s[k] if a in t[k]] for k in s}
+            date = sorted(scores, reverse=True)[0].get('date')
+        except IndexError:
+            data = time.time()
+
+        try:
+            score = sorted(scores, reverse=True)[0].get('score')
+        except IndexError:
+            score = None
+
+        try:
+            status = sorted(trusts, reverse=True)[0].get('status')
+        except IndexError:
+            status = None
+
+        return {'date': date, 'score': score, 'status': status}
 
     def _change_to_utf8(self, obj):
         if isinstance(obj, dict):
diff --git a/utils/test/reporting/api/requirements.txt b/utils/test/reporting/api/requirements.txt
deleted file mode 100644 (file)
index 12ad688..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-tornado==4.4.2
-requests==2.1.0
-
diff --git a/utils/test/reporting/api/setup.cfg b/utils/test/reporting/api/setup.cfg
deleted file mode 100644 (file)
index 53d1092..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-[metadata]
-name = reporting
-
-author = JackChan
-author-email = chenjiankun1@huawei.com
-
-classifier =
-    Environment :: opnfv
-    Intended Audience :: Information Technology
-    Intended Audience :: System Administrators
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
-    Programming Language :: Python
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
-
-[global]
-setup-hooks =
-    pbr.hooks.setup_hook
-
-[files]
-packages =
-    api
-
-[entry_points]
-console_scripts =
-    api = api.server:main
-
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
diff --git a/utils/test/reporting/api/setup.py b/utils/test/reporting/api/setup.py
deleted file mode 100644 (file)
index d974816..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-import setuptools
-
-
-__author__ = 'JackChan'
-
-
-setuptools.setup(
-    setup_requires=['pbr>=1.8'],
-    pbr=True)
index ad278ce..f235790 100644 (file)
 FROM nginx:stable
 
 MAINTAINER Morgan Richomme <morgan.richomme@orange.com>
-LABEL version="danube.1.0" description="OPNFV Test Reporting Docker container"
+LABEL version="1.0" description="OPNFV Test Reporting Docker container"
 
 ARG BRANCH=master
 
 ENV HOME /home/opnfv
-ENV working_dir /home/opnfv/utils/test/reporting
-ENV TERM xterm
-ENV COLORTERM gnome-terminal
-ENV CONFIG_REPORTING_YAML /home/opnfv/utils/test/reporting/reporting.yaml
+ENV working_dir ${HOME}/releng/utils/test/reporting
+ENV CONFIG_REPORTING_YAML ${working_dir}/reporting.yaml
 
+WORKDIR ${HOME}
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
+build-essential \
 ssh \
+curl \
+gnupg \
 python-pip \
+python-dev \
+python-setuptools \
 git-core \
-wkhtmltopdf \
-nodejs \
-npm \
 supervisor \
 --no-install-recommends
 
-RUN pip install --upgrade pip
+RUN pip install --upgrade pip && easy_install -U setuptools==30.0.0
 
-RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng /home/opnfv
-RUN pip install -r ${working_dir}/docker/requirements.pip
+RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng /home/opnfv/releng
+RUN pip install -r ${working_dir}/requirements.txt
 
-WORKDIR ${working_dir}/api
-RUN pip install -r requirements.txt
-RUN python setup.py install
+RUN sh -c 'curl -sL https://deb.nodesource.com/setup_8.x | bash -' \
+    && apt-get install -y nodejs \
+    && npm install -g bower \
+    && npm install -g grunt \
+    && npm install -g grunt-cli
 
 WORKDIR ${working_dir}
+RUN python setup.py install
 RUN docker/reporting.sh
+RUN docker/web_server.sh
 
 expose 8000
 
index 9e26972..ced8179 100644 (file)
@@ -15,10 +15,10 @@ server {
     }
 
     location /reporting/ {
-        alias /home/opnfv/utils/test/reporting/pages/dist/;
+        alias /home/opnfv/releng/utils/test/reporting/pages/dist/;
     }
 
     location /display/ {
-        alias /home/opnfv/utils/test/reporting/display/;
+        alias /home/opnfv/releng/utils/test/reporting/display/;
     }
 }
index 49f4517..076dc47 100755 (executable)
@@ -1,10 +1,10 @@
 #!/bin/bash
 
-export PYTHONPATH="${PYTHONPATH}:."
-export CONFIG_REPORTING_YAML=./reporting.yaml
+export PYTHONPATH="${PYTHONPATH}:./reporting"
+export CONFIG_REPORTING_YAML=./reporting/reporting.yaml
 
 declare -a versions=(danube master)
-declare -a projects=(functest storperf yardstick)
+declare -a projects=(functest storperf yardstick qtip)
 
 project=$1
 reporting_type=$2
@@ -29,8 +29,9 @@ cp -Rf js display
 #  projet   |        option
 #   $1      |          $2
 # functest  | status, vims, tempest
-# yardstick |
-# storperf  |
+# yardstick | status
+# storperf  | status
+# qtip      | status
 
 function report_project()
 {
@@ -40,7 +41,7 @@ function report_project()
   echo "********************************"
   echo " $project reporting "
   echo "********************************"
-  python ./$dir/reporting-$type.py
+  python ./reporting/$dir/reporting-$type.py
   if [ $? ]; then
     echo "$project reporting $type...OK"
   else
@@ -50,51 +51,28 @@ function report_project()
 
 if [ -z "$1" ]; then
   echo "********************************"
-  echo " Functest reporting "
+  echo " * Static status reporting     *"
   echo "********************************"
-  echo "reporting vIMS..."
-  python ./functest/reporting-vims.py
-  echo "reporting vIMS...OK"
-  sleep 10
-  echo "reporting Tempest..."
-  python ./functest/reporting-tempest.py
-  echo "reporting Tempest...OK"
-  sleep 10
-  echo "reporting status..."
-  python ./functest/reporting-status.py
-  echo "Functest reporting status...OK"
-
-  echo "********************************"
-  echo " Yardstick reporting "
-  echo "********************************"
-  python ./yardstick/reporting-status.py
-  echo "Yardstick reporting status...OK"
+  for i in "${projects[@]}"
+  do
+    report_project $i $i "status"
+    sleep 5
+  done
+  report_project "QTIP" "qtip" "status"
 
-  echo "********************************"
-  echo " Storperf reporting "
-  echo "********************************"
-  python ./storperf/reporting-status.py
-  echo "Storperf reporting status...OK"
 
-  report_project "QTIP" "qtip" "status"
+  echo "Functest reporting vIMS..."
+  report_project "functest" "functest" "vims"
+  echo "reporting vIMS...OK"
+  sleep 5
+  echo "Functest reporting Tempest..."
+  report_project "functest" "functest" "tempest"
+  echo "reporting Tempest...OK"
+  sleep 5
 
 else
   if [ -z "$2" ]; then
     reporting_type="status"
   fi
-  echo "********************************"
-  echo " $project/$reporting_type reporting "
-  echo "********************************"
-  python ./$project/reporting-$reporting_type.py
+  report_project $project $project $reporting_type
 fi
-cp -r display /usr/share/nginx/html
-
-
-# nginx config
-cp /home/opnfv/utils/test/reporting/docker/nginx.conf /etc/nginx/conf.d/
-echo "daemon off;" >> /etc/nginx/nginx.conf
-
-# supervisor config
-cp /home/opnfv/utils/test/reporting/docker/supervisor.conf /etc/supervisor/conf.d/
-
-ln -s /usr/bin/nodejs /usr/bin/node
index 6de856e..aeee3ba 100644 (file)
@@ -12,3 +12,4 @@ PyYAML==3.11
 simplejson==3.8.1
 jinja2==2.8
 tornado==4.4.2
+requests==2.12.5
index 5e315ba..49310d4 100644 (file)
@@ -1,22 +1,19 @@
 [supervisord]
 nodaemon = true
 
-[program:reporting_tornado]
+[program:tornado]
 user = root
-directory = /home/opnfv/utils/test/reporting/api/api
+directory = /home/opnfv/releng/utils/test/reporting/api
 command = python server.py --port=800%(process_num)d
 process_name=%(program_name)s%(process_num)d
 numprocs=4
 numprocs_start=1
-autorestart = true
 
-[program:reporting_nginx]
+[program:nginx]
 user = root
 command = service nginx restart
-autorestart = true
 
-[program:reporting_angular]
+[program:configuration]
 user = root
-directory = /home/opnfv/utils/test/reporting/pages
-command = bash angular.sh
-autorestart = true
+directory = /home/opnfv/releng/utils/test/reporting/pages
+command = bash config.sh
diff --git a/utils/test/reporting/docker/web_server.sh b/utils/test/reporting/docker/web_server.sh
new file mode 100755 (executable)
index 0000000..0dd8df7
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash
+cp -r display /usr/share/nginx/html
+
+
+# nginx config
+cp /home/opnfv/releng/utils/test/reporting/docker/nginx.conf /etc/nginx/conf.d/
+echo "daemon off;" >> /etc/nginx/nginx.conf
+
+# supervisor config
+cp /home/opnfv/releng/utils/test/reporting/docker/supervisor.conf /etc/supervisor/conf.d/
+
+# Manage Angular front end
+cd pages && /bin/bash angular.sh
+
diff --git a/utils/test/reporting/docs/_build/.buildinfo b/utils/test/reporting/docs/_build/.buildinfo
new file mode 100644 (file)
index 0000000..6bd6fd6
--- /dev/null
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 235ce07a48cec983846ad34dfd375b07
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/utils/test/reporting/docs/_build/.doctrees/environment.pickle b/utils/test/reporting/docs/_build/.doctrees/environment.pickle
new file mode 100644 (file)
index 0000000..23f59c3
Binary files /dev/null and b/utils/test/reporting/docs/_build/.doctrees/environment.pickle differ
diff --git a/utils/test/reporting/docs/_build/.doctrees/index.doctree b/utils/test/reporting/docs/_build/.doctrees/index.doctree
new file mode 100644 (file)
index 0000000..51e2d5a
Binary files /dev/null and b/utils/test/reporting/docs/_build/.doctrees/index.doctree differ
diff --git a/utils/test/reporting/docs/conf.py b/utils/test/reporting/docs/conf.py
new file mode 100644 (file)
index 0000000..2e70d2b
--- /dev/null
@@ -0,0 +1,341 @@
+# -*- coding: utf-8 -*-
+#
+# OPNFV testing Reporting documentation build configuration file, created by
+# sphinx-quickstart on Mon July 4 10:03:43 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'OPNFV Reporting'
+copyright = u'2017, #opnfv-testperf (chat.freenode.net)'
+author = u'#opnfv-testperf (chat.freenode.net)'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u'master'
+# The full version, including alpha/beta/rc tags.
+release = u'master'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = 'en'
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+# html_title = u'OPNFV Functest vmaster'
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs.  This file should be a Windows icon file (.ico) being 16x16 or
+# 32x32 pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'OPNFVreportingdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+     # The paper size ('letterpaper' or 'a4paper').
+     #
+     # 'papersize': 'letterpaper',
+
+     # The font size ('10pt', '11pt' or '12pt').
+     #
+     # 'pointsize': '10pt',
+
+     # Additional stuff for the LaTeX preamble.
+     #
+     # 'preamble': '',
+
+     # Latex figure (float) alignment
+     #
+     # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'OPNFVReporting.tex',
+     u'OPNFV testing Reporting Documentation',
+     u'\\#opnfv-testperf (chat.freenode.net)', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# It false, will not define \strong, \code,    itleref, \crossref ... but only
+# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
+# packages.
+#
+# latex_keep_old_macro_names = True
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'opnfvReporting', u'OPNFV Testing Reporting Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'OPNFVReporting', u'OPNFV Testing reporting Documentation',
+     author, 'OPNFVTesting', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+#
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#
+# texinfo_no_detailmenu = False
diff --git a/utils/test/reporting/docs/index.rst b/utils/test/reporting/docs/index.rst
new file mode 100644 (file)
index 0000000..af41876
--- /dev/null
@@ -0,0 +1,16 @@
+Welcome to OPNFV Testing reporting documentation!
+=================================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
index 080f27b..0e00ea6 100755 (executable)
@@ -1,8 +1,3 @@
-: ${SERVER_URL:='http://testresults.opnfv.org/reporting/api'}
-
-echo "var BASE_URL = 'http://${SERVER_URL}/landing-page'" >> app/scripts/app.config.js
-echo "var PROJECT_URL = 'http://${SERVER_URL}'" >> app/scripts/app.config.js
-
 apt-get install -y nodejs
 apt-get install -y npm
 npm install
index f4eb65a..843a623 100644 (file)
     <script src="scripts/controllers/auth.controller.js"></script>
     <script src="scripts/controllers/admin.controller.js"></script>
     <script src="scripts/controllers/main.controller.js"></script>
-    <script src="scripts/app.config.js"></script>
     <script src="scripts/controllers/testvisual.controller.js"></script>
 
     <!-- endbuild -->
 </body>
 
-</html>
\ No newline at end of file
+</html>
index 0f3a17a..8d494c3 100644 (file)
  * Controller of the opnfvdashBoardAngularApp
  */
 angular.module('opnfvApp')
-    .controller('TableController', ['$scope', '$state', '$stateParams', '$http', 'TableFactory', function($scope, $state, $stateParams, $http, TableFactory) {
-
-        $scope.filterlist = [];
-        $scope.selection = [];
-        $scope.statusList = [];
-        $scope.projectList = [];
-        $scope.installerList = [];
-        $scope.versionlist = [];
-        $scope.loopci = [];
-        $scope.time = [];
-        $scope.tableDataAll = {};
-        $scope.tableInfoAll = {};
-        $scope.scenario = {};
-
-        $scope.VersionConfig = {
-            create: true,
-            valueField: 'title',
-            labelField: 'title',
-            delimiter: '|',
-            maxItems: 1,
-            placeholder: 'Version',
-            onChange: function(value) {
-                checkElementArrayValue($scope.selection, $scope.VersionOption);
-                $scope.selection.push(value);
-                // console.log($scope.selection);
-                getScenarioData();
-
-            }
-        }
-
-        $scope.LoopConfig = {
-            create: true,
-            valueField: 'title',
-            labelField: 'title',
-            delimiter: '|',
-            maxItems: 1,
-            placeholder: 'Loop',
-            onChange: function(value) {
-                checkElementArrayValue($scope.selection, $scope.LoopOption);
-                $scope.selection.push(value);
-                // console.log($scope.selection);
-                getScenarioData();
-
-            }
-        }
-
-        $scope.TimeConfig = {
-            create: true,
-            valueField: 'title',
-            labelField: 'title',
-            delimiter: '|',
-            maxItems: 1,
-            placeholder: 'Time',
-            onChange: function(value) {
-                checkElementArrayValue($scope.selection, $scope.TimeOption);
-                $scope.selection.push(value);
-                // console.log($scope.selection)
-                getScenarioData();
-
+    .controller('TableController', ['$scope', '$state', '$stateParams', '$http', 'TableFactory', '$timeout',
+        function($scope, $state, $stateParams, $http, TableFactory, $timeout) {
+
+            init();
+
+            function init() {
+                $scope.filterlist = [];
+                $scope.selection = [];
+
+                $scope.statusList = [];
+                $scope.projectList = [];
+                $scope.installerList = [];
+                $scope.versionlist = [];
+                $scope.loopList = [];
+                $scope.timeList = [];
+
+                $scope.selectStatus = [];
+                $scope.selectProjects = [];
+                $scope.selectInstallers = [];
+                $scope.selectVersion = null;
+                $scope.selectLoop = null;
+                $scope.selectTime = null;
+
+                $scope.statusClicked = false;
+                $scope.installerClicked = false;
+                $scope.projectClicked = false;
+
+                $scope.scenarios = {};
+
+                $scope.VersionConfig = {
+                    create: true,
+                    valueField: 'title',
+                    labelField: 'title',
+                    delimiter: '|',
+                    maxItems: 1,
+                    placeholder: 'Version',
+                    onChange: function(value) {
+                        $scope.selectVersion = value;
+
+                        getScenarioData();
 
-            }
-        }
-
-
-        init();
-
-        function init() {
-            $scope.toggleSelection = toggleSelection;
-            getScenarioData();
-            // radioSetting();
-            getFilters();
-        }
-
-        function getFilters() {
-            TableFactory.getFilter().get({
-
-
-            }).$promise.then(function(response) {
-                if (response != null) {
-                    $scope.statusList = response.filters.status;
-                    $scope.projectList = response.filters.projects;
-                    $scope.installerList = response.filters.installers;
-                    $scope.versionlist = response.filters.version;
-                    $scope.loopci = response.filters.loops;
-                    $scope.time = response.filters.time;
-
-                    $scope.statusListString = $scope.statusList.toString();
-                    $scope.projectListString = $scope.projectList.toString();
-                    $scope.installerListString = $scope.installerList.toString();
-                    $scope.VersionSelected = $scope.versionlist[1];
-                    $scope.LoopCiSelected = $scope.loopci[0];
-                    $scope.TimeSelected = $scope.time[0];
-                    radioSetting($scope.versionlist, $scope.loopci, $scope.time);
-
-                } else {
-                    alert("网络错误");
-                }
-            })
-        }
-
-        function getScenarioData() {
-
-            var utl = BASE_URL + '/scenarios';
-            var data = {
-                'status': ['success', 'danger', 'warning'],
-                'projects': ['functest', 'yardstick'],
-                'installers': ['apex', 'compass', 'fuel', 'joid'],
-                'version': $scope.VersionSelected,
-                'loops': $scope.LoopCiSelected,
-                'time': $scope.TimeSelected
-            };
-            var config = {
-                headers: {
-                    'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8;'
-                }
-            }
-            $http.post(utl, data, config).then(function(response) {
-                if (response.status == 200) {
-                    $scope.scenario = response.data;
-                    constructJson();
-                }
-            })
-        }
-
-        //construct json 
-        function constructJson() {
-
-            var colspan;
-            var InstallerData;
-            var projectsInfo;
-            $scope.tableDataAll["scenario"] = [];
-
-
-            for (var item in $scope.scenario.scenarios) {
-
-                var headData = Object.keys($scope.scenario.scenarios[item].installers).sort();
-                var scenarioStatus = $scope.scenario.scenarios[item].status;
-                var scenarioStatusDisplay;
-                if (scenarioStatus == "success") {
-                    scenarioStatusDisplay = "navy";
-                } else if (scenarioStatus == "danger") {
-                    scenarioStatusDisplay = "danger";
-                } else if (scenarioStatus == "warning") {
-                    scenarioStatusDisplay = "warning";
+                    }
                 }
 
-                InstallerData = headData;
-                var projectData = [];
-                var datadisplay = [];
-                var projects = [];
+                $scope.LoopConfig = {
+                    create: true,
+                    valueField: 'title',
+                    labelField: 'title',
+                    delimiter: '|',
+                    maxItems: 1,
+                    placeholder: 'Loop',
+                    onChange: function(value) {
+                        $scope.selectLoop = value;
 
-                for (var j = 0; j < headData.length; j++) {
+                        getScenarioData();
 
-                    projectData.push($scope.scenario.scenarios[item].installers[headData[j]]);
+                    }
                 }
-                for (var j = 0; j < projectData.length; j++) {
-
-                    for (var k = 0; k < projectData[j].length; k++) {
-                        projects.push(projectData[j][k].project);
-                        var temArray = [];
-                        if (projectData[j][k].score == null) {
-                            temArray.push("null");
-                            temArray.push(projectData[j][k].project);
-                            temArray.push(headData[j]);
-                        } else {
-                            temArray.push(projectData[j][k].score);
-                            temArray.push(projectData[j][k].project);
-                            temArray.push(headData[j]);
-                        }
-
-
-                        if (projectData[j][k].status == "platinium") {
-                            temArray.push("primary");
-                            temArray.push("P");
-                        } else if (projectData[j][k].status == "gold") {
-                            temArray.push("danger");
-                            temArray.push("G");
-                        } else if (projectData[j][k].status == "silver") {
-                            temArray.push("warning");
-                            temArray.push("S");
-                        } else if (projectData[j][k].status == null) {
-                            temArray.push("null");
-                        }
-
-                        datadisplay.push(temArray);
 
+                $scope.TimeConfig = {
+                    create: true,
+                    valueField: 'title',
+                    labelField: 'title',
+                    delimiter: '|',
+                    maxItems: 1,
+                    placeholder: 'Time',
+                    onChange: function(value) {
+                        $scope.selectTime = value;
+
+                        getScenarioData();
                     }
-
                 }
 
-                colspan = projects.length / headData.length;
-
-                var tabledata = {
-                    scenarioName: item,
-                    Installer: InstallerData,
-                    projectData: projectData,
-                    projects: projects,
-                    datadisplay: datadisplay,
-                    colspan: colspan,
-                    status: scenarioStatus,
-                    statusDisplay: scenarioStatusDisplay
-                };
-
-                JSON.stringify(tabledata);
-                $scope.tableDataAll.scenario.push(tabledata);
-
-                // console.log(tabledata);
-
+                getFilters();
             }
 
+            function getFilters() {
+                TableFactory.getFilter().get({
+                }).$promise.then(function(response) {
+                    if (response != null) {
+                        $scope.statusList = response.filters.status;
+                        $scope.projectList = response.filters.projects;
+                        $scope.installerList = response.filters.installers;
+                        $scope.versionList = toSelectList(response.filters.version);
+                        $scope.loopList = toSelectList(response.filters.loops);
+                        $scope.timeList = toSelectList(response.filters.time);
+
+                        $scope.selectStatus = copy($scope.statusList);
+                        $scope.selectInstallers = copy($scope.installerList);
+                        $scope.selectProjects = copy($scope.projectList);
+                        $scope.selectVersion = response.filters.version[0];
+                        $scope.selectLoop = response.filters.loops[0];
+                        $scope.selectTime = response.filters.time[0];
+
+                        getScenarioData();
+
+                    } else {
+                    }
+                });
+            }
 
-            projectsInfo = $scope.tableDataAll.scenario[0].projects;
-
-            var tempHeadData = [];
-
-            for (var i = 0; i < InstallerData.length; i++) {
-                for (var j = 0; j < colspan; j++) {
-                    tempHeadData.push(InstallerData[i]);
-                }
+            function toSelectList(arr){
+                var tempList = [];
+                angular.forEach(arr, function(ele){
+                    tempList.push({'title': ele});
+                });
+                return tempList;
             }
 
-            //console.log(tempHeadData);
+            function copy(arr){
+                var tempList = [];
+                angular.forEach(arr, function(ele){
+                    tempList.push(ele);
+                });
+                return tempList;
+            }
 
-            var projectsInfoAll = [];
+            function getScenarioData() {
 
-            for (var i = 0; i < projectsInfo.length; i++) {
-                var tempA = [];
-                tempA.push(projectsInfo[i]);
-                tempA.push(tempHeadData[i]);
-                projectsInfoAll.push(tempA);
+                var data = {
+                    'status': $scope.selectStatus,
+                    'projects': $scope.selectProjects,
+                    'installers': $scope.selectInstallers,
+                    'version': $scope.selectVersion,
+                    'loops': $scope.selectLoop,
+                    'time': $scope.selectTime
+                };
 
-            }
-            //console.log(projectsInfoAll);
+                TableFactory.getScenario(data).then(function(response) {
+                    if (response.status == 200) {
+                        $scope.scenarios = response.data.scenarios;
+                        getScenario();
+                    }
 
-            $scope.tableDataAll["colspan"] = colspan;
-            $scope.tableDataAll["Installer"] = InstallerData;
-            $scope.tableDataAll["Projects"] = projectsInfoAll;
+                }, function(error) {
+                });
 
-            // console.log($scope.tableDataAll);
-            $scope.colspan = $scope.tableDataAll.colspan;
+            }
 
-        }
+            function getScenario(){
 
-        //get json element size
-        function getSize(jsondata) {
-            var size = 0;
-            for (var item in jsondata) {
-                size++;
+                $scope.project_row = [];
+                angular.forEach($scope.selectInstallers, function(installer){
+                    angular.forEach($scope.selectProjects, function(project){
+                        var temp = {
+                            'installer': installer,
+                            'project': project
+                        }
+                        $scope.project_row.push(temp);
+
+                    });
+                });
+
+
+                $scope.scenario_rows = [];
+                angular.forEach($scope.scenarios, function(scenario, name){
+                    var scenario_row = {
+                        'name': null,
+                        'status': null,
+                        'statusDisplay': null,
+                        'datadisplay': [],
+                    };
+                    scenario_row.name = name;
+                    scenario_row.status = scenario.status;
+
+                    var scenarioStatusDisplay;
+                    if (scenario.status == "success") {
+                        scenarioStatusDisplay = "navy";
+                    } else if (scenario.status == "danger") {
+                        scenarioStatusDisplay = "danger";
+                    } else if (scenario.status == "warning") {
+                        scenarioStatusDisplay = "warning";
+                    }
+                    scenario_row.statusDisplay = scenarioStatusDisplay;
+
+                    angular.forEach($scope.selectInstallers, function(installer){
+                        angular.forEach($scope.selectProjects, function(project){
+                            var datadisplay = {
+                                'installer': null,
+                                'project': null,
+                                'value': null,
+                                'label': null,
+                                'label_value': null
+                            };
+                            datadisplay.installer = installer;
+                            datadisplay.project = project;
+                            datadisplay.value = scenario.installers[installer][project].score;
+
+                            var single_status = scenario.installers[installer][project].status;
+                            if (single_status == "platinium") {
+                                datadisplay.label = 'primary';
+                                datadisplay.label_value = 'P';
+                            } else if (single_status == "gold") {
+                                datadisplay.label = 'danger';
+                                datadisplay.label_value = 'G';
+                            } else if (single_status == "silver") {
+                                datadisplay.label = 'warning';
+                                datadisplay.label_value = 'S';
+                            } else if (single_status == null) {
+                            }
+                            scenario_row.datadisplay.push(datadisplay);
+
+                        });
+                    });
+                    $scope.scenario_rows.push(scenario_row);
+                });
             }
-            return size;
-        }
 
-        $scope.colspan = $scope.tableDataAll.colspan;
-        // console.log($scope.colspan);
 
-
-        //find all same element index 
-        function getSameElementIndex(array, element) {
-            var indices = [];
-            var idx = array.indexOf(element);
-            while (idx != -1) {
-                indices.push(idx);
-                idx = array.indexOf(element, idx + 1);
+            function clickBase(eleList, ele){
+                var idx = eleList.indexOf(ele);
+                if(idx > -1){
+                    eleList.splice(idx, 1);
+                }else{
+                    eleList.push(ele);
+                }
             }
-            //return indices;
-            var result = { element: element, index: indices };
-            JSON.stringify(result);
-            return result;
-        }
 
-        //delete element in array
-        function deletElement(array, index) {
-            array.splice(index, 1);
+            $scope.clickStatus = function(status){
+                if($scope.selectStatus.length == $scope.statusList.length && $scope.statusClicked == false){
+                    $scope.selectStatus = [];
+                    $scope.statusClicked = true;
+                }
 
-        }
+                clickBase($scope.selectStatus, status);
 
-        function radioSetting(array1, array2, array3) {
-            var tempVersion = [];
-            var tempLoop = [];
-            var tempTime = [];
-            for (var i = 0; i < array1.length; i++) {
-                var temp = {
-                    title: array1[i]
-                };
-                tempVersion.push(temp);
-            }
-            for (var i = 0; i < array2.length; i++) {
-                var temp = {
-                    title: array2[i]
-                };
-                tempLoop.push(temp);
-            }
-            for (var i = 0; i < array3.length; i++) {
-                var temp = {
-                    title: array3[i]
-                };
-                tempTime.push(temp);
-            }
-            $scope.VersionOption = tempVersion;
-            $scope.LoopOption = tempLoop;
-            $scope.TimeOption = tempTime;
-        }
-
-        //remove element in the array
-        function removeArrayValue(arr, value) {
-            for (var i = 0; i < arr.length; i++) {
-                if (arr[i] == value) {
-                    arr.splice(i, 1);
-                    break;
+                if($scope.selectStatus.length == 0 && $scope.statusClicked == true){
+                    $scope.selectStatus = copy($scope.statusList);
+                    $scope.statusClicked = false;
                 }
+
+                getScenarioData();
             }
-        }
 
-        //check if exist element
-        function checkElementArrayValue(arrayA, arrayB) {
-            for (var i = 0; i < arrayB.length; i++) {
-                if (arrayA.indexOf(arrayB[i].title) > -1) {
-                    removeArrayValue(arrayA, arrayB[i].title);
+            $scope.clickInstaller = function(installer){
+                if($scope.selectInstallers.length == $scope.installerList.length && $scope.installerClicked == false){
+                    $scope.selectInstallers = [];
+                    $scope.installerClicked = true;
                 }
-            }
-        }
 
-        function toggleSelection(status) {
-            var idx = $scope.selection.indexOf(status);
+                clickBase($scope.selectInstallers, installer);
+
+                if($scope.selectInstallers.length == 0 && $scope.installerClicked == true){
+                    $scope.selectInstallers = copy($scope.installerList);
+                    $scope.installerClicked = false;
+                }
 
-            if (idx > -1) {
-                $scope.selection.splice(idx, 1);
-                filterData($scope.selection)
-            } else {
-                $scope.selection.push(status);
-                filterData($scope.selection)
+                getScenarioData();
             }
-            // console.log($scope.selection);
 
-        }
+            $scope.clickProject = function(project){
+                if($scope.selectProjects.length == $scope.projectList.length && $scope.projectClicked == false){
+                    $scope.selectProjects = [];
+                    $scope.projectClicked = true;
+                }
 
-        //filter function
-        function filterData(selection) {
+                clickBase($scope.selectProjects, project);
 
-            $scope.selectInstallers = [];
-            $scope.selectProjects = [];
-            $scope.selectStatus = [];
-            for (var i = 0; i < selection.length; i++) {
-                if ($scope.statusListString.indexOf(selection[i]) > -1) {
-                    $scope.selectStatus.push(selection[i]);
-                }
-                if ($scope.projectListString.indexOf(selection[i]) > -1) {
-                    $scope.selectProjects.push(selection[i]);
-                }
-                if ($scope.installerListString.indexOf(selection[i]) > -1) {
-                    $scope.selectInstallers.push(selection[i]);
+                if($scope.selectProjects.length == 0 && $scope.projectClicked == true){
+                    $scope.selectProjects = copy($scope.projectList);
+                    $scope.projectClicked = false;
                 }
-            }
-
-            $scope.colspan = $scope.selectProjects.length;
-            //when some selection is empty, we set it full
-            if ($scope.selectInstallers.length == 0) {
-                $scope.selectInstallers = $scope.installerList;
 
+                getScenarioData();
             }
-            if ($scope.selectProjects.length == 0) {
-                $scope.selectProjects = $scope.projectList;
-                $scope.colspan = $scope.tableDataAll.colspan;
-            }
-            if ($scope.selectStatus.length == 0) {
-                $scope.selectStatus = $scope.statusList
-            }
-
-            // console.log($scope.selectStatus);
-            // console.log($scope.selectProjects);
 
         }
-
-
-    }]);
\ No newline at end of file
+    ]);
index def8e72..894e10f 100644 (file)
@@ -16,7 +16,7 @@ angular.module('opnfvApp')
             $scope.vsperf = "542,185,640,414";
             $scope.stor = "658,187,750,410";
             $scope.qtip = "769,190,852,416";
-            $scope.bootleneck = "870,192,983,419";
+            $scope.bottlenecks = "870,192,983,419";
             $scope.noPopArea1 = "26,8,1190,180";
             $scope.noPopArea2 = "1018,193,1190,590";
             $scope.noPopArea3 = "37,455,1003,584";
@@ -41,25 +41,18 @@ angular.module('opnfvApp')
                 $scope.tableData = null;
                 $scope.modalName = name;
 
-                var url = PROJECT_URL + '/projects/' + name + '/cases';
-
-                var config = {
-                    headers: {
-                        'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8;'
-                    }
-                }
-                $http.get(url, config).then(function(response) {
+                TableFactory.getProjectTestCases(name).then(function(response) {
                     if (response.status == 200) {
                         $scope.tableData = response.data;
 
                         $scope.tableData = constructObjectArray($scope.tableData);
                         console.log($scope.tableData);
                         $loading.finish('Key');
-
-
-
                     }
+                }, function(error) {
+
                 })
+
             }
 
             //construct key value for tableData
index 2a8cbd0..e715c5c 100644 (file)
@@ -4,45 +4,67 @@
  * get data factory
  */
 angular.module('opnfvApp')
-    .factory('TableFactory', function($resource, $rootScope) {
+    .factory('TableFactory', function($resource, $rootScope, $http) {
+
+        var BASE_URL = 'http://testresults.opnfv.org/reporting2';
+        $.ajax({
+          url: 'config.json',
+          async: false,
+          dataType: 'json',
+          success: function (response) {
+              BASE_URL = response.url;
+          },
+          error: function (response){
+              alert('fail to get api url, using default: http://testresults.opnfv.org/reporting2')
+          }
+        });
 
         return {
             getFilter: function() {
-                return $resource(BASE_URL + '/filters', {}, {
+                return $resource(BASE_URL + '/landing-page/filters', {}, {
                     'get': {
                         method: 'GET',
 
                     }
                 });
             },
-            getScenario: function() {
-                return $resource(BASE_URL + '/scenarios', {}, {
-                    'post': {
-                        method: 'POST',
+            getScenario: function(data) {
+
+                var config = {
+                    headers: {
+                        'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8;'
                     }
-                })
+                }
+
+                return $http.post(BASE_URL + '/landing-page/scenarios', data, config);
             },
+
+
             getProjectUrl: function() {
-                return $resource(PROJECT_URL + '/projects-page/projects', {}, {
+                return $resource(BASE_URL + '/projects-page/projects', {}, {
                     'get': {
                         method: 'GET'
                     }
                 })
             },
-            getProjectTestCases: function() {
-                return $resource(PROJECT_URL + '/projects/:project/cases', { project: '@project' }, {
-                    'get': {
-                        method: 'GET'
+            getProjectTestCases: function(name) {
+                var config = {
+                    headers: {
+                        'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8;'
                     }
-                })
+                };
+                return $http.get(BASE_URL + '/projects/' + name + '/cases', {}, config)
+
+
             },
             getProjectTestCaseDetail: function() {
-                return $resource(PROJECT_URL + '/projects/:project/cases/:testcase', { project: '@project', testcase: '@testcase' }, {
+                return $resource(BASE_URL + '/projects/:project/cases/:testcase', { project: '@project', testcase: '@testcase' }, {
                     'get': {
 
                         method: 'GET'
                     }
                 })
             }
+
         };
-    });
\ No newline at end of file
+    });
index f504bd7..a33c483 100644 (file)
@@ -29,9 +29,9 @@
                     <div class=" col-md-12" data-toggle="buttons" aria-pressed="false">
 
                         <label> Status </label> &nbsp;&nbsp; &nbsp;
-                        <label class="btn btn-outline btn-success btn-sm" style="height:25px; margin-right: 5px;" ng-repeat="status in statusList" value={{status}} ng-checked="selection.indexOf(status)>-1" ng-click="toggleSelection(status)">
+                        <label class="btn btn-outline btn-success btn-sm" style="height:25px; margin-right: 5px;" ng-repeat="status in statusList" value={{status}} ng-checked="selectStatus.indexOf(status)>-1" ng-click="clickStatus(status)">
                               <input type="checkbox"  disabled="disabled" > {{status}}
-                            
+
                           </label>
                     </div>
 
@@ -39,7 +39,7 @@
 
                     <div class=" col-md-12" data-toggle="buttons">
                         <label> Projects </label> &nbsp;
-                        <label class="btn btn-outline btn-success btn-sm " style="height:25px;margin-right: 5px;" ng-repeat="project in projectList" value={{project}} ng-checked="selection.indexOf(project)>-1" ng-click="toggleSelection(project)">
+                        <label class="btn btn-outline btn-success btn-sm " style="height:25px;margin-right: 5px;" ng-repeat="project in projectList" value={{project}} ng-checked="selectProjects.indexOf(project)>-1" ng-click="clickProject(project)">
                             <input type="checkbox" disabled="disabled"> {{project}}
                         </label>
 
@@ -47,7 +47,7 @@
                     <hr class="myhr">
                     <div class=" col-md-12" data-toggle="buttons">
                         <label> Installers </label>
-                        <label class="btn btn-outline btn-success btn-sm" style="height:25px;margin-right: 5px;" ng-repeat="installer in installerList" value={{installer}} ng-checked="selection.indexOf(installer)>-1" ng-click="toggleSelection(installer)">
+                        <label class="btn btn-outline btn-success btn-sm" style="height:25px;margin-right: 5px;" ng-repeat="installer in installerList" value={{installer}} ng-checked="selectInstallers.indexOf(installer)>-1" ng-click="clickInstaller(installer)">
                             <input type="checkbox" disabled="disabled"> {{installer}}
                             </label>
                     </div>
 
 
                     <div class=" col-md-1" style="margin-top:5px;margin-right: 5px;">
-                        <selectize options="VersionOption" ng-model="VersionSelected" config="VersionConfig"></selectize>
+                        <selectize options="versionList" ng-model="selectVersion" config="VersionConfig"></selectize>
 
                     </div>
 
                     <div class=" col-md-1" style="margin-top:5px;margin-right: 5px;">
-                        <selectize options="LoopOption" ng-model="LoopCiSelected" config="LoopConfig"></selectize>
+                        <selectize options="loopList" ng-model="selectLoop" config="LoopConfig"></selectize>
 
                     </div>
 
                     <div class=" col-md-1" style="margin-top:5px;margin-right: 5px;">
-                        <selectize options="TimeOption" ng-model="TimeSelected" config="TimeConfig"></selectize>
+                        <selectize options="timeList" ng-model="selectTime" config="TimeConfig"></selectize>
                     </div>
                 </div>
                 <div class="table-responsive">
                         <thead class="thead">
                             <tr>
                                 <th>Scenario </th>
-                                <th colspan={{colspan}} ng-show="selectInstallers.indexOf(key)!=-1" value={{key}} ng-repeat="key in tableDataAll.Installer"><a href="notfound.html">{{key}}</a> </th>
+                                <th colspan={{selectProjects.length}} ng-show="selectInstallers.indexOf(key)!=-1" value={{key}} ng-repeat="key in selectInstallers"><a href="notfound.html">{{key}}</a> </th>
                             </tr>
 
                             <tr>
 
                                 <td></td>
-                                <td ng-show="selectProjects.indexOf(project[0])!=-1 && selectInstallers.indexOf(project[1])!=-1" ng-repeat="project in tableDataAll.Projects track by $index" data={{project[1]}} value={{project[0]}}>{{project[0]}}</td>
+                                <td ng-show="selectProjects.indexOf(project.project)!=-1 && selectInstallers.indexOf(project.installer)!=-1" ng-repeat="project in project_row track by $index" data={{project.installer}} value={{project.project}}>{{ project.project }}</td>
 
                             </tr>
                         </thead>
                         <tbody class="tbody">
-                            <tr ng-repeat="scenario in tableDataAll.scenario" ng-show="selectStatus.indexOf(scenario.status)!=-1">
+                            <tr ng-repeat="scenario in scenario_rows" ng-show="selectStatus.indexOf(scenario.status)!=-1">
 
-                                <td nowrap="nowrap" data={{scenario.status}}><span class="fa fa-circle text-{{scenario.statusDisplay}}"></span> <a href="notfound.html">{{scenario.scenarioName}}</a> </td>
+                                <td nowrap="nowrap" data={{scenario.status}}><span class="fa fa-circle text-{{scenario.statusDisplay}}"></span> <a href="notfound.html">{{scenario.name}}</a> </td>
 
                                 <!--<td style="background-color:#e7eaec" align="justify" ng-if="data[0]=='Not Support'" ng-repeat="data in scenario.datadisplay track by $index" data={{data[1]}} value={{data[2]}}></td>-->
 
-                                <td nowrap="nowrap" ng-show="selectInstallers.indexOf(data[2])!=-1 && selectProjects.indexOf(data[1])!=-1" ng-repeat="data in scenario.datadisplay track by $index" data={{data[1]}} value={{data[2]}} class={{data[0]}}>
-                                    <span class="label label-{{data[3]}}">{{data[4]}}</a></span> {{data[0]}}</td>
+                                <td nowrap="nowrap" ng-show="selectInstallers.indexOf(data.installer)!=-1 && selectProjects.indexOf(data.project)!=-1" ng-repeat="data in scenario.datadisplay track by $index" data={{data.project}} value={{data.installer}} class={{data.value}}>
+                                    <span class="label label-{{data.label}}">{{data.label_value}}</a></span> {{data.value}}</td>
 
 
                             </tr>
         </div>
     </div>
 
-</section>
\ No newline at end of file
+</section>
index 74eb56e..4de4e18 100644 (file)
@@ -20,7 +20,7 @@
                  <area shape="rect" coords={{vsperf}} alt="test" href="{{vsperfurl}}" onmouseover="pop(event)" ng-mouseover="myTrigger('vsperf')" />
                   <area shape="rect" coords={{stor}} alt="test" href="{{storperfurl}}" onmouseover="pop(event)" ng-mouseover="myTrigger('storperf')"/>
                    <area shape="rect" coords={{qtip}} alt="test" href="{{qtipurl}}"  onmouseover="pop(event)" ng-mouseover="myTrigger('qtip')" />
-                    <area shape="rect" coords={{bootleneck}} alt="test"  href="{{bottlenecksurl}}" onmouseover="pop(event)" ng-mouseover="myTrigger('bootlenecks')" />
+                    <area shape="rect" coords={{bottlenecks}} alt="test"  href="{{bottlenecksurl}}" onmouseover="pop(event)" ng-mouseover="myTrigger('bottlenecks')" />
                      <area shape="rect" coords={{noPopArea1}} alt="test" onmouseover="pophide(event)"  />
                       <area shape="rect" coords={{noPopArea2}} alt="test"  onmouseover="pophide(event)"  />
                        <area shape="rect" coords={{noPopArea3}} alt="test"  onmouseover="pophide(event)"  />
             $('#popup').hide();
             return true;
         }
-    </script>
\ No newline at end of file
+    </script>
diff --git a/utils/test/reporting/pages/config.sh b/utils/test/reporting/pages/config.sh
new file mode 100755 (executable)
index 0000000..f9bb89a
--- /dev/null
@@ -0,0 +1,3 @@
+: ${SERVER_URL:='testresults.opnfv.org/reporting2'}
+
+echo "{\"url\": \"http://${SERVER_URL}\"}" > dist/config.json
@@ -18,6 +18,8 @@ import scenarioResult as sr
 # manage conf
 import utils.reporting_utils as rp_utils
 
+"""Functest reporting status"""
+
 # Logger
 logger = rp_utils.getLogger("Functest-Status")
 
@@ -107,7 +109,6 @@ for version in versions:
         scenario_results = rp_utils.getScenarios(healthcheck,
                                                  installer,
                                                  version)
-
         # get nb of supported architecture (x86, aarch64)
         architectures = rp_utils.getArchitectures(scenario_results)
         logger.info("Supported architectures: {}".format(architectures))
@@ -125,7 +126,7 @@ for version in versions:
             # in case of more than 1 architecture supported
             # precise the architecture
             installer_display = installer
-            if (len(architectures) > 1):
+            if "fuel" in installer:
                 installer_display = installer + "@" + architecture
 
             # For all the scenarios get results
@@ -273,7 +274,8 @@ for version in versions:
             templateEnv = jinja2.Environment(
                 loader=templateLoader, autoescape=True)
 
-            TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
+            TEMPLATE_FILE = ("./reporting/functest/template"
+                             "/index-status-tmpl.html")
             template = templateEnv.get_template(TEMPLATE_FILE)
 
             outputText = template.render(
@@ -1,4 +1,15 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# SPDX-license-identifier: Apache-2.0
+
 from urllib2 import Request, urlopen, URLError
+from datetime import datetime
 import json
 import jinja2
 import os
@@ -97,7 +108,13 @@ for version in rp_utils.get_config('general.versions'):
                     crit_rate = True
 
                 # Expect that the suite duration is inferior to 30m
-                if result['details']['duration'] < criteria_duration:
+                stop_date = datetime.strptime(result['stop_date'],
+                                              '%Y-%m-%d %H:%M:%S')
+                start_date = datetime.strptime(result['start_date'],
+                                               '%Y-%m-%d %H:%M:%S')
+
+                delta = stop_date - start_date
+                if (delta.total_seconds() < criteria_duration):
                     crit_time = True
 
                 result['criteria'] = {'tests': crit_tests,
@@ -125,7 +142,7 @@ for version in rp_utils.get_config('general.versions'):
         templateEnv = jinja2.Environment(loader=templateLoader,
                                          autoescape=True)
 
-        TEMPLATE_FILE = "./functest/template/index-tempest-tmpl.html"
+        TEMPLATE_FILE = "./reporting/functest/template/index-tempest-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_results=scenario_results,
@@ -104,7 +104,7 @@ for version in versions:
                                  % result['details']['sig_test']['duration'])
                     logger.debug("Signaling testing results: %s"
                                  % format_result)
-                except:
+                except Exception:
                     logger.error("Data badly formatted")
                 logger.debug("----------------------------------------")
 
@@ -112,7 +112,7 @@ for version in versions:
         templateEnv = jinja2.Environment(loader=templateLoader,
                                          autoescape=True)
 
-        TEMPLATE_FILE = "./functest/template/index-vims-tmpl.html"
+        TEMPLATE_FILE = "./reporting/functest/template/index-vims-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_results=scenario_results,
@@ -72,6 +72,7 @@ $(document).ready(function (){
             <li class="active"><a href="../../index.html">Home</a></li>
             <li><a href="status-apex.html">Apex</a></li>
             <li><a href="status-compass.html">Compass</a></li>
+            <li><a href="status-daisy.html">Daisy</a></li>
             <li><a href="status-fuel@x86.html">fuel@x86</a></li>
             <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
             <li><a href="status-joid.html">Joid</a></li>
@@ -89,7 +90,7 @@ $(document).ready(function (){
             <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
                 <table class="table">
                     <tr>
-                        <th width="40%">Scenario</th>
+                        <th width="40%">HA Scenario</th>
                         <th width="20%">Status</th>
                         <th width="20%">Trend</th>
                         <th width="10%">Score</th>
@@ -97,14 +98,39 @@ $(document).ready(function (){
                     </tr>
                         {% for scenario,iteration in scenario_stats.iteritems() -%}
                             <tr class="tr-ok">
+                            {% if '-ha' in scenario -%}
                                 <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
                                 <td><div id="gaugeScenario{{loop.index}}"></div></td>
                                 <td><div id="trend_svg{{loop.index}}"></div></td>
                                 <td>{{scenario_results[scenario].getScore()}}</td>
                                 <td>{{iteration}}</td>
+                            {%- endif %}
+                            </tr>
+                            {%- endfor %}
+                            <br>
+                </table>
+                <br>
+               <table class="table">
+                    <tr>
+                        <th width="40%">NOHA Scenario</th>
+                        <th width="20%">Status</th>
+                        <th width="20%">Trend</th>
+                        <th width="10%">Score</th>
+                        <th width="10%">Iteration</th>
+                    </tr>
+                        {% for scenario,iteration in scenario_stats.iteritems() -%}
+                            <tr class="tr-ok">
+                            {% if '-noha' in scenario -%}
+                                <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
+                                <td><div id="gaugeScenario{{loop.index}}"></div></td>
+                                <td><div id="trend_svg{{loop.index}}"></div></td>
+                                <td>{{scenario_results[scenario].getScore()}}</td>
+                                <td>{{iteration}}</td>
+                            {%- endif %}
                             </tr>
                             {%- endfor %}
-                        </table>
+                </table>
+
         </div>
 
 
@@ -23,7 +23,7 @@ reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
 
 logger.info("*******************************************")
 logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention = %s days              *" % PERIOD)
+logger.info("*   Data retention = {} days              *".format(PERIOD))
 logger.info("*                                         *")
 logger.info("*******************************************")
 
@@ -33,7 +33,7 @@ def prepare_profile_file(version):
     if not os.path.exists(profile_dir):
         os.makedirs(profile_dir)
 
-    profile_file = '{}/scenario_history.txt'.format(profile_dir, version)
+    profile_file = "{}/scenario_history.txt".format(profile_dir)
     if not os.path.exists(profile_file):
         with open(profile_file, 'w') as f:
             info = 'date,scenario,installer,details,score\n'
@@ -77,7 +77,7 @@ def render_html(prof_results, installer, version):
     template_env = jinja2.Environment(loader=template_loader,
                                       autoescape=True)
 
-    template_file = "./qtip/template/index-status-tmpl.html"
+    template_file = "./reporting/qtip/template/index-status-tmpl.html"
     template = template_env.get_template(template_file)
 
     render_outcome = template.render(prof_results=prof_results,
@@ -106,5 +106,6 @@ def render_reporter():
         rp_utils.generate_csv(profile_file)
         logger.info("CSV generated...")
 
+
 if __name__ == '__main__':
     render_reporter()
         <nav>
           <ul class="nav nav-justified">
             <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
-            <li><a href="index-status-apex.html">Apex</a></li>
-            <li><a href="index-status-compass.html">Compass</a></li>
-            <li><a href="index-status-fuel.html">Fuel</a></li>
-            <li><a href="index-status-joid.html">Joid</a></li>
+            <li><a href="status-apex.html">Apex</a></li>
+            <li><a href="status-compass.html">Compass</a></li>
+            <li><a href="status-daisy.html">Daisy</a></li>
+            <li><a href="status-fuel.html">Fuel</a></li>
+            <li><a href="status-joid.html">Joid</a></li>
           </ul>
         </nav>
       </div>
similarity index 98%
rename from utils/test/reporting/reporting.yaml
rename to utils/test/reporting/reporting/reporting.yaml
index 1692f48..26feb31 100644 (file)
@@ -3,6 +3,7 @@ general:
     installers:
         - apex
         - compass
+        - daisy
         - fuel
         - joid
 
@@ -131,7 +131,7 @@ for version in versions:
         templateEnv = jinja2.Environment(loader=templateLoader,
                                          autoescape=True)
 
-        TEMPLATE_FILE = "./storperf/template/index-status-tmpl.html"
+        TEMPLATE_FILE = "./reporting/storperf/template/index-status-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_results=scenario_result_criteria,
@@ -10,7 +10,7 @@
 import logging
 import unittest
 
-from utils import reporting_utils
+from reporting.utils import reporting_utils
 
 
 class reportingUtilsTesting(unittest.TestCase):
@@ -20,10 +20,9 @@ class reportingUtilsTesting(unittest.TestCase):
     def setUp(self):
         self.test = reporting_utils
 
-    def test_getConfig(self):
-        self.assertEqual(self.test.get_config("general.period"), 10)
-# TODO
-# ...
+    def test_foo(self):
+        self.assertTrue(0 < 1)
+
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
@@ -95,7 +95,7 @@ def getApiResults(case, installer, scenario, version):
         k = response.read()
         results = json.loads(k)
     except URLError as e:
-        print('No kittez. Got an error code:', e)
+        print 'No kittez. Got an error code:'.format(e)
 
     return results
 
@@ -117,19 +117,31 @@ def getScenarios(case, installer, version):
     url = ("http://" + url_base + "?case=" + case +
            "&period=" + str(period) + "&installer=" + installer +
            "&version=" + version)
-    request = Request(url)
 
     try:
+        request = Request(url)
         response = urlopen(request)
         k = response.read()
         results = json.loads(k)
         test_results = results['results']
-    except URLError as e:
-        print('Got an error code:', e)
+        try:
+            page = results['pagination']['total_pages']
+            if page > 1:
+                test_results = []
+                for i in range(1, page + 1):
+                    url_page = url + "&page=" + str(i)
+                    request = Request(url_page)
+                    response = urlopen(request)
+                    k = response.read()
+                    results = json.loads(k)
+                    test_results += results['results']
+        except KeyError:
+            print ('No pagination detected')
+    except URLError as err:
+        print 'Got an error code: {}'.format(err)
 
     if test_results is not None:
         test_results.reverse()
-
         scenario_results = {}
 
         for r in test_results:
@@ -142,7 +154,7 @@ def getScenarios(case, installer, version):
             exclude_noha = get_config('functest.exclude_noha')
             if ((exclude_virtual_pod and "virtual" in r['pod_name']) or
                     (exclude_noha and "noha" in r['scenario'])):
-                print("exclude virtual pod results...")
+                print "exclude virtual pod results..."
             else:
                 scenario_results[r['scenario']].append(r)
 
@@ -157,7 +169,6 @@ def getScenarioStats(scenario_results):
     return scenario_stats
 
 
-# TODO convergence with above function getScenarios
 def getScenarioStatus(installer, version):
     period = get_config('general.period')
     url_base = get_config('testapi.url')
@@ -174,7 +185,7 @@ def getScenarioStatus(installer, version):
         results = json.loads(k)
         test_results = results['results']
     except URLError as e:
-        print('Got an error code:', e)
+        print 'Got an error code: {}'.format(e)
 
     scenario_results = {}
     result_dict = {}
@@ -213,8 +224,8 @@ def getQtipResults(version, installer):
         k = response.read()
         response.close()
         results = json.loads(k)['results']
-    except URLError as e:
-        print('Got an error code:', e)
+    except URLError as err:
+        print 'Got an error code: {}'.format(err)
 
     result_dict = {}
     if results:
@@ -236,7 +247,7 @@ def getNbtestOk(results):
                 if "PASS" in v:
                     nb_test_ok += 1
             except:
-                print("Cannot retrieve test status")
+                print "Cannot retrieve test status"
     return nb_test_ok
 
 
@@ -311,7 +322,7 @@ def getJenkinsUrl(build_tag):
                   "/" + str(build_id[0]))
         jenkins_url = url_base + url_id + "/console"
     except:
-        print('Impossible to get jenkins url:')
+        print 'Impossible to get jenkins url:'
 
     if "jenkins-" not in build_tag:
         jenkins_url = None
@@ -324,7 +335,7 @@ def getScenarioPercent(scenario_score, scenario_criteria):
     try:
         score = float(scenario_score) / float(scenario_criteria) * 100
     except:
-        print('Impossible to calculate the percentage score')
+        print 'Impossible to calculate the percentage score'
     return score
 
 
@@ -410,7 +421,7 @@ def get_percent(four_list, ten_list):
 
 def _test():
     status = getScenarioStatus("compass", "master")
-    print("status:++++++++++++++++++++++++")
+    print "status:++++++++++++++++++++++++"
     print(json.dumps(status, indent=4))
 
 
@@ -427,9 +438,9 @@ def export_csv(scenario_file_name, installer, version):
                                     "/functest/scenario_history_" +
                                     installer + ".csv")
     scenario_installer_file = open(scenario_installer_file_name, "a")
-    with open(scenario_file_name, "r") as f:
+    with open(scenario_file_name, "r") as scenario_file:
         scenario_installer_file.write("date,scenario,installer,detail,score\n")
-        for line in f:
+        for line in scenario_file:
             if installer in line:
                 scenario_installer_file.write(line)
         scenario_installer_file.close
@@ -447,6 +458,6 @@ def export_pdf(pdf_path, pdf_doc_name):
     try:
         pdfkit.from_file(pdf_path, pdf_doc_name)
     except IOError:
-        print("Error but pdf generated anyway...")
+        print "Error but pdf generated anyway..."
     except:
-        print("impossible to generate PDF")
+        print "impossible to generate PDF"
diff --git a/utils/test/reporting/reporting/yardstick/__init__.py b/utils/test/reporting/reporting/yardstick/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
@@ -106,7 +106,7 @@ for version in versions:
         templateEnv = jinja2.Environment(loader=templateLoader,
                                          autoescape=True)
 
-        TEMPLATE_FILE = "./yardstick/template/index-status-tmpl.html"
+        TEMPLATE_FILE = "./reporting/yardstick/template/index-status-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_results=scenario_result_criteria,
diff --git a/utils/test/reporting/requirements.txt b/utils/test/reporting/requirements.txt
new file mode 100644 (file)
index 0000000..344064d
--- /dev/null
@@ -0,0 +1,7 @@
+pdfkit>=0.6.1 # MIT
+wkhtmltopdf-pack>=0.12.3 # MIT
+PyYAML>=3.10.0 # MIT
+simplejson>=2.2.0 # MIT
+Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
+requests!=2.12.2,>=2.10.0 # Apache-2.0
+tornado>=4.4.2 # Apache-2.0
diff --git a/utils/test/reporting/run_test.sh b/utils/test/reporting/run_test.sh
new file mode 100755 (executable)
index 0000000..b83b550
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/bash
+tox
+exit $?
diff --git a/utils/test/reporting/run_unit_tests.sh b/utils/test/reporting/run_unit_tests.sh
deleted file mode 100755 (executable)
index 6b0e3b2..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -o errexit
-set -o pipefail
-
-# ******************************
-# prepare the env for the tests
-# ******************************
-# Either Workspace is set (CI)
-if [ -z $WORKSPACE ]
-then
-    WORKSPACE="."
-fi
-
-export CONFIG_REPORTING_YAML=./reporting.yaml
-
-# ***************
-# Run unit tests
-# ***************
-echo "Running unit tests..."
-
-# start vitual env
-virtualenv $WORKSPACE/reporting_venv
-source $WORKSPACE/reporting_venv/bin/activate
-
-# install python packages
-easy_install -U setuptools
-easy_install -U pip
-pip install -r $WORKSPACE/docker/requirements.pip
-pip install -e $WORKSPACE
-
-python $WORKSPACE/setup.py develop
-
-# unit tests
-# TODO: remove cover-erase
-# To be deleted when all functest packages will be listed
-nosetests --with-xunit \
-         --cover-package=utils \
-         --with-coverage \
-         --cover-xml \
-         tests/unit
-rc=$?
-
-deactivate
diff --git a/utils/test/reporting/setup.cfg b/utils/test/reporting/setup.cfg
new file mode 100644 (file)
index 0000000..9543945
--- /dev/null
@@ -0,0 +1,12 @@
+[metadata]
+name = reporting
+version = 1
+home-page = https://wiki.opnfv.org/display/testing
+
+[files]
+packages =
+    reporting
+    api
+scripts =
+    docker/reporting.sh
+    docker/web_server.sh
index 627785e..17849f6 100644 (file)
@@ -1,22 +1,23 @@
-##############################################################################
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
 
-from setuptools import setup, find_packages
+# pylint: disable=missing-docstring
+import setuptools
 
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
 
-setup(
-    name="reporting",
-    version="master",
-    packages=find_packages(),
-    include_package_data=True,
-    package_data={
-    },
-    url="https://www.opnfv.org",
-    install_requires=["coverage==4.1",
-                      "mock==1.3.0",
-                      "nose==1.3.7"],
-)
+setuptools.setup(
+    setup_requires=['pbr>=1.8'],
+    pbr=True)
diff --git a/utils/test/reporting/test-requirements.txt b/utils/test/reporting/test-requirements.txt
new file mode 100644 (file)
index 0000000..738f508
--- /dev/null
@@ -0,0 +1,5 @@
+coverage>=4.0 # Apache-2.0
+mock>=2.0 # BSD
+nose # LGPL
+flake8<2.6.0,>=2.5.4 # MIT
+pylint==1.4.5 # GPLv2
diff --git a/utils/test/reporting/tox.ini b/utils/test/reporting/tox.ini
new file mode 100644 (file)
index 0000000..2df5030
--- /dev/null
@@ -0,0 +1,27 @@
+[tox]
+envlist = pep8,pylint,py27
+
+[testenv]
+usedevelop = True
+deps =
+  -r{toxinidir}/requirements.txt
+  -r{toxinidir}/test-requirements.txt
+commands = nosetests --with-xunit \
+  --with-coverage \
+  --cover-tests \
+  --cover-package=reporting \
+  --cover-xml \
+  --cover-html \
+  reporting/tests/unit
+
+[testenv:pep8]
+basepython = python2.7
+commands = flake8
+
+[testenv:pylint]
+basepython = python2.7
+whitelist_externals = bash
+commands =
+  bash -c "\
+  pylint --disable=locally-disabled reporting| \
+    tee pylint.out | sed -ne '/Raw metrics/,//p'"
diff --git a/utils/test/testapi/.gitignore b/utils/test/testapi/.gitignore
new file mode 100644 (file)
index 0000000..00f8a03
--- /dev/null
@@ -0,0 +1,7 @@
+AUTHORS
+ChangeLog
+setup.cfg-e
+opnfv_testapi/static
+build
+*.egg-info
+
index 4a2f23a..5f5b861 100644 (file)
         .module('testapiApp')
         .config(configureRoutes);
 
+    angular
+        .module('testapiApp')
+        .directive('dynamicModel', ['$compile', '$parse', function ($compile, $parse) {
+            return {
+                restrict: 'A',
+                terminal: true,
+                priority: 100000,
+                link: function (scope, elem) {
+                    var name = $parse(elem.attr('dynamic-model'))(scope);
+                    elem.removeAttr('dynamic-model');
+                    elem.attr('ng-model', name);
+                    $compile(elem)(scope);
+                }
+            };
+        }]);
+
     configureRoutes.$inject = ['$stateProvider', '$urlRouterProvider'];
 
     /**
         $stateProvider.
             state('home', {
                 url: '/',
-                templateUrl: '/testapi-ui/components/home/home.html'
+                templateUrl: 'testapi-ui/components/home/home.html'
             }).
             state('about', {
                 url: '/about',
-                templateUrl: '/testapi-ui/components/about/about.html'
+                templateUrl: 'testapi-ui/components/about/about.html'
             }).
-            state('guidelines', {
-                url: '/guidelines',
-                templateUrl: '/testapi-ui/components/guidelines/guidelines.html',
-                controller: 'GuidelinesController as ctrl'
+            state('pods', {
+                url: '/pods',
+                templateUrl: 'testapi-ui/components/pods/pods.html',
+                controller: 'PodsController as ctrl'
             }).
             state('communityResults', {
                 url: '/community_results',
-                templateUrl: '/testapi-ui/components/results/results.html',
+                templateUrl: 'testapi-ui/components/results/results.html',
                 controller: 'ResultsController as ctrl'
             }).
             state('userResults', {
                 url: '/user_results',
-                templateUrl: '/testapi-ui/components/results/results.html',
+                templateUrl: 'testapi-ui/components/results/results.html',
                 controller: 'ResultsController as ctrl'
             }).
             state('resultsDetail', {
                 url: '/results/:testID',
-                templateUrl: '/testapi-ui/components/results-report' +
+                templateUrl: 'testapi-ui/components/results-report' +
                              '/resultsReport.html',
                 controller: 'ResultsReportController as ctrl'
             }).
             state('profile', {
                 url: '/profile',
-                templateUrl: '/testapi-ui/components/profile/profile.html',
+                templateUrl: 'testapi-ui/components/profile/profile.html',
                 controller: 'ProfileController as ctrl'
             }).
             state('authFailure', {
                 url: '/auth_failure',
-                templateUrl: '/testapi-ui/components/home/home.html',
+                templateUrl: 'testapi-ui/components/home/home.html',
                 controller: 'AuthFailureController as ctrl'
             }).
             state('logout', {
                 url: '/logout',
-                templateUrl: '/testapi-ui/components/logout/logout.html',
+                templateUrl: 'testapi-ui/components/logout/logout.html',
                 controller: 'LogoutController as ctrl'
             }).
             state('userVendors', {
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/guidelines.html b/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/guidelines.html
deleted file mode 100644 (file)
index 1dd39ff..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-<h3>OpenStack Powered&#8482; Guidelines</h3>
-
-<!-- Guideline Filters -->
-<div class="row">
-    <div class="col-md-3">
-        <strong>Version:</strong>
-        <!-- Slicing the version file name here gets rid of the '.json' file extension -->
-        <select ng-model="ctrl.version"
-                ng-change="ctrl.update()"
-                class="form-control"
-                ng-options="versionFile.slice(0,-5) for versionFile in ctrl.versionList">
-        </select>
-    </div>
-    <div class="col-md-4">
-        <strong>Target Program:</strong>
-        <span class="program-about"><a target="_blank" href="http://www.openstack.org/brand/interop/">About</a></span>
-        <select ng-model="ctrl.target" class="form-control" ng-change="ctrl.updateTargetCapabilities()">
-            <option value="platform">OpenStack Powered Platform</option>
-            <option value="compute">OpenStack Powered Compute</option>
-            <option value="object">OpenStack Powered Object Storage</option>
-        </select>
-    </div>
-</div>
-
-<br />
-<div ng-if="ctrl.guidelines">
-    <strong>Guideline Status:</strong>
-    {{ctrl.guidelines.status | capitalize}}
-</div>
-
-<div ng-show="ctrl.guidelines">
-    <strong>Corresponding OpenStack Releases:</strong>
-    <ul class="list-inline">
-        <li ng-repeat="release in ctrl.guidelines.releases">
-            {{release | capitalize}}
-        </li>
-    </ul>
-</div>
-
-<strong>Capability Status:</strong>
-<div class="checkbox">
-    <label>
-    <input type="checkbox" ng-model="ctrl.status.required">
-    <span class="required">Required</span>
-    </label>
-    <label>
-    <input type="checkbox" ng-model="ctrl.status.advisory">
-    <span class="advisory">Advisory</span>
-    </label>
-    <label>
-    <input type="checkbox" ng-model="ctrl.status.deprecated">
-    <span class="deprecated">Deprecated</span>
-    </label>
-    <label>
-    <input type="checkbox" ng-model="ctrl.status.removed">
-    <span class="removed">Removed</span>
-    </label>
-    <a class="test-list-dl pull-right"
-       title="Get a test list for capabilities matching selected statuses."
-       ng-click="ctrl.openTestListModal()">
-
-        Test List <span class="glyphicon glyphicon-file"></span>
-    </a>
-</div>
-<!-- End Capability Filters -->
-
-<p><small>Tests marked with <span class="glyphicon glyphicon-flag text-warning"></span> are tests flagged by Interop Working Group.</small></p>
-
-<!-- Loading animation divs -->
-<div cg-busy="{promise:ctrl.versionsRequest,message:'Loading versions'}"></div>
-<div cg-busy="{promise:ctrl.capsRequest,message:'Loading capabilities'}"></div>
-
-<!-- Get the version-specific template -->
-<div ng-include src="ctrl.detailsTemplate"></div>
-
-<div ng-show="ctrl.showError" class="alert alert-danger" role="alert">
-    <span class="glyphicon glyphicon-exclamation-sign" aria-hidden="true"></span>
-    <span class="sr-only">Error:</span>
-    {{ctrl.error}}
-</div>
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/guidelinesController.js b/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/guidelinesController.js
deleted file mode 100644 (file)
index a6f4258..0000000
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-(function () {
-    'use strict';
-
-    angular
-        .module('testapiApp')
-        .controller('GuidelinesController', GuidelinesController);
-
-    GuidelinesController.$inject = ['$http', '$uibModal', 'testapiApiUrl'];
-
-    /**
-     * TestAPI Guidelines Controller
-     * This controller is for the '/guidelines' page where a user can browse
-     * through tests belonging to Interop WG defined capabilities.
-     */
-    function GuidelinesController($http, $uibModal, testapiApiUrl) {
-        var ctrl = this;
-
-        ctrl.getVersionList = getVersionList;
-        ctrl.update = update;
-        ctrl.updateTargetCapabilities = updateTargetCapabilities;
-        ctrl.filterStatus = filterStatus;
-        ctrl.getObjectLength = getObjectLength;
-        ctrl.openTestListModal = openTestListModal;
-
-        /** The target OpenStack marketing program to show capabilities for. */
-        ctrl.target = 'platform';
-
-        /** The various possible capability statuses. */
-        ctrl.status = {
-            required: true,
-            advisory: false,
-            deprecated: false,
-            removed: false
-        };
-
-        /**
-         * The template to load for displaying capability details.
-         */
-        ctrl.detailsTemplate = 'components/guidelines/partials/' +
-                               'guidelineDetails.html';
-
-        /**
-         * Retrieve an array of available guideline files from the TestAPI
-         * API server, sort this array reverse-alphabetically, and store it in
-         * a scoped variable. The scope's selected version is initialized to
-         * the latest (i.e. first) version here as well. After a successful API
-         * call, the function to update the capabilities is called.
-         * Sample API return array: ["2015.03.json", "2015.04.json"]
-         */
-        function getVersionList() {
-            var content_url = testapiApiUrl + '/guidelines';
-            ctrl.versionsRequest =
-                $http.get(content_url).success(function (data) {
-                    ctrl.versionList = data.sort().reverse();
-                    // Default to the first approved guideline which is expected
-                    // to be at index 1.
-                    ctrl.version = ctrl.versionList[1];
-                    ctrl.update();
-                }).error(function (error) {
-                    ctrl.showError = true;
-                    ctrl.error = 'Error retrieving version list: ' +
-                        angular.toJson(error);
-                });
-        }
-
-        /**
-         * This will contact the TestAPI API server to retrieve the JSON
-         * content of the guideline file corresponding to the selected
-         * version.
-         */
-        function update() {
-            var content_url = testapiApiUrl + '/guidelines/' + ctrl.version;
-            ctrl.capsRequest =
-                $http.get(content_url).success(function (data) {
-                    ctrl.guidelines = data;
-                    ctrl.updateTargetCapabilities();
-                }).error(function (error) {
-                    ctrl.showError = true;
-                    ctrl.guidelines = null;
-                    ctrl.error = 'Error retrieving guideline content: ' +
-                        angular.toJson(error);
-                });
-        }
-
-        /**
-         * This will update the scope's 'targetCapabilities' object with
-         * capabilities belonging to the selected OpenStack marketing program
-         * (programs typically correspond to 'components' in the Interop WG
-         * schema). Each capability will have its status mapped to it.
-         */
-        function updateTargetCapabilities() {
-            ctrl.targetCapabilities = {};
-            var components = ctrl.guidelines.components;
-            var targetCaps = ctrl.targetCapabilities;
-
-            // The 'platform' target is comprised of multiple components, so
-            // we need to get the capabilities belonging to each of its
-            // components.
-            if (ctrl.target === 'platform') {
-                var platform_components = ctrl.guidelines.platform.required;
-
-                // This will contain status priority values, where lower
-                // values mean higher priorities.
-                var statusMap = {
-                    required: 1,
-                    advisory: 2,
-                    deprecated: 3,
-                    removed: 4
-                };
-
-                // For each component required for the platform program.
-                angular.forEach(platform_components, function (component) {
-                    // Get each capability list belonging to each status.
-                    angular.forEach(components[component],
-                        function (caps, status) {
-                            // For each capability.
-                            angular.forEach(caps, function(cap) {
-                                // If the capability has already been added.
-                                if (cap in targetCaps) {
-                                    // If the status priority value is less
-                                    // than the saved priority value, update
-                                    // the value.
-                                    if (statusMap[status] <
-                                        statusMap[targetCaps[cap]]) {
-                                        targetCaps[cap] = status;
-                                    }
-                                }
-                                else {
-                                    targetCaps[cap] = status;
-                                }
-                            });
-                        });
-                });
-            }
-            else {
-                angular.forEach(components[ctrl.target],
-                    function (caps, status) {
-                        angular.forEach(caps, function(cap) {
-                            targetCaps[cap] = status;
-                        });
-                    });
-            }
-        }
-
-        /**
-         * This filter will check if a capability's status corresponds
-         * to a status that is checked/selected in the UI. This filter
-         * is meant to be used with the ng-repeat directive.
-         * @param {Object} capability
-         * @returns {Boolean} True if capability's status is selected
-         */
-        function filterStatus(capability) {
-            var caps = ctrl.targetCapabilities;
-            return (ctrl.status.required &&
-                caps[capability.id] === 'required') ||
-                (ctrl.status.advisory &&
-                caps[capability.id] === 'advisory') ||
-                (ctrl.status.deprecated &&
-                caps[capability.id] === 'deprecated') ||
-                (ctrl.status.removed &&
-                caps[capability.id] === 'removed');
-        }
-
-        /**
-         * This function will get the length of an Object/dict based on
-         * the number of keys it has.
-         * @param {Object} object
-         * @returns {Number} length of object
-         */
-        function getObjectLength(object) {
-            return Object.keys(object).length;
-        }
-
-        /**
-         * This will open the modal that will show a list of all tests
-         * belonging to capabilities with the selected status(es).
-         */
-        function openTestListModal() {
-            $uibModal.open({
-                templateUrl: '/components/guidelines/partials' +
-                        '/testListModal.html',
-                backdrop: true,
-                windowClass: 'modal',
-                animation: true,
-                controller: 'TestListModalController as modal',
-                size: 'lg',
-                resolve: {
-                    version: function () {
-                        return ctrl.version.slice(0, -5);
-                    },
-                    target: function () {
-                        return ctrl.target;
-                    },
-                    status: function () {
-                        return ctrl.status;
-                    }
-                }
-            });
-        }
-
-        ctrl.getVersionList();
-    }
-
-    angular
-        .module('testapiApp')
-        .controller('TestListModalController', TestListModalController);
-
-    TestListModalController.$inject = [
-        '$uibModalInstance', '$http', 'version',
-        'target', 'status', 'testapiApiUrl'
-    ];
-
-    /**
-     * Test List Modal Controller
-     * This controller is for the modal that appears if a user wants to see the
-     * test list corresponding to Interop WG capabilities with the selected
-     * statuses.
-     */
-    function TestListModalController($uibModalInstance, $http, version,
-        target, status, testapiApiUrl) {
-
-        var ctrl = this;
-
-        ctrl.version = version;
-        ctrl.target = target;
-        ctrl.status = status;
-        ctrl.close = close;
-        ctrl.updateTestListString = updateTestListString;
-
-        ctrl.aliases = true;
-        ctrl.flagged = false;
-
-        // Check if the API URL is absolute or relative.
-        if (testapiApiUrl.indexOf('http') > -1) {
-            ctrl.url = testapiApiUrl;
-        }
-        else {
-            ctrl.url = location.protocol + '//' + location.host +
-                testapiApiUrl;
-        }
-
-        /**
-         * This function will close/dismiss the modal.
-         */
-        function close() {
-            $uibModalInstance.dismiss('exit');
-        }
-
-        /**
-         * This function will return a list of statuses based on which ones
-         * are selected.
-         */
-        function getStatusList() {
-            var statusList = [];
-            angular.forEach(ctrl.status, function(value, key) {
-                if (value) {
-                    statusList.push(key);
-                }
-            });
-            return statusList;
-        }
-
-        /**
-         * This will get the list of tests from the API and update the
-         * controller's test list string variable.
-         */
-        function updateTestListString() {
-            var statuses = getStatusList();
-            if (!statuses.length) {
-                ctrl.error = 'No tests matching selected criteria.';
-                return;
-            }
-            ctrl.testListUrl = [
-                ctrl.url, '/guidelines/', ctrl.version, '/tests?',
-                'target=', ctrl.target, '&',
-                'type=', statuses.join(','), '&',
-                'alias=', ctrl.aliases.toString(), '&',
-                'flag=', ctrl.flagged.toString()
-            ].join('');
-            ctrl.testListRequest =
-                $http.get(ctrl.testListUrl).
-                    then(function successCallback(response) {
-                        ctrl.error = null;
-                        ctrl.testListString = response.data;
-                        if (!ctrl.testListString) {
-                            ctrl.testListCount = 0;
-                        }
-                        else {
-                            ctrl.testListCount =
-                                ctrl.testListString.split('\n').length;
-                        }
-                    }, function errorCallback(response) {
-                        ctrl.testListString = null;
-                        ctrl.testListCount = null;
-                        if (angular.isObject(response.data) &&
-                            response.data.message) {
-                            ctrl.error = 'Error retrieving test list: ' +
-                                response.data.message;
-                        }
-                        else {
-                            ctrl.error = 'Unknown error retrieving test list.';
-                        }
-                    });
-        }
-
-        updateTestListString();
-    }
-})();
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/partials/guidelineDetails.html b/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/partials/guidelineDetails.html
deleted file mode 100644 (file)
index f020c9a..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-<!--
-HTML for guidelines page for all OpenStack Powered (TM) guideline schemas
-This expects the JSON data of the guidelines file to be stored in scope
-variable 'guidelines'.
--->
-
-<ol ng-show="ctrl.guidelines" class="capabilities">
-  <li class="capability-list-item" ng-repeat="capability in ctrl.guidelines.capabilities | arrayConverter | filter:ctrl.filterStatus | orderBy:'id'">
-    <span class="capability-name">{{capability.id}}</span><br />
-    <em>{{capability.description}}</em><br />
-    Status: <span class="{{ctrl.targetCapabilities[capability.id]}}">{{ctrl.targetCapabilities[capability.id]}}</span><br />
-    <span ng-if="capability.project">Project: {{capability.project | capitalize}}<br /></span>
-    <a ng-click="showAchievements = !showAchievements">Achievements ({{capability.achievements.length}})</a><br />
-    <ol uib-collapse="!showAchievements" class="list-inline">
-        <li ng-repeat="achievement in capability.achievements">
-            {{achievement}}
-        </li>
-    </ol>
-
-    <a ng-click="showTests = !showTests">Tests ({{ctrl.getObjectLength(capability.tests)}})</a>
-    <ul uib-collapse="!showTests">
-        <li ng-if="ctrl.guidelines.schema === '1.2'" ng-repeat="test in capability.tests">
-           <span ng-class="{'glyphicon glyphicon-flag text-warning': capability.flagged.indexOf(test) > -1}"></span>
-           {{test}}
-        </li>
-        <li ng-if="ctrl.guidelines.schema > '1.2'" ng-repeat="(testName, testDetails) in capability.tests">
-           <span ng-class="{'glyphicon glyphicon-flag text-warning': testDetails.flagged}" title="{{testDetails.flagged.reason}}"></span>
-           {{testName}}
-           <div class="test-detail" ng-if="testDetails.aliases">
-               <strong>Aliases:</strong>
-               <ul><li ng-repeat="alias in testDetails.aliases">{{alias}}</li></ul>
-           </div>
-        </li>
-    </ul>
-  </li>
-</ol>
-
-<div ng-show="ctrl.guidelines" class="criteria">
-    <hr>
-    <h4><a ng-click="showCriteria = !showCriteria">Criteria</a></h4>
-    <div uib-collapse="showCriteria">
-        <ul>
-            <li ng-repeat="(key, criterion) in ctrl.guidelines.criteria">
-                <span class="criterion-name">{{criterion.name}}</span><br />
-                <em>{{criterion.Description}}</em><br />
-                Weight: {{criterion.weight}}
-            </li>
-        </ul>
-    </div>
-</div>
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/partials/testListModal.html b/utils/test/testapi/3rd_party/static/testapi-ui/components/guidelines/partials/testListModal.html
deleted file mode 100644 (file)
index 5b1d698..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-<div class="modal-content">
-    <div class="modal-header">
-        <button type="button" class="close" aria-hidden="true" ng-click="modal.close()">&times;</button>
-        <h4>Test List ({{modal.testListCount}})</h4>
-        <p>Use this test list with <a title="testapi-client" target="_blank"href="https://github.com/openstack/testapi-client">testapi-client</a>
-           to run only tests in the {{modal.version}} OpenStack Powered&#8482; guideline from capabilities with the following statuses:
-        </p>
-        <ul class="list-inline">
-            <li class="required" ng-if="modal.status.required"> Required</li>
-            <li class="advisory" ng-if="modal.status.advisory"> Advisory</li>
-            <li class="deprecated" ng-if="modal.status.deprecated"> Deprecated</li>
-            <li class="removed" ng-if="modal.status.removed"> Removed</li>
-        </ul>
-        <div class="checkbox checkbox-test-list">
-            <label><input type="checkbox" ng-model="modal.aliases" ng-change="modal.updateTestListString()">Aliases</label>
-            <span class="glyphicon glyphicon-info-sign info-hover" aria-hidden="true"
-                  title="Include test aliases as tests may have been renamed over time. It does not hurt to include these."></span>
-            &nbsp;
-            <label><input type="checkbox" ng-model="modal.flagged" ng-change="modal.updateTestListString()">Flagged</label>
-            <span class="glyphicon glyphicon-info-sign info-hover" aria-hidden="true"
-                  title="Include flagged tests.">
-            </span>
-        </div>
-        <p ng-hide="modal.error"> Alternatively, get the test list directly from the API on your CLI:</p>
-        <code ng-hide="modal.error">wget "{{modal.testListUrl}}" -O {{modal.version}}-test-list.txt</code>
-    </div>
-    <div class="modal-body tests-modal-content">
-        <div cg-busy="{promise:modal.testListRequest,message:'Loading'}"></div>
-        <div ng-show="modal.error" class="alert alert-danger" role="alert">
-            <span class="glyphicon glyphicon-exclamation-sign" aria-hidden="true"></span>
-            <span class="sr-only">Error:</span>
-            {{modal.error}}
-        </div>
-        <div class="form-group">
-            <textarea class="form-control" rows="16" id="tests" wrap="off">{{modal.testListString}}</textarea>
-        </div>
-    </div>
-    <div class="modal-footer">
-        <a target="_blank" href="{{modal.testListUrl}}" download="{{modal.version + '-test-list.txt'}}">
-            <button class="btn btn-primary" ng-if="modal.testListCount > 0" type="button">
-                Download
-            </button>
-        </a>
-        <button class="btn btn-primary" type="button" ng-click="modal.close()">Close</button>
-    </div>
-</div>
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html b/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
new file mode 100644 (file)
index 0000000..cdfcfaf
--- /dev/null
@@ -0,0 +1,71 @@
+<h3>{{ctrl.pageHeader}}</h3>
+<p>{{ctrl.pageParagraph}}</p>
+<div class="row" style="margin-bottom:24px;"></div>
+
+<div class="pod-create">
+    <h4>Create</h4>
+    <div class="row">
+        <div ng-repeat="require in ctrl.createRequirements">
+            <div class="create-pod" style="margin-left:24px;">
+                <p class="input-group">
+                    <label for="cpid">{{require.label|capitalize}}: </label>
+                    <a ng-if="require.type == 'select'">
+                        <select dynamic-model="'ctrl.' + require.label" ng-options="option for option in require.selects"></select>
+                    </a>
+                    <a ng-if="require.type == 'text'">
+                        <input type="text" dynamic-model="'ctrl.' + require.label"/>
+                    </a>
+                    <a ng-if="require.type == 'textarea'">
+                        <textarea rows="2" cols="50" dynamic-model="'ctrl.' + require.label">
+                        </textarea>
+                    </a>
+                </p>
+            </div>
+        </div>
+
+        <div class="col-md-3" style="margin-top:12px; margin-left:8px;">
+            <button type="submit" class="btn btn-primary" ng-click="ctrl.create()">Create</button>
+        </div>
+    </div>
+</div>
+
+<div class="pods-filters" style="margin-top:36px;">
+    <h4>Filters</h4>
+    <div class="row">
+        <div class="col-md-3" style="margin-top:12px; margin-left:8px;">
+            <button type="submit" class="btn btn-primary" ng-click="ctrl.update()">Filter</button>
+            <button type="submit" class="btn btn-primary btn-danger" ng-click="ctrl.clearFilters()">Clear</button>
+        </div>
+    </div>
+</div>
+
+<div cg-busy="{promise:ctrl.authRequest,message:'Loading'}"></div>
+<div cg-busy="{promise:ctrl.podsRequest,message:'Loading'}"></div>
+
+<div ng-show="ctrl.data" class="pods-table" style="margin-top:24px; margin-left:8px;">
+    <table ng-data="ctrl.data.pods" ng-show="ctrl.data" class="table table-striped table-hover">
+        <tbody>
+            <tr ng-repeat-start="(index, pod) in ctrl.data.pods">
+                <td>
+                    <a href="#" ng-click="showPod = !showPod">{{pod.name}}</a>
+                    <div class="show-pod" ng-class="{ 'hidden': ! showPod }" style="margin-left:24px;">
+                        <p>
+                            role: {{pod.role}}<br>
+                            mode: {{pod.mode}}<br>
+                            create_date: {{pod.creation_date}}<br>
+                            details: {{pod.details}}
+                        </p>
+                    </div>
+                </td>
+            </tr>
+            <tr ng-repeat-end=>
+            </tr>
+        </tbody>
+    </table>
+</div>
+
+<div ng-show="ctrl.showError" class="alert alert-danger" role="alert">
+    <span class="glyphicon glyphicon-exclamation-sign" aria-hidden="true"></span>
+    <span class="sr-only">Error:</span>
+    {{ctrl.error}}
+</div>
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/podsController.js b/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/podsController.js
new file mode 100644 (file)
index 0000000..53e8b1e
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+(function () {
+    'use strict';
+
+    angular
+        .module('testapiApp')
+        .controller('PodsController', PodsController);
+
+    PodsController.$inject = [
+        '$scope', '$http', '$filter', '$state', 'testapiApiUrl','raiseAlert'
+    ];
+
+    /**
+     * TestAPI Pods Controller
+     * This controller is for the '/pods' page where a user can browse
+     * through pods declared in TestAPI.
+     */
+    function PodsController($scope, $http, $filter, $state, testapiApiUrl,
+        raiseAlert) {
+        var ctrl = this;
+
+        ctrl.url = testapiApiUrl + '/pods';
+
+        ctrl.create = create;
+        ctrl.update = update;
+        ctrl.open = open;
+        ctrl.clearFilters = clearFilters;
+
+        ctrl.roles = ['community-ci', 'production-ci'];
+        ctrl.modes = ['metal', 'virtual'];
+        ctrl.createRequirements = [
+            {label: 'name', type: 'text', required: true},
+            {label: 'mode', type: 'select', selects: ctrl.modes},
+            {label: 'role', type: 'select', selects: ctrl.roles},
+            {label: 'details', type: 'textarea', required: false}
+        ];
+
+        ctrl.name = '';
+        ctrl.role = 'community-ci';
+        ctrl.mode = 'metal';
+        ctrl.details = '';
+
+        ctrl.pageHeader = 'Pods';
+        ctrl.pageParagraph = 'This page is used to create or query pods.';
+
+        /**
+         * This is called when the date filter calendar is opened. It
+         * does some event handling, and sets a scope variable so the UI
+         * knows which calendar was opened.
+         * @param {Object} $event - The Event object
+         * @param {String} openVar - Tells which calendar was opened
+         */
+        function open($event, openVar) {
+            $event.preventDefault();
+            $event.stopPropagation();
+            ctrl[openVar] = true;
+        }
+
+        /**
+         * This function will clear all filters and update the results
+         * listing.
+         */
+        function clearFilters() {
+            ctrl.update();
+        }
+
+        /**
+         * This will contact the TestAPI to create a new pod.
+         */
+        function create() {
+            ctrl.showError = false;
+            var pods_url = ctrl.url;
+            var body = {
+                name: ctrl.name,
+                mode: ctrl.mode,
+                role: ctrl.role,
+                details: ctrl.details
+            };
+
+            ctrl.podsRequest =
+                $http.post(pods_url, body).error(function (error) {
+                    ctrl.showError = true;
+                    ctrl.error =
+                        'Error creating the new pod from server: ' +
+                        angular.toJson(error);
+                });
+        }
+
+        /**
+         * This will contact the TestAPI to get a listing of declared pods.
+         */
+        function update() {
+            ctrl.showError = false;
+            ctrl.podsRequest =
+                $http.get(ctrl.url).success(function (data) {
+                    ctrl.data = data;
+                }).error(function (error) {
+                    ctrl.data = null;
+                    ctrl.showError = true;
+                    ctrl.error =
+                        'Error retrieving pods from server: ' +
+                        angular.toJson(error);
+                });
+        }
+    }
+})();
index 3056e1d..2ae5339 100644 (file)
@@ -1,6 +1,23 @@
 <h3>{{ctrl.pageHeader}}</h3>
 <p>{{ctrl.pageParagraph}}</p>
-
+<form class="form-inline" ng-show="ctrl.isUserResults">
+<h4>Upload Results</h4>
+<div class="form-group col-m-3">
+     <input class="form-contrl btn btn-default" type = "file" file-model = "resultFile"/>
+</div>
+<div class="checkbox col-m-1">
+  <label>
+      <input type="checkbox" ng-model="ctrl.isPublic">public
+  </label>
+</div>
+<div class="form-group col-m-3">
+     <button class="btn btn-primary" ng-click = "ctrl.uploadFile()">upload result</button>
+</div>
+<div>
+<lable>{{ctrl.uploadState}}</label>
+</div>
+</form>
+<div class="row" style="margin-bottom:24px;"></div>
 <div class="result-filters">
     <h4>Filters</h4>
     <div class="row">
@@ -41,7 +58,6 @@
 
 <div cg-busy="{promise:ctrl.authRequest,message:'Loading'}"></div>
 <div cg-busy="{promise:ctrl.resultsRequest,message:'Loading'}"></div>
-
 <div ng-show="ctrl.data" class="results-table">
     <table ng-data="ctrl.data.result" ng-show="ctrl.data" class="table table-striped table-hover">
         <thead>
index 93a549a..cc6cc0b 100644 (file)
         .module('testapiApp')
         .controller('ResultsController', ResultsController);
 
+    angular
+        .module('testapiApp')
+        .directive('fileModel', ['$parse', function ($parse) {
+            return {
+               restrict: 'A',
+               link: function(scope, element, attrs) {
+                  var model = $parse(attrs.fileModel);
+                  var modelSetter = model.assign;
+
+                  element.bind('change', function(){
+                     scope.$apply(function(){
+                        modelSetter(scope, element[0].files[0]);
+                     });
+                  });
+               }
+            };
+         }]);
+
     ResultsController.$inject = [
         '$scope', '$http', '$filter', '$state', 'testapiApiUrl','raiseAlert'
     ];
@@ -32,6 +50,7 @@
         raiseAlert) {
         var ctrl = this;
 
+        ctrl.uploadFile=uploadFile;
         ctrl.update = update;
         ctrl.open = open;
         ctrl.clearFilters = clearFilters;
@@ -76,6 +95,8 @@
         ctrl.format = 'yyyy-MM-dd';
 
         /** Check to see if this page should display user-specific results. */
+        // ctrl.isUserResults = $state.current.name === 'userResults';
+        // need auth to browse
         ctrl.isUserResults = $state.current.name === 'userResults';
 
         // Should only be on user-results-page if authenticated.
             'The most recently uploaded community test results are listed ' +
             'here.';
 
+        ctrl.uploadState = '';
+
+        ctrl.isPublic = false;
+
         if (ctrl.isUserResults) {
             ctrl.authRequest = $scope.auth.doSignCheck()
                 .then(ctrl.update);
-            ctrl.getUserProducts();
+            // ctrl.getUserProducts();
         } else {
             ctrl.update();
         }
 
+
+        function uploadFileToUrl(file, uploadUrl){
+           var fd = new FormData();
+           fd.append('file', file);
+           fd.append('public', ctrl.isPublic)
+
+           $http.post(uploadUrl, fd, {
+              transformRequest: angular.identity,
+              headers: {'Content-Type': undefined}
+           })
+
+           .success(function(data){
+              var id = data.href.substr(data.href.lastIndexOf('/')+1);
+              ctrl.uploadState = "Upload succeed. Result id is " + id;
+              ctrl.update();
+           })
+
+           .error(function(data, status){
+              ctrl.uploadState = "Upload failed. Error code is " + status;
+           });
+        }
+
+        function uploadFile(){
+           var file = $scope.resultFile;
+           console.log('file is ' );
+           console.dir(file);
+
+           var uploadUrl = testapiApiUrl + "/results/upload";
+           uploadFileToUrl(file, uploadUrl);
+        };
+
         /**
          * This will contact the TestAPI API to get a listing of test run
          * results.
             ctrl.resultsRequest =
                 $http.get(content_url).success(function (data) {
                     ctrl.data = data;
-                    ctrl.totalItems = 20 // ctrl.data.pagination.total_pages * ctrl.itemsPerPage;
-                    ctrl.currentPage = 1 // ctrl.data.pagination.current_page;
+                    ctrl.totalItems = ctrl.data.pagination.total_pages * ctrl.itemsPerPage;
+                    ctrl.currentPage = ctrl.data.pagination.current_page;
                 }).error(function (error) {
                     ctrl.data = null;
                     ctrl.totalItems = 0;
index 46ccc61..2d7399f 100644 (file)
@@ -40,7 +40,7 @@
         <script src="testapi-ui/shared/header/headerController.js"></script>
         <script src="testapi-ui/shared/alerts/alertModalFactory.js"></script>
         <script src="testapi-ui/shared/alerts/confirmModalFactory.js"></script>
-        <script src="testapi-ui/components/guidelines/guidelinesController.js"></script>
+        <script src="testapi-ui/components/pods/podsController.js"></script>
         <script src="testapi-ui/components/results/resultsController.js"></script>
         <script src="testapi-ui/components/results-report/resultsReportController.js"></script>
         <script src="testapi-ui/components/profile/profileController.js"></script>
index f2c49e8..f5b2414 100644 (file)
@@ -17,7 +17,7 @@ TestAPI
           <ul class="nav navbar-nav">
             <li ng-class="{ active: header.isActive('/')}"><a ui-sref="home">Home</a></li>
             <li ng-class="{ active: header.isActive('/about')}"><a ui-sref="about">About</a></li>
-            <li ng-class="{ active: header.isActive('/guidelines')}"><a ui-sref="guidelines">OPNFV Powered&#8482; Guidelines</a></li>
+            <li ng-class="{ active: header.isActive('/pods')}"><a ui-sref="pods">Pods</a></li>
             <li ng-class="{ active: header.isActive('/community_results')}"><a ui-sref="communityResults">Community Results</a></li>
             <!--
             <li ng-class="{ active: header.isCatalogActive('public')}" class="dropdown" uib-dropdown>
@@ -33,6 +33,7 @@ TestAPI
           </ul>
           <ul class="nav navbar-nav navbar-right">
             <li ng-class="{ active: header.isActive('/user_results')}" ng-if="auth.isAuthenticated"><a ui-sref="userResults">My Results</a></li>
+            <!--
             <li ng-if="auth.isAuthenticated" ng-class="{ active: header.isCatalogActive('user')}" class="dropdown" uib-dropdown>
                 <a role="button" class="dropdown-toggle" uib-dropdown-toggle>
                     My Catalog <strong class="caret"></strong>
@@ -42,6 +43,7 @@ TestAPI
                     <li><a ui-sref="userProducts">My Products</a></li>
                 </ul>
             </li>
+            -->
             <li ng-class="{ active: header.isActive('/profile')}" ng-if="auth.isAuthenticated"><a ui-sref="profile">Profile</a></li>
             <li ng-if="auth.isAuthenticated"><a href="" ng-click="auth.doSignOut()">Sign Out</a></li>
             <li ng-if="!auth.isAuthenticated"><a href="" ng-click="auth.doSignIn()">Sign In / Sign Up</a></li>
index 748bd34..6433fa6 100644 (file)
@@ -8,10 +8,10 @@ docker_compose_yml = './docker-compose.yml'
 docker_compose_template = './docker-compose.yml.template'
 
 
-def render_docker_compose(port, swagger_url):
+def render_docker_compose(port, base_url):
     vars = {
         "expose_port": port,
-        "swagger_url": swagger_url,
+        "base_url": base_url,
     }
     template = env.get_template(docker_compose_template)
     yml = template.render(vars=vars)
@@ -22,7 +22,7 @@ def render_docker_compose(port, swagger_url):
 
 
 def main(args):
-    render_docker_compose(args.expose_port, args.swagger_url)
+    render_docker_compose(args.expose_port, args.base_url)
     os.system('docker-compose -f {} up -d'.format(docker_compose_yml))
 
 
@@ -33,8 +33,8 @@ if __name__ == '__main__':
                         required=False,
                         default=8000,
                         help='testapi exposed port')
-    parser.add_argument('-su', '--swagger-url',
+    parser.add_argument('-l', '--base-url',
                         type=str,
                         required=True,
-                        help='testapi exposed swagger-url')
+                        help='testapi exposed base-url')
     main(parser.parse_args())
index 5b131f7..cd68404 100644 (file)
@@ -8,7 +8,7 @@ services:
     container_name: opnfv-testapi
     environment:
       - mongodb_url=mongodb://mongo:27017/
-      - swagger_url={{ vars.swagger_url }}
+      - base_url={{ vars.base_url }}
     ports:
       - "{{ vars.expose_port }}:8000"
     links:
index e031e19..a46fce2 100644 (file)
@@ -9,7 +9,7 @@
 #
 # Execution:
 #    $ docker run -dti -p 8001:8000 \
-#      -e "swagger_url=http://10.63.243.17:8001" \
+#      -e "base_url=http://10.63.243.17:8001" \
 #      -e "mongodb_url=mongodb://10.63.243.17:27017/" \
 #      opnfv/testapi:tag
 #
@@ -47,5 +47,5 @@ RUN git clone https://gerrit.opnfv.org/gerrit/releng /home/releng
 
 WORKDIR /home/releng/utils/test/testapi/
 RUN pip install -r requirements.txt
-RUN bash install.sh
+RUN python setup.py install
 CMD ["bash", "docker/start-server.sh"]
index 9f07efb..b14bc24 100755 (executable)
@@ -6,6 +6,9 @@ if [ "$mongodb_url" != "" ]; then
     sudo crudini --set --existing $FILE mongo url $mongodb_url
 fi
 
-if [ "$swagger_url" != "" ]; then
-    sudo crudini --set --existing $FILE swagger base_url $swagger_url
+if [ "$base_url" != "" ]; then
+    sudo crudini --set --existing $FILE api url $base_url/api/v1
+    sudo crudini --set --existing $FILE ui url $base_url
+    sudo echo "{\"testapiApiUrl\": \"$base_url/api/v1\"}" > \
+        /usr/local/lib/python2.7/dist-packages/opnfv_testapi/static/testapi-ui/config.json
 fi
index 692e488..1ec899f 100644 (file)
@@ -10,13 +10,14 @@ dbname = test_results_collection
 # Listening port
 url = http://localhost:8000/api/v1
 port = 8000
+
+# Number of results for one page (integer value)
+results_per_page = 20
+
 # With debug_on set to true, error traces will be shown in HTTP responses
 debug = True
 authenticate = False
 
-[swagger]
-base_url = http://localhost:8000
-
 [ui]
 url = http://localhost:8000
 
@@ -41,7 +42,7 @@ openid_ns = http://specs.openid.net/auth/2.0
 # Return endpoint in Refstack's API. Value indicating the endpoint
 # where the user should be returned to after signing in. Openstack Id
 # Idp only supports HTTPS address types. (string value)
-openid_return_to = /api/v1/auth/signin_return
+openid_return_to = v1/auth/signin_return
 
 # Claimed identifier. This value must be set to
 # "http://specs.openid.net/auth/2.0/identifier_select". or to user
index b8c4fb4..da6a6cf 100644 (file)
@@ -33,6 +33,7 @@ def main(args):
     else:
         exit(1)
 
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Create \
                                       Swagger Spec documentation')
@@ -40,13 +41,13 @@ if __name__ == '__main__':
                         type=str,
                         required=False,
                         default=('http://testresults.opnfv.org'
-                                 '/test/swagger/spec.json'),
+                                 '/test/swagger/resources.json'),
                         help='Resource Listing Spec File')
     parser.add_argument('-au', '--api-declaration-url',
                         type=str,
                         required=False,
                         default=('http://testresults.opnfv.org'
-                                 '/test/swagger/spec'),
+                                 '/test/swagger/APIs'),
                         help='API Declaration Spec File')
     parser.add_argument('-o', '--output-directory',
                         required=True,
diff --git a/utils/test/testapi/install.sh b/utils/test/testapi/install.sh
deleted file mode 100755 (executable)
index d470e38..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-usage="
-Script to install opnfv_tesgtapi automatically.
-This script should be run under root.
-
-usage:
-    bash $(basename "$0") [-h|--help] [-t <test_name>]
-
-where:
-    -h|--help         show this help text"
-
-# Ref :-  https://openstack.nimeyo.com/87286/openstack-packaging-all-definition-data-files-config-setup
-if [ -z "$VIRTUAL_ENV" ];
-then
-    if [[ $(whoami) != "root" ]];
-    then
-        echo "Error: This script must be run as root!"
-        exit 1
-    fi
-else
-    sed -i -e 's#/etc/opnfv_testapi =#etc/opnfv_testapi =#g' setup.cfg
-fi
-
-cp -fr 3rd_party/static opnfv_testapi/static
-python setup.py install
-rm -fr opnfv_testapi/static
-if [ ! -z "$VIRTUAL_ENV" ]; then
-    sed -i -e 's#etc/opnfv_testapi =#/etc/opnfv_testapi =#g' setup.cfg
-fi
\ No newline at end of file
index 545d5e3..50ac049 100644 (file)
@@ -29,40 +29,18 @@ TODOs :
 
 """
 
-import argparse
-import sys
-
-import motor
 import tornado.ioloop
 
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
 from opnfv_testapi.router import url_mappings
 from opnfv_testapi.tornado_swagger import swagger
 
-CONF = None
-
-
-def parse_config(argv=[]):
-    global CONF
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-c", "--config-file", dest='config_file',
-                        help="Config file location")
-    args = parser.parse_args(argv)
-    if args.config_file:
-        config.Config.CONFIG = args.config_file
-    CONF = config.Config()
-
-
-def get_db():
-    return motor.MotorClient(CONF.mongo_url)[CONF.mongo_dbname]
-
 
 def make_app():
-    swagger.docs(base_url=CONF.swagger_base_url,
+    swagger.docs(base_url=CONF.ui_url,
                  static_path=CONF.static_path)
     return swagger.Application(
         url_mappings.mappings,
-        db=get_db(),
         debug=CONF.api_debug,
         auth=CONF.api_authenticate,
         cookie_secret='opnfv-testapi',
@@ -70,7 +48,6 @@ def make_app():
 
 
 def main():
-    parse_config(sys.argv[1:])
     application = make_app()
     application.listen(CONF.api_port)
     tornado.ioloop.IOLoop.current().start()
index 67e8fbd..24ba876 100644 (file)
@@ -13,6 +13,7 @@ from tornado import web
 
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
+from opnfv_testapi.db import api as dbapi
 
 
 def authenticate(method):
@@ -26,7 +27,7 @@ def authenticate(method):
             except KeyError:
                 raises.Unauthorized(message.unauthorized())
             query = {'access_token': token}
-            check = yield self._eval_db_find_one(query, 'tokens')
+            check = yield dbapi.db_find_one('tokens', query)
             if not check:
                 raises.Forbidden(message.invalid_token())
         ret = yield gen.coroutine(method)(self, *args, **kwargs)
@@ -38,7 +39,7 @@ def not_exist(xstep):
     @functools.wraps(xstep)
     def wrap(self, *args, **kwargs):
         query = kwargs.get('query')
-        data = yield self._eval_db_find_one(query)
+        data = yield dbapi.db_find_one(self.table, query)
         if not data:
             raises.NotFound(message.not_found(self.table, query))
         ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
@@ -78,7 +79,7 @@ def carriers_exist(xstep):
         carriers = kwargs.pop('carriers', {})
         if carriers:
             for table, query in carriers:
-                exist = yield self._eval_db_find_one(query(), table)
+                exist = yield dbapi.db_find_one(table, query())
                 if not exist:
                     raises.Forbidden(message.not_found(table, query()))
         ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
@@ -91,7 +92,7 @@ def new_not_exists(xstep):
     def wrap(self, *args, **kwargs):
         query = kwargs.get('query')
         if query:
-            to_data = yield self._eval_db_find_one(query())
+            to_data = yield dbapi.db_find_one(self.table, query())
             if to_data:
                 raises.Forbidden(message.exist(self.table, query()))
         ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
@@ -105,7 +106,7 @@ def updated_one_not_exist(xstep):
         db_keys = kwargs.pop('db_keys', [])
         query = self._update_query(db_keys, data)
         if query:
-            to_data = yield self._eval_db_find_one(query)
+            to_data = yield dbapi.db_find_one(self.table, query)
             if to_data:
                 raises.Forbidden(message.exist(self.table, query))
         ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
index 46765ff..4cd53c6 100644 (file)
@@ -8,26 +8,29 @@
 # feng.xiaowei@zte.com.cn remove prepare_put_request            5-30-2016
 ##############################################################################
 import ConfigParser
+import argparse
 import os
+import sys
 
 
 class Config(object):
-    CONFIG = None
 
     def __init__(self):
-        self.file = self.CONFIG if self.CONFIG else self._default_config()
+        self.config_file = None
+        self._set_config_file()
         self._parse()
+        self._parse_per_page()
         self.static_path = os.path.join(
             os.path.dirname(os.path.normpath(__file__)),
             os.pardir,
             'static')
 
     def _parse(self):
-        if not os.path.exists(self.file):
-            raise Exception("%s not found" % self.file)
+        if not os.path.exists(self.config_file):
+            raise Exception("%s not found" % self.config_file)
 
         config = ConfigParser.RawConfigParser()
-        config.read(self.file)
+        config.read(self.config_file)
         self._parse_section(config)
 
     def _parse_section(self, config):
@@ -37,6 +40,10 @@ class Config(object):
         [setattr(self, '{}_{}'.format(section, k), self._parse_value(v))
          for k, v in config.items(section)]
 
+    def _parse_per_page(self):
+        if not hasattr(self, 'api_results_per_page'):
+            self.api_results_per_page = 20
+
     @staticmethod
     def _parse_value(value):
         try:
@@ -48,8 +55,24 @@ class Config(object):
                 value = False
         return value
 
-    @staticmethod
-    def _default_config():
+    def _set_config_file(self):
+        if not self._set_sys_config_file():
+            self._set_default_config_file()
+
+    def _set_sys_config_file(self):
+        parser = argparse.ArgumentParser()
+        parser.add_argument("-c", "--config-file", dest='config_file',
+                            help="Config file location", metavar="FILE")
+        args, _ = parser.parse_known_args(sys.argv)
+        try:
+            self.config_file = args.config_file
+        finally:
+            return self.config_file is not None
+
+    def _set_default_config_file(self):
         is_venv = os.getenv('VIRTUAL_ENV')
-        return os.path.join('/' if not is_venv else is_venv,
-                            'etc/opnfv_testapi/config.ini')
+        self.config_file = os.path.join('/' if not is_venv else is_venv,
+                                        'etc/opnfv_testapi/config.ini')
+
+
+CONF = Config()
index 98536ff..951cbaf 100644 (file)
@@ -10,6 +10,10 @@ not_found_base = 'Could Not Found'
 exist_base = 'Already Exists'
 
 
+def key_error(key):
+    return "KeyError: '{}'".format(key)
+
+
 def no_body():
     return 'No Body'
 
index ec6b8a5..55c58c9 100644 (file)
@@ -26,6 +26,10 @@ class Forbidden(Raiser):
     code = httplib.FORBIDDEN
 
 
+class Conflict(Raiser):
+    code = httplib.CONFLICT
+
+
 class NotFound(Raiser):
     code = httplib.NOT_FOUND
 
diff --git a/utils/test/testapi/opnfv_testapi/db/__init__.py b/utils/test/testapi/opnfv_testapi/db/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/utils/test/testapi/opnfv_testapi/db/api.py b/utils/test/testapi/opnfv_testapi/db/api.py
new file mode 100644 (file)
index 0000000..c057480
--- /dev/null
@@ -0,0 +1,38 @@
+import motor
+
+from opnfv_testapi.common.config import CONF
+
+DB = motor.MotorClient(CONF.mongo_url)[CONF.mongo_dbname]
+
+
+def db_update(collection, query, update_req):
+    return _eval_db(collection, 'update', query, update_req, check_keys=False)
+
+
+def db_delete(collection, query):
+    return _eval_db(collection, 'remove', query)
+
+
+def db_aggregate(collection, pipelines):
+    return _eval_db(collection, 'aggregate', pipelines, allowDiskUse=True)
+
+
+def db_list(collection, query):
+    return _eval_db(collection, 'find', query)
+
+
+def db_save(collection, data):
+    return _eval_db(collection, 'insert', data, check_keys=False)
+
+
+def db_find_one(collection, query):
+    return _eval_db(collection, 'find_one', query)
+
+
+def _eval_db(collection, method, *args, **kwargs):
+    exec_collection = DB.__getattr__(collection)
+    return exec_collection.__getattribute__(method)(*args, **kwargs)
+
+
+def _eval_db_find_one(query, table=None):
+    return _eval_db(table, 'find_one', query)
index 2fc31ca..ed55c70 100644 (file)
@@ -20,8 +20,8 @@
 # feng.xiaowei@zte.com.cn remove DashboardHandler            5-30-2016
 ##############################################################################
 
-from datetime import datetime
 import json
+from datetime import datetime
 
 from tornado import gen
 from tornado import web
@@ -29,6 +29,7 @@ from tornado import web
 from opnfv_testapi.common import check
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
+from opnfv_testapi.db import api as dbapi
 from opnfv_testapi.resources import models
 from opnfv_testapi.tornado_swagger import swagger
 
@@ -38,7 +39,6 @@ DEFAULT_REPRESENTATION = "application/json"
 class GenericApiHandler(web.RequestHandler):
     def __init__(self, application, request, **kwargs):
         super(GenericApiHandler, self).__init__(application, request, **kwargs)
-        self.db = self.settings["db"]
         self.json_args = None
         self.table = None
         self.table_cls = None
@@ -50,7 +50,7 @@ class GenericApiHandler(web.RequestHandler):
         self.auth = self.settings["auth"]
 
     def prepare(self):
-        if self.request.method != "GET" and self.request.method != "DELETE":
+        if self.request.body:
             if self.request.headers.get("Content-Type") is not None:
                 if self.request.headers["Content-Type"].startswith(
                         DEFAULT_REPRESENTATION):
@@ -90,8 +90,7 @@ class GenericApiHandler(web.RequestHandler):
 
         if self.table != 'results':
             data.creation_date = datetime.now()
-        _id = yield self._eval_db(self.table, 'insert', data.format(),
-                                  check_keys=False)
+        _id = yield dbapi.db_save(self.table, data.format())
         if 'name' in self.json_args:
             resource = data.name
         else:
@@ -101,22 +100,72 @@ class GenericApiHandler(web.RequestHandler):
     @web.asynchronous
     @gen.coroutine
     def _list(self, query=None, res_op=None, *args, **kwargs):
+        sort = kwargs.get('sort')
+        page = kwargs.get('page', 0)
+        last = kwargs.get('last', 0)
+        per_page = kwargs.get('per_page', 0)
         if query is None:
             query = {}
-        data = []
-        cursor = self._eval_db(self.table, 'find', query)
-        if 'sort' in kwargs:
-            cursor = cursor.sort(kwargs.get('sort'))
-        if 'last' in kwargs:
-            cursor = cursor.limit(kwargs.get('last'))
-        while (yield cursor.fetch_next):
-            data.append(self.format_data(cursor.next_object()))
+        pipelines = list()
+        pipelines.append({'$match': query})
+
+        total_pages = 0
+        data = list()
+        cursor = dbapi.db_list(self.table, query)
+        records_count = yield cursor.count()
+        if records_count > 0:
+            if page > 0:
+                total_pages, return_nr = self._calc_total_pages(records_count,
+                                                                last,
+                                                                page,
+                                                                per_page)
+                pipelines = self._set_pipelines(pipelines,
+                                                sort,
+                                                return_nr,
+                                                page,
+                                                per_page)
+            cursor = dbapi.db_aggregate(self.table, pipelines)
+            while (yield cursor.fetch_next):
+                data.append(self.format_data(cursor.next_object()))
         if res_op is None:
             res = {self.table: data}
         else:
             res = res_op(data, *args)
+        if page > 0:
+            res.update({
+                'pagination': {
+                    'current_page': kwargs.get('page'),
+                    'total_pages': total_pages
+                }
+            })
         self.finish_request(res)
 
+    @staticmethod
+    def _calc_total_pages(records_count, last, page, per_page):
+        records_nr = records_count
+        if (records_count > last) and (last > 0):
+            records_nr = last
+
+        total_pages, remainder = divmod(records_nr, per_page)
+        if remainder > 0:
+            total_pages += 1
+        if page > 1 and page > total_pages:
+            raises.BadRequest(
+                'Request page > total_pages [{}]'.format(total_pages))
+        return total_pages, records_nr
+
+    @staticmethod
+    def _set_pipelines(pipelines, sort, return_nr, page, per_page):
+        if sort:
+            pipelines.append({'$sort': sort})
+
+        over = (page - 1) * per_page
+        left = return_nr - over
+        pipelines.append({'$skip': over})
+        pipelines.append({'$limit': per_page if per_page < left else left})
+
+        return pipelines
+
     @web.asynchronous
     @gen.coroutine
     @check.not_exist
@@ -126,7 +175,7 @@ class GenericApiHandler(web.RequestHandler):
     @check.authenticate
     @check.not_exist
     def _delete(self, data, query=None):
-        yield self._eval_db(self.table, 'remove', query)
+        yield dbapi.db_delete(self.table, query)
         self.finish_request()
 
     @check.authenticate
@@ -136,11 +185,20 @@ class GenericApiHandler(web.RequestHandler):
     def _update(self, data, query=None, **kwargs):
         data = self.table_cls.from_dict(data)
         update_req = self._update_requests(data)
-        yield self._eval_db(self.table, 'update', query, update_req,
-                            check_keys=False)
+        yield dbapi.db_update(self.table, query, update_req)
         update_req['_id'] = str(data._id)
         self.finish_request(update_req)
 
+    @check.authenticate
+    @check.no_body
+    @check.not_exist
+    @check.updated_one_not_exist
+    def pure_update(self, data, query=None, **kwargs):
+        data = self.table_cls.from_dict(data)
+        update_req = self._update_requests(data)
+        yield dbapi.db_update(self.table, query, update_req)
+        self.finish_request()
+
     def _update_requests(self, data):
         request = dict()
         for k, v in self.json_args.iteritems():
@@ -180,23 +238,6 @@ class GenericApiHandler(web.RequestHandler):
             query[key] = new
         return query if not equal else dict()
 
-    def _eval_db(self, table, method, *args, **kwargs):
-        exec_collection = self.db.__getattr__(table)
-        return exec_collection.__getattribute__(method)(*args, **kwargs)
-
-    def _eval_db_find_one(self, query, table=None):
-        if table is None:
-            table = self.table
-        return self._eval_db(table, 'find_one', query)
-
-    def db_save(self, collection, data):
-        self._eval_db(collection, 'insert', data, check_keys=False)
-
-    def db_find_one(self, query, collection=None):
-        if not collection:
-            collection = self.table
-        return self._eval_db(collection, 'find_one', query)
-
 
 class VersionHandler(GenericApiHandler):
     @swagger.operation(nickname='listAllVersions')
index e8fc532..e70a6ed 100644 (file)
@@ -48,6 +48,29 @@ class ModelBase(object):
 
         return t
 
+    @classmethod
+    def from_dict_with_raise(cls, a_dict):
+        if a_dict is None:
+            return None
+
+        attr_parser = cls.attr_parser()
+        t = cls()
+        for k, v in a_dict.iteritems():
+            if k not in t.__dict__:
+                raise AttributeError(
+                    '{} has no attribute {}'.format(cls.__name__, k))
+            value = v
+            if isinstance(v, dict) and k in attr_parser:
+                value = attr_parser[k].from_dict_with_raise(v)
+            elif isinstance(v, list) and k in attr_parser:
+                value = []
+                for item in v:
+                    value.append(attr_parser[k].from_dict_with_raise(item))
+
+            t.__setattr__(k, value)
+
+        return t
+
     @staticmethod
     def attr_parser():
         return {}
index 824a89e..9389d26 100644 (file)
@@ -6,16 +6,20 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+import logging
 from datetime import datetime
 from datetime import timedelta
+import json
 
 from bson import objectid
 
+from opnfv_testapi.common.config import CONF
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
 from opnfv_testapi.resources import handlers
 from opnfv_testapi.resources import result_models
 from opnfv_testapi.tornado_swagger import swagger
+from opnfv_testapi.ui.auth import constants as auth_const
 
 
 class GenericResultHandler(handlers.GenericApiHandler):
@@ -37,6 +41,7 @@ class GenericResultHandler(handlers.GenericApiHandler):
         query = dict()
         date_range = dict()
 
+        query['public'] = {'$not': {'$eq': 'false'}}
         for k in self.request.query_arguments.keys():
             v = self.get_query_argument(k)
             if k == 'project' or k == 'pod' or k == 'case':
@@ -53,10 +58,24 @@ class GenericResultHandler(handlers.GenericApiHandler):
                 date_range.update({'$gte': str(v)})
             elif k == 'to':
                 date_range.update({'$lt': str(v)})
-            elif k != 'last' and k != 'page':
+            elif k == 'signed':
+                openid = self.get_secure_cookie(auth_const.OPENID)
+                role = self.get_secure_cookie(auth_const.ROLE)
+                logging.info('role:%s', role)
+                if role:
+                    del query['public']
+                    if role != "reviewer":
+                        query['user'] = openid
+            elif k not in ['last', 'page', 'descend']:
                 query[k] = v
             if date_range:
                 query['start_date'] = date_range
+
+            # if $lt is not provided,
+            # empty/None/null/'' start_date will also be returned
+            if 'start_date' in query and '$lt' not in query['start_date']:
+                query['start_date'].update({'$lt': str(datetime.now())})
+
         return query
 
 
@@ -81,9 +100,10 @@ class ResultsCLHandler(GenericResultHandler):
                  - criteria : the global criteria status passed or failed
                  - trust_indicator : evaluate the stability of the test case
                    to avoid running systematically long and stable test case
+                 - signed : get logined user result
 
                 GET /results/project=functest&case=vPing&version=Arno-R1 \
-                &pod=pod_name&period=15
+                &pod=pod_name&period=15&signed
             @return 200: all test results consist with query,
                          empty list if no result is found
             @rtype: L{TestResults}
@@ -135,22 +155,41 @@ class ResultsCLHandler(GenericResultHandler):
             @type last: L{string}
             @in last: query
             @required last: False
+            @param page: which page to list, default to 1
+            @type page: L{int}
+            @in page: query
+            @required page: False
             @param trust_indicator: must be float
             @type trust_indicator: L{float}
             @in trust_indicator: query
             @required trust_indicator: False
+            @param signed: user results or all results
+            @type signed: L{string}
+            @in signed: query
+            @required signed: False
+            @param descend: true, newest2oldest; false, oldest2newest
+            @type descend: L{string}
+            @in descend: query
+            @required descend: False
         """
-        last = self.get_query_argument('last', 0)
-        if last is not None:
-            last = self.get_int('last', last)
+        def descend_limit():
+            descend = self.get_query_argument('descend', 'true')
+            return -1 if descend.lower() == 'true' else 1
+
+        def last_limit():
+            return self.get_int('last', self.get_query_argument('last', 0))
 
-        page = self.get_query_argument('page', 0)
-        if page:
-            last = 20
+        def page_limit():
+            return self.get_int('page', self.get_query_argument('page', 1))
 
-        self._list(query=self.set_query(),
-                   sort=[('start_date', -1)],
-                   last=last)
+        limitations = {
+            'sort': {'_id': descend_limit()},
+            'last': last_limit(),
+            'page': page_limit(),
+            'per_page': CONF.api_results_per_page
+        }
+
+        self._list(query=self.set_query(), **limitations)
 
     @swagger.operation(nickname="createTestResult")
     def post(self):
@@ -164,6 +203,9 @@ class ResultsCLHandler(GenericResultHandler):
             @raise 404: pod/project/testcase not exist
             @raise 400: body/pod_name/project_name/case_name not provided
         """
+        self._post()
+
+    def _post(self):
         def pod_query():
             return {'name': self.json_args.get('pod_name')}
 
@@ -178,9 +220,39 @@ class ResultsCLHandler(GenericResultHandler):
         carriers = [('pods', pod_query),
                     ('projects', project_query),
                     ('testcases', testcase_query)]
+
         self._create(miss_fields=miss_fields, carriers=carriers)
 
 
+class ResultsUploadHandler(ResultsCLHandler):
+    @swagger.operation(nickname="uploadTestResult")
+    def post(self):
+        """
+            @description: upload and create a test result
+            @param body: result to be created
+            @type body: L{ResultCreateRequest}
+            @in body: body
+            @rtype: L{CreateResponse}
+            @return 200: result is created.
+            @raise 404: pod/project/testcase not exist
+            @raise 400: body/pod_name/project_name/case_name not provided
+        """
+        logging.info('file upload')
+        fileinfo = self.request.files['file'][0]
+        is_public = self.get_body_argument('public')
+        logging.warning('public:%s', is_public)
+        logging.info('results is :%s', fileinfo['filename'])
+        logging.info('results is :%s', fileinfo['body'])
+        self.json_args = json.loads(fileinfo['body']).copy()
+        self.json_args['public'] = is_public
+
+        openid = self.get_secure_cookie(auth_const.OPENID)
+        if openid:
+            self.json_args['user'] = openid
+
+        super(ResultsUploadHandler, self)._post()
+
+
 class ResultsGURHandler(GenericResultHandler):
     @swagger.operation(nickname='getTestResultById')
     def get(self, result_id):
index 62a6dac..890bf82 100644 (file)
@@ -54,6 +54,8 @@ class ResultCreateRequest(models.ModelBase):
                  build_tag=None,
                  scenario=None,
                  criteria=None,
+                 user=None,
+                 public="true",
                  trust_indicator=None):
         self.pod_name = pod_name
         self.project_name = project_name
@@ -66,6 +68,8 @@ class ResultCreateRequest(models.ModelBase):
         self.build_tag = build_tag
         self.scenario = scenario
         self.criteria = criteria
+        self.user = user
+        self.public = public
         self.trust_indicator = trust_indicator if trust_indicator else TI(0)
 
 
@@ -89,7 +93,7 @@ class TestResult(models.ModelBase):
                  pod_name=None, installer=None, version=None,
                  start_date=None, stop_date=None, details=None,
                  build_tag=None, scenario=None, criteria=None,
-                 trust_indicator=None):
+                 user=None, public="true", trust_indicator=None):
         self._id = _id
         self.case_name = case_name
         self.project_name = project_name
@@ -102,6 +106,8 @@ class TestResult(models.ModelBase):
         self.build_tag = build_tag
         self.scenario = scenario
         self.criteria = criteria
+        self.user = user
+        self.public = public
         self.trust_indicator = trust_indicator
 
     @staticmethod
index 5d420a5..e9c19a7 100644 (file)
@@ -15,6 +15,24 @@ class GenericScenarioHandler(handlers.GenericApiHandler):
         self.table = self.db_scenarios
         self.table_cls = models.Scenario
 
+    def set_query(self, locators):
+        query = dict()
+        elem_query = dict()
+        for k, v in locators.iteritems():
+            if k == 'scenario':
+                query['name'] = v
+            elif k == 'installer':
+                elem_query["installer"] = v
+            elif k == 'version':
+                elem_query["versions.version"] = v
+            elif k == 'project':
+                elem_query["versions.projects.project"] = v
+            else:
+                query[k] = v
+        if elem_query:
+            query['installers'] = {'$elemMatch': elem_query}
+        return query
+
 
 class ScenariosCLHandler(GenericScenarioHandler):
     @swagger.operation(nickname="queryScenarios")
@@ -96,10 +114,10 @@ class ScenarioGURHandler(GenericScenarioHandler):
         self._get_one(query={'name': name})
         pass
 
-    @swagger.operation(nickname="updateScenarioByName")
+    @swagger.operation(nickname="updateScenarioName")
     def put(self, name):
         """
-            @description: update a single scenario by name
+            @description: update scenario, only rename is supported currently
             @param body: fields to be updated
             @type body: L{ScenarioUpdateRequest}
             @in body: body
@@ -119,164 +137,639 @@ class ScenarioGURHandler(GenericScenarioHandler):
         @return 200: delete success
         @raise 404: scenario not exist:
         """
-
         self._delete(query={'name': name})
 
-    def _update_query(self, keys, data):
-        query = dict()
-        if self._is_rename():
-            new = self._term.get('name')
-            if data.get('name') != new:
-                query['name'] = new
 
-        return query
+class ScenarioUpdater(object):
+    def __init__(self, data, body=None,
+                 installer=None, version=None, project=None):
+        self.data = data
+        self.body = body
+        self.installer = installer
+        self.version = version
+        self.project = project
 
-    def _update_requests(self, data):
+    def update(self, item, action):
         updates = {
-            ('name', 'update'): self._update_requests_rename,
-            ('installer', 'add'): self._update_requests_add_installer,
-            ('installer', 'delete'): self._update_requests_delete_installer,
-            ('version', 'add'): self._update_requests_add_version,
-            ('version', 'delete'): self._update_requests_delete_version,
-            ('owner', 'update'): self._update_requests_change_owner,
-            ('project', 'add'): self._update_requests_add_project,
-            ('project', 'delete'): self._update_requests_delete_project,
-            ('customs', 'add'): self._update_requests_add_customs,
+            ('scores', 'post'): self._update_requests_add_score,
+            ('trust_indicators', 'post'): self._update_requests_add_ti,
+            ('customs', 'post'): self._update_requests_add_customs,
+            ('customs', 'put'): self._update_requests_update_customs,
             ('customs', 'delete'): self._update_requests_delete_customs,
-            ('score', 'add'): self._update_requests_add_score,
-            ('trust_indicator', 'add'): self._update_requests_add_ti,
+            ('projects', 'post'): self._update_requests_add_projects,
+            ('projects', 'put'): self._update_requests_update_projects,
+            ('projects', 'delete'): self._update_requests_delete_projects,
+            ('owner', 'put'): self._update_requests_change_owner,
+            ('versions', 'post'): self._update_requests_add_versions,
+            ('versions', 'put'): self._update_requests_update_versions,
+            ('versions', 'delete'): self._update_requests_delete_versions,
+            ('installers', 'post'): self._update_requests_add_installers,
+            ('installers', 'put'): self._update_requests_update_installers,
+            ('installers', 'delete'): self._update_requests_delete_installers,
         }
+        updates[(item, action)](self.data)
 
-        updates[(self._field, self._op)](data)
-
-        return data.format()
+        return self.data.format()
 
-    def _iter_installers(xstep):
+    def iter_installers(xstep):
         @functools.wraps(xstep)
         def magic(self, data):
             [xstep(self, installer)
              for installer in self._filter_installers(data.installers)]
         return magic
 
-    def _iter_versions(xstep):
+    def iter_versions(xstep):
         @functools.wraps(xstep)
         def magic(self, installer):
             [xstep(self, version)
              for version in (self._filter_versions(installer.versions))]
         return magic
 
-    def _iter_projects(xstep):
+    def iter_projects(xstep):
         @functools.wraps(xstep)
         def magic(self, version):
             [xstep(self, project)
              for project in (self._filter_projects(version.projects))]
         return magic
 
-    def _update_requests_rename(self, data):
-        data.name = self._term.get('name')
-        if not data.name:
-            raises.BadRequest(message.missing('name'))
-
-    def _update_requests_add_installer(self, data):
-        data.installers.append(models.ScenarioInstaller.from_dict(self._term))
-
-    def _update_requests_delete_installer(self, data):
-        data.installers = self._remove_installers(data.installers)
-
-    @_iter_installers
-    def _update_requests_add_version(self, installer):
-        installer.versions.append(models.ScenarioVersion.from_dict(self._term))
-
-    @_iter_installers
-    def _update_requests_delete_version(self, installer):
-        installer.versions = self._remove_versions(installer.versions)
-
-    @_iter_installers
-    @_iter_versions
-    def _update_requests_change_owner(self, version):
-        version.owner = self._term.get('owner')
-
-    @_iter_installers
-    @_iter_versions
-    def _update_requests_add_project(self, version):
-        version.projects.append(models.ScenarioProject.from_dict(self._term))
+    @iter_installers
+    @iter_versions
+    @iter_projects
+    def _update_requests_add_score(self, project):
+        project.scores.append(
+            models.ScenarioScore.from_dict(self.body))
 
-    @_iter_installers
-    @_iter_versions
-    def _update_requests_delete_project(self, version):
-        version.projects = self._remove_projects(version.projects)
+    @iter_installers
+    @iter_versions
+    @iter_projects
+    def _update_requests_add_ti(self, project):
+        project.trust_indicators.append(
+            models.ScenarioTI.from_dict(self.body))
 
-    @_iter_installers
-    @_iter_versions
-    @_iter_projects
+    @iter_installers
+    @iter_versions
+    @iter_projects
     def _update_requests_add_customs(self, project):
-        project.customs = list(set(project.customs + self._term))
+        project.customs = list(set(project.customs + self.body))
 
-    @_iter_installers
-    @_iter_versions
-    @_iter_projects
+    @iter_installers
+    @iter_versions
+    @iter_projects
+    def _update_requests_update_customs(self, project):
+        project.customs = list(set(self.body))
+
+    @iter_installers
+    @iter_versions
+    @iter_projects
     def _update_requests_delete_customs(self, project):
         project.customs = filter(
-            lambda f: f not in self._term,
+            lambda f: f not in self.body,
             project.customs)
 
-    @_iter_installers
-    @_iter_versions
-    @_iter_projects
-    def _update_requests_add_score(self, project):
-        project.scores.append(
-            models.ScenarioScore.from_dict(self._term))
+    @iter_installers
+    @iter_versions
+    def _update_requests_add_projects(self, version):
+        version.projects = self._update_with_body(models.ScenarioProject,
+                                                  'project',
+                                                  version.projects)
+
+    @iter_installers
+    @iter_versions
+    def _update_requests_update_projects(self, version):
+        version.projects = self._update_with_body(models.ScenarioProject,
+                                                  'project',
+                                                  list())
+
+    @iter_installers
+    @iter_versions
+    def _update_requests_delete_projects(self, version):
+        version.projects = self._remove_projects(version.projects)
 
-    @_iter_installers
-    @_iter_versions
-    @_iter_projects
-    def _update_requests_add_ti(self, project):
-        project.trust_indicators.append(
-            models.ScenarioTI.from_dict(self._term))
+    @iter_installers
+    @iter_versions
+    def _update_requests_change_owner(self, version):
+        version.owner = self.body.get('owner')
+
+    @iter_installers
+    def _update_requests_add_versions(self, installer):
+        installer.versions = self._update_with_body(models.ScenarioVersion,
+                                                    'version',
+                                                    installer.versions)
+
+    @iter_installers
+    def _update_requests_update_versions(self, installer):
+        installer.versions = self._update_with_body(models.ScenarioVersion,
+                                                    'version',
+                                                    list())
+
+    @iter_installers
+    def _update_requests_delete_versions(self, installer):
+        installer.versions = self._remove_versions(installer.versions)
+
+    def _update_requests_add_installers(self, scenario):
+        scenario.installers = self._update_with_body(models.ScenarioInstaller,
+                                                     'installer',
+                                                     scenario.installers)
+
+    def _update_requests_update_installers(self, scenario):
+        scenario.installers = self._update_with_body(models.ScenarioInstaller,
+                                                     'installer',
+                                                     list())
+
+    def _update_requests_delete_installers(self, scenario):
+        scenario.installers = self._remove_installers(scenario.installers)
+
+    def _update_with_body(self, clazz, field, withs):
+        exists = list()
+        malformat = list()
+        for new in self.body:
+            try:
+                format_new = clazz.from_dict_with_raise(new)
+                new_name = getattr(format_new, field)
+                if not any(getattr(o, field) == new_name for o in withs):
+                    withs.append(format_new)
+                else:
+                    exists.append(new_name)
+            except Exception as error:
+                malformat.append(error.message)
+        if malformat:
+            raises.BadRequest(message.bad_format(malformat))
+        elif exists:
+            raises.Conflict(message.exist('{}s'.format(field), exists))
+        return withs
 
-    def _is_rename(self):
-        return self._field == 'name' and self._op == 'update'
+    def _filter_installers(self, installers):
+        return self._filter('installer', installers)
 
     def _remove_installers(self, installers):
         return self._remove('installer', installers)
 
-    def _filter_installers(self, installers):
-        return self._filter('installer', installers)
+    def _filter_versions(self, versions):
+        return self._filter('version', versions)
 
     def _remove_versions(self, versions):
         return self._remove('version', versions)
 
-    def _filter_versions(self, versions):
-        return self._filter('version', versions)
+    def _filter_projects(self, projects):
+        return self._filter('project', projects)
 
     def _remove_projects(self, projects):
         return self._remove('project', projects)
 
-    def _filter_projects(self, projects):
-        return self._filter('project', projects)
+    def _filter(self, item, items):
+        return filter(
+            lambda f: getattr(f, item) == getattr(self, item),
+            items)
 
     def _remove(self, field, fields):
         return filter(
-            lambda f: getattr(f, field) != self._locate.get(field),
+            lambda f: getattr(f, field) not in self.body,
             fields)
 
-    def _filter(self, field, fields):
-        return filter(
-            lambda f: getattr(f, field) == self._locate.get(field),
-            fields)
 
-    @property
-    def _field(self):
-        return self.json_args.get('field')
+class GenericScenarioUpdateHandler(GenericScenarioHandler):
+    def __init__(self, application, request, **kwargs):
+        super(GenericScenarioUpdateHandler, self).__init__(application,
+                                                           request,
+                                                           **kwargs)
+        self.installer = None
+        self.version = None
+        self.project = None
+        self.item = None
+        self.action = None
+
+    def do_update(self, item, action, locators):
+        self.item = item
+        self.action = action
+        for k, v in locators.iteritems():
+            if not v:
+                v = self.get_query_argument(k)
+                setattr(self, k, v)
+                locators[k] = v
+        self.pure_update(query=self.set_query(locators=locators))
+
+    def _update_requests(self, data):
+        return ScenarioUpdater(data,
+                               self.json_args,
+                               self.installer,
+                               self.version,
+                               self.project).update(self.item, self.action)
+
+
+class ScenarioScoresHandler(GenericScenarioUpdateHandler):
+    @swagger.operation(nickname="addScoreRecord")
+    def post(self, scenario):
+        """
+        @description: add a new score record
+        @notes: add a new score record to a project
+            POST /api/v1/scenarios/<scenario_name>/scores? \
+                installer=<installer_name>& \
+                version=<version_name>& \
+                project=<project_name>
+        @param body: score to be added
+        @type body: L{ScenarioScore}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @param project: project name
+        @type project: L{string}
+        @in project: query
+        @required project: True
+        @return 200: score is created.
+        @raise 404:  scenario/installer/version/project not existed
+        """
+        self.do_update('scores',
+                       'post',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None,
+                                 'project': None})
+
+
+class ScenarioTIsHandler(GenericScenarioUpdateHandler):
+    @swagger.operation(nickname="addTrustIndicatorRecord")
+    def post(self, scenario):
+        """
+        @description: add a new trust indicator record
+        @notes: add a new trust indicator record to a project
+            POST /api/v1/scenarios/<scenario_name>/trust_indicators? \
+                installer=<installer_name>& \
+                version=<version_name>& \
+                project=<project_name>
+        @param body: trust indicator to be added
+        @type body: L{ScenarioTI}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @param project: project name
+        @type project: L{string}
+        @in project: query
+        @required project: True
+        @return 200: trust indicator is added.
+        @raise 404:  scenario/installer/version/project not existed
+        """
+        self.do_update('trust_indicators',
+                       'post',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None,
+                                 'project': None})
+
+
+class ScenarioCustomsHandler(GenericScenarioUpdateHandler):
+    @swagger.operation(nickname="addCustomizedTestCases")
+    def post(self, scenario):
+        """
+        @description: add customized test cases
+        @notes: add several test cases to a project
+            POST /api/v1/scenarios/<scenario_name>/customs? \
+                installer=<installer_name>& \
+                version=<version_name>& \
+                project=<project_name>
+        @param body: test cases to be added
+        @type body: C{list} of L{string}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @param project: project name
+        @type project: L{string}
+        @in project: query
+        @required project: True
+        @return 200: test cases are added.
+        @raise 404:  scenario/installer/version/project not existed
+        """
+        self.do_update('customs',
+                       'post',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None,
+                                 'project': None})
+
+    @swagger.operation(nickname="updateCustomizedTestCases")
+    def put(self, scenario):
+        """
+        @description: update customized test cases
+        @notes: substitute all the customized test cases
+            PUT /api/v1/scenarios/<scenario_name>/customs? \
+                installer=<installer_name>& \
+                version=<version_name>& \
+                project=<project_name>
+        @param body: new supported test cases
+        @type body: C{list} of L{string}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @param project: project name
+        @type project: L{string}
+        @in project: query
+        @required project: True
+        @return 200: substitute test cases success.
+        @raise 404:  scenario/installer/version/project not existed
+        """
+        self.do_update('customs',
+                       'put',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None,
+                                 'project': None})
+
+    @swagger.operation(nickname="deleteCustomizedTestCases")
+    def delete(self, scenario):
+        """
+        @description: delete one or several customized test cases
+        @notes: delete one or some customized test cases
+            DELETE /api/v1/scenarios/<scenario_name>/customs? \
+                installer=<installer_name>& \
+                version=<version_name>& \
+                project=<project_name>
+        @param body: test case(s) to be deleted
+        @type body: C{list} of L{string}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @param project: project name
+        @type project: L{string}
+        @in project: query
+        @required project: True
+        @return 200: delete test case(s) success.
+        @raise 404:  scenario/installer/version/project not existed
+        """
+        self.do_update('customs',
+                       'delete',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None,
+                                 'project': None})
 
-    @property
-    def _op(self):
-        return self.json_args.get('op')
 
-    @property
-    def _locate(self):
-        return self.json_args.get('locate')
+class ScenarioProjectsHandler(GenericScenarioUpdateHandler):
+    @swagger.operation(nickname="addProjectsUnderScenario")
+    def post(self, scenario):
+        """
+        @description: add projects to scenario
+        @notes: add one or multiple projects
+            POST /api/v1/scenarios/<scenario_name>/projects? \
+                installer=<installer_name>& \
+                version=<version_name>
+        @param body: projects to be added
+        @type body: C{list} of L{ScenarioProject}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @return 200: projects are added.
+        @raise 400: bad schema
+        @raise 409: conflict, project already exists
+        @raise 404:  scenario/installer/version not existed
+        """
+        self.do_update('projects',
+                       'post',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None})
+
+    @swagger.operation(nickname="updateScenarioProjects")
+    def put(self, scenario):
+        """
+        @description: replace all projects
+        @notes: substitute all projects, delete existed ones with new provides
+            PUT /api/v1/scenarios/<scenario_name>/projects? \
+                installer=<installer_name>& \
+                version=<version_name>
+        @param body: new projects
+        @type body: C{list} of L{ScenarioProject}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @return 200: replace projects success.
+        @raise 400: bad schema
+        @raise 404:  scenario/installer/version not existed
+        """
+        self.do_update('projects',
+                       'put',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None})
+
+    @swagger.operation(nickname="deleteProjectsUnderScenario")
+    def delete(self, scenario):
+        """
+        @description: delete one or multiple projects
+        @notes: delete one or multiple projects
+            DELETE /api/v1/scenarios/<scenario_name>/projects? \
+                installer=<installer_name>& \
+                version=<version_name>
+        @param body: projects(names) to be deleted
+        @type body: C{list} of L{string}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @return 200: delete project(s) success.
+        @raise 404:  scenario/installer/version not existed
+        """
+        self.do_update('projects',
+                       'delete',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None})
 
-    @property
-    def _term(self):
-        return self.json_args.get('term')
+
+class ScenarioOwnerHandler(GenericScenarioUpdateHandler):
+    @swagger.operation(nickname="changeScenarioOwner")
+    def put(self, scenario):
+        """
+        @description: change scenario owner
+        @notes: substitute all projects, delete existed ones with new provides
+            PUT /api/v1/scenarios/<scenario_name>/owner? \
+                installer=<installer_name>& \
+                version=<version_name>
+        @param body: new owner
+        @type body: L{ScenarioChangeOwnerRequest}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @param version: version
+        @type version: L{string}
+        @in version: query
+        @required version: True
+        @return 200: change owner success.
+        @raise 404:  scenario/installer/version not existed
+        """
+        self.do_update('owner',
+                       'put',
+                       locators={'scenario': scenario,
+                                 'installer': None,
+                                 'version': None})
+
+
+class ScenarioVersionsHandler(GenericScenarioUpdateHandler):
+    @swagger.operation(nickname="addVersionsUnderScenario")
+    def post(self, scenario):
+        """
+        @description: add versions to scenario
+        @notes: add one or multiple versions
+            POST /api/v1/scenarios/<scenario_name>/versions? \
+                installer=<installer_name>
+        @param body: versions to be added
+        @type body: C{list} of L{ScenarioVersion}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @return 200: versions are added.
+        @raise 400: bad schema
+        @raise 409: conflict, version already exists
+        @raise 404:  scenario/installer not exist
+        """
+        self.do_update('versions',
+                       'post',
+                       locators={'scenario': scenario,
+                                 'installer': None})
+
+    @swagger.operation(nickname="updateVersionsUnderScenario")
+    def put(self, scenario):
+        """
+        @description: replace all versions
+        @notes: substitute all versions as a totality
+            PUT /api/v1/scenarios/<scenario_name>/versions? \
+                installer=<installer_name>
+        @param body: new versions
+        @type body: C{list} of L{ScenarioVersion}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @return 200: replace versions success.
+        @raise 400: bad schema
+        @raise 404:  scenario/installer not exist
+        """
+        self.do_update('versions',
+                       'put',
+                       locators={'scenario': scenario,
+                                 'installer': None})
+
+    @swagger.operation(nickname="deleteVersionsUnderScenario")
+    def delete(self, scenario):
+        """
+        @description: delete one or multiple versions
+        @notes: delete one or multiple versions
+            DELETE /api/v1/scenarios/<scenario_name>/versions? \
+                installer=<installer_name>
+        @param body: versions(names) to be deleted
+        @type body: C{list} of L{string}
+        @in body: body
+        @param installer: installer type
+        @type installer: L{string}
+        @in installer: query
+        @required installer: True
+        @return 200: delete versions success.
+        @raise 404:  scenario/installer not exist
+        """
+        self.do_update('versions',
+                       'delete',
+                       locators={'scenario': scenario,
+                                 'installer': None})
+
+
+class ScenarioInstallersHandler(GenericScenarioUpdateHandler):
+    @swagger.operation(nickname="addInstallersUnderScenario")
+    def post(self, scenario):
+        """
+        @description: add installers to scenario
+        @notes: add one or multiple installers
+            POST /api/v1/scenarios/<scenario_name>/installers
+        @param body: installers to be added
+        @type body: C{list} of L{ScenarioInstaller}
+        @in body: body
+        @return 200: installers are added.
+        @raise 400: bad schema
+        @raise 409: conflict, installer already exists
+        @raise 404:  scenario not exist
+        """
+        self.do_update('installers',
+                       'post',
+                       locators={'scenario': scenario})
+
+    @swagger.operation(nickname="updateInstallersUnderScenario")
+    def put(self, scenario):
+        """
+        @description: replace all installers
+        @notes: substitute all installers as a totality
+            PUT /api/v1/scenarios/<scenario_name>/installers
+        @param body: new installers
+        @type body: C{list} of L{ScenarioInstaller}
+        @in body: body
+        @return 200: replace versions success.
+        @raise 400: bad schema
+        @raise 404:  scenario/installer not exist
+        """
+        self.do_update('installers',
+                       'put',
+                       locators={'scenario': scenario})
+
+    @swagger.operation(nickname="deleteInstallersUnderScenario")
+    def delete(self, scenario):
+        """
+        @description: delete one or multiple installers
+        @notes: delete one or multiple installers
+            DELETE /api/v1/scenarios/<scenario_name>/installers
+        @param body: installers(names) to be deleted
+        @type body: C{list} of L{string}
+        @in body: body
+        @return 200: delete versions success.
+        @raise 404:  scenario/installer not exist
+        """
+        self.do_update('installers',
+                       'delete',
+                       locators={'scenario': scenario})
index 467cff2..d950ed1 100644 (file)
@@ -16,6 +16,13 @@ class ScenarioTI(models.ModelBase):
         self.date = date
         self.status = status
 
+    def __eq__(self, other):
+        return (self.date == other.date and
+                self.status == other.status)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
 
 @swagger.model()
 class ScenarioScore(models.ModelBase):
@@ -23,6 +30,13 @@ class ScenarioScore(models.ModelBase):
         self.date = date
         self.score = score
 
+    def __eq__(self, other):
+        return (self.date == other.date and
+                self.score == other.score)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
 
 @swagger.model()
 class ScenarioProject(models.ModelBase):
@@ -50,10 +64,10 @@ class ScenarioProject(models.ModelBase):
                 'trust_indicators': ScenarioTI}
 
     def __eq__(self, other):
-        return [self.project == other.project and
+        return (self.project == other.project and
                 self._customs_eq(other) and
                 self._scores_eq(other) and
-                self._ti_eq(other)]
+                self._ti_eq(other))
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -62,10 +76,10 @@ class ScenarioProject(models.ModelBase):
         return set(self.customs) == set(other.customs)
 
     def _scores_eq(self, other):
-        return set(self.scores) == set(other.scores)
+        return self.scores == other.scores
 
     def _ti_eq(self, other):
-        return set(self.trust_indicators) == set(other.trust_indicators)
+        return self.trust_indicators == other.trust_indicators
 
 
 @swagger.model()
@@ -74,7 +88,8 @@ class ScenarioVersion(models.ModelBase):
         @property projects:
         @ptype projects: C{list} of L{ScenarioProject}
     """
-    def __init__(self, version=None, projects=None):
+    def __init__(self, owner=None, version=None, projects=None):
+        self.owner = owner
         self.version = version
         self.projects = list_default(projects)
 
@@ -83,7 +98,9 @@ class ScenarioVersion(models.ModelBase):
         return {'projects': ScenarioProject}
 
     def __eq__(self, other):
-        return [self.version == other.version and self._projects_eq(other)]
+        return (self.version == other.version and
+                self.owner == other.owner and
+                self._projects_eq(other))
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -113,7 +130,7 @@ class ScenarioInstaller(models.ModelBase):
         return {'versions': ScenarioVersion}
 
     def __eq__(self, other):
-        return [self.installer == other.installer and self._versions_eq(other)]
+        return (self.installer == other.installer and self._versions_eq(other))
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -143,19 +160,16 @@ class ScenarioCreateRequest(models.ModelBase):
         return {'installers': ScenarioInstaller}
 
 
+@swagger.model()
+class ScenarioChangeOwnerRequest(models.ModelBase):
+    def __init__(self, owner=None):
+        self.owner = owner
+
+
 @swagger.model()
 class ScenarioUpdateRequest(models.ModelBase):
-    """
-        @property field: update field
-        @property op: add/delete/update
-        @property locate: information used to locate the field
-        @property term: new value
-    """
-    def __init__(self, field=None, op=None, locate=None, term=None):
-        self.field = field
-        self.op = op
-        self.locate = dict_default(locate)
-        self.term = dict_default(term)
+    def __init__(self, name=None):
+        self.name = name
 
 
 @swagger.model()
@@ -178,7 +192,7 @@ class Scenario(models.ModelBase):
         return not self.__eq__(other)
 
     def __eq__(self, other):
-        return [self.name == other.name and self._installers_eq(other)]
+        return (self.name == other.name and self._installers_eq(other))
 
     def _installers_eq(self, other):
         for s_install in self.installers:
index a2312de..3e3ab87 100644 (file)
@@ -8,7 +8,7 @@
 ##############################################################################
 import tornado.web
 
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
 from opnfv_testapi.resources import handlers
 from opnfv_testapi.resources import pod_handlers
 from opnfv_testapi.resources import project_handlers
@@ -48,20 +48,36 @@ mappings = [
     # Push results with mandatory request payload parameters
     # (project, case, and pod)
     (r"/api/v1/results", result_handlers.ResultsCLHandler),
+    (r'/api/v1/results/upload', result_handlers.ResultsUploadHandler),
     (r"/api/v1/results/([^/]+)", result_handlers.ResultsGURHandler),
 
     # scenarios
     (r"/api/v1/scenarios", scenario_handlers.ScenariosCLHandler),
     (r"/api/v1/scenarios/([^/]+)", scenario_handlers.ScenarioGURHandler),
+    (r"/api/v1/scenarios/([^/]+)/scores",
+     scenario_handlers.ScenarioScoresHandler),
+    (r"/api/v1/scenarios/([^/]+)/trust_indicators",
+     scenario_handlers.ScenarioTIsHandler),
+    (r"/api/v1/scenarios/([^/]+)/customs",
+     scenario_handlers.ScenarioCustomsHandler),
+    (r"/api/v1/scenarios/([^/]+)/projects",
+     scenario_handlers.ScenarioProjectsHandler),
+    (r"/api/v1/scenarios/([^/]+)/owner",
+     scenario_handlers.ScenarioOwnerHandler),
+    (r"/api/v1/scenarios/([^/]+)/versions",
+     scenario_handlers.ScenarioVersionsHandler),
+    (r"/api/v1/scenarios/([^/]+)/installers",
+     scenario_handlers.ScenarioInstallersHandler),
 
     # static path
     (r'/(.*\.(css|png|gif|js|html|json|map|woff2|woff|ttf))',
      tornado.web.StaticFileHandler,
-     {'path': config.Config().static_path}),
+     {'path': CONF.static_path}),
 
     (r'/', root.RootHandler),
     (r'/api/v1/auth/signin', sign.SigninHandler),
     (r'/api/v1/auth/signin_return', sign.SigninReturnHandler),
     (r'/api/v1/auth/signout', sign.SignoutHandler),
     (r'/api/v1/profile', user.ProfileHandler),
+
 ]
index fda2a09..be7f2b9 100644 (file)
@@ -12,5 +12,5 @@ port = 8000
 debug = True
 authenticate = False
 
-[swagger]
-base_url = http://localhost:8000
+[ui]
+url = http://localhost:8000
index 77cc6c6..c81c6c5 100644 (file)
@@ -13,5 +13,5 @@ port = 8000
 debug = True
 authenticate = False
 
-[swagger]
-base_url = http://localhost:8000
+[ui]
+url = http://localhost:8000
index 9988fc0..a9ed49c 100644 (file)
@@ -7,5 +7,5 @@ port = 8000
 debug = True
 authenticate = False
 
-[swagger]
-base_url = http://localhost:8000
+[ui]
+url = http://localhost:8000
index b3f3276..3a11f9d 100644 (file)
@@ -13,5 +13,5 @@ port = 8000
 debug = True
 authenticate = notboolean
 
-[swagger]
-base_url = http://localhost:8000
+[ui]
+url = http://localhost:8000
index d1b752a..8180719 100644 (file)
@@ -13,5 +13,5 @@ port = notint
 debug = True
 authenticate = False
 
-[swagger]
-base_url = http://localhost:8000
+[ui]
+url = http://localhost:8000
index 446b944..8cfc513 100644 (file)
@@ -1,16 +1,15 @@
-import os
+import argparse
 
-from opnfv_testapi.common import config
 
-
-def test_config_success():
-    config_file = os.path.join(os.path.dirname(__file__),
-                               '../../../../etc/config.ini')
-    config.Config.CONFIG = config_file
-    conf = config.Config()
-    assert conf.mongo_url == 'mongodb://127.0.0.1:27017/'
-    assert conf.mongo_dbname == 'test_results_collection'
-    assert conf.api_port == 8000
-    assert conf.api_debug is True
-    assert conf.api_authenticate is False
-    assert conf.swagger_base_url == 'http://localhost:8000'
+def test_config_normal(mocker, config_normal):
+    mocker.patch(
+        'argparse.ArgumentParser.parse_known_args',
+        return_value=(argparse.Namespace(config_file=config_normal), None))
+    from opnfv_testapi.common import config
+    CONF = config.Config()
+    assert CONF.mongo_url == 'mongodb://127.0.0.1:27017/'
+    assert CONF.mongo_dbname == 'test_results_collection'
+    assert CONF.api_port == 8000
+    assert CONF.api_debug is True
+    assert CONF.api_authenticate is False
+    assert CONF.ui_url == 'http://localhost:8000'
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py b/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py
new file mode 100644 (file)
index 0000000..feff1da
--- /dev/null
@@ -0,0 +1,8 @@
+from os import path
+
+import pytest
+
+
+@pytest.fixture
+def config_normal():
+    return path.join(path.dirname(__file__), 'common/normal.ini')
index b30c325..b8f696c 100644 (file)
@@ -10,6 +10,20 @@ import functools
 import httplib
 
 
+def upload(excepted_status, excepted_response):
+    def _upload(create_request):
+        @functools.wraps(create_request)
+        def wrap(self):
+            request = create_request(self)
+            status, body = self.upload(request)
+            if excepted_status == httplib.OK:
+                getattr(self, excepted_response)(body)
+            else:
+                self.assertIn(excepted_response, body)
+        return wrap
+    return _upload
+
+
 def create(excepted_status, excepted_response):
     def _create(create_request):
         @functools.wraps(create_request)
index ef74a08..0ca83df 100644 (file)
@@ -6,9 +6,10 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from operator import itemgetter
+
 from bson.objectid import ObjectId
 from concurrent.futures import ThreadPoolExecutor
-from operator import itemgetter
 
 
 def thread_execute(method, *args, **kwargs):
@@ -20,38 +21,52 @@ def thread_execute(method, *args, **kwargs):
 class MemCursor(object):
     def __init__(self, collection):
         self.collection = collection
-        self.count = len(self.collection)
+        self.length = len(self.collection)
         self.sorted = []
 
     def _is_next_exist(self):
-        return self.count != 0
+        return self.length != 0
 
     @property
     def fetch_next(self):
         return thread_execute(self._is_next_exist)
 
     def next_object(self):
-        self.count -= 1
+        self.length -= 1
         return self.collection.pop()
 
     def sort(self, key_or_list):
-        key = key_or_list[0][0]
-        if key_or_list[0][1] == -1:
-            reverse = True
-        else:
-            reverse = False
+        for k, v in key_or_list.iteritems():
+            if v == -1:
+                reverse = True
+            else:
+                reverse = False
 
-        if key_or_list is not None:
             self.collection = sorted(self.collection,
-                                     key=itemgetter(key), reverse=reverse)
+                                     key=itemgetter(k), reverse=reverse)
         return self
 
     def limit(self, limit):
         if limit != 0 and limit < len(self.collection):
-            self.collection = self.collection[0:limit]
-            self.count = limit
+            self.collection = self.collection[0: limit]
+            self.length = limit
         return self
 
+    def skip(self, skip):
+        if skip < self.length and (skip > 0):
+            self.collection = self.collection[self.length - skip: -1]
+            self.length -= skip
+        elif skip >= self.length:
+            self.collection = []
+            self.length = 0
+        return self
+
+    def _count(self):
+        return self.length
+
+    def count(self):
+        return thread_execute(self._count)
+
 
 class MemDb(object):
 
@@ -105,10 +120,14 @@ class MemDb(object):
 
     @staticmethod
     def _compare_date(spec, value):
+        gte = True
+        lt = False
         for k, v in spec.iteritems():
-            if k == '$gte' and value >= v:
-                return True
-        return False
+            if k == '$gte' and value < v:
+                gte = False
+            elif k == '$lt' and value < v:
+                lt = True
+        return gte and lt
 
     def _in(self, content, *args):
         if self.name == 'scenarios':
@@ -171,9 +190,8 @@ class MemDb(object):
                 elif k == 'trust_indicator.current':
                     if content.get('trust_indicator').get('current') != v:
                         return False
-                elif content.get(k, None) != v:
+                elif not isinstance(v, dict) and content.get(k, None) != v:
                     return False
-
         return True
 
     def _find(self, *args):
@@ -187,6 +205,27 @@ class MemDb(object):
     def find(self, *args):
         return MemCursor(self._find(*args))
 
+    def _aggregate(self, *args, **kwargs):
+        res = self.contents
+        print args
+        for arg in args[0]:
+            for k, v in arg.iteritems():
+                if k == '$match':
+                    res = self._find(v)
+        cursor = MemCursor(res)
+        for arg in args[0]:
+            for k, v in arg.iteritems():
+                if k == '$sort':
+                    cursor = cursor.sort(v)
+                elif k == '$skip':
+                    cursor = cursor.skip(v)
+                elif k == '$limit':
+                    cursor = cursor.limit(v)
+        return cursor
+
+    def aggregate(self, *args, **kwargs):
+        return self._aggregate(*args, **kwargs)
+
     def _update(self, spec, document, check_keys=True):
         updated = False
 
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/resources/__init__.py b/utils/test/testapi/opnfv_testapi/tests/unit/resources/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
@@ -8,7 +8,7 @@
       [
         {
           "owner": "Lucky",
-          "version": "colorado",
+          "version": "danube",
           "projects":
           [
             {
@@ -29,7 +29,7 @@
               "scores": [
                 {
                   "date": "2017-01-08 22:46:44",
-                  "score": "0"
+                  "score": "0/1"
                 }
               ],
               "trust_indicators": [
@@ -12,13 +12,9 @@ from os import path
 import mock
 from tornado import testing
 
-from opnfv_testapi.common import config
 from opnfv_testapi.resources import models
 from opnfv_testapi.tests.unit import fake_pymongo
 
-config.Config.CONFIG = path.join(path.dirname(__file__),
-                                 '../../../etc/config.ini')
-
 
 class TestBase(testing.AsyncHTTPTestCase):
     headers = {'Content-Type': 'application/json; charset=UTF-8'}
@@ -37,20 +33,21 @@ class TestBase(testing.AsyncHTTPTestCase):
 
     def tearDown(self):
         self.db_patcher.stop()
+        self.config_patcher.stop()
 
     def _patch_server(self):
-        from opnfv_testapi.cmd import server
-        server.parse_config([
-            '--config-file',
-            path.join(path.dirname(__file__), 'common/normal.ini')
-        ])
-        self.db_patcher = mock.patch('opnfv_testapi.cmd.server.get_db',
-                                     self._fake_pymongo)
+        import argparse
+        config = path.join(path.dirname(__file__), '../common/normal.ini')
+        self.config_patcher = mock.patch(
+            'argparse.ArgumentParser.parse_known_args',
+            return_value=(argparse.Namespace(config_file=config), None))
+        self.db_patcher = mock.patch('opnfv_testapi.db.api.DB',
+                                     fake_pymongo)
+        self.config_patcher.start()
         self.db_patcher.start()
 
-    @staticmethod
-    def _fake_pymongo():
-        return fake_pymongo
+    def set_config_file(self):
+        self.config_file = 'normal.ini'
 
     def get_app(self):
         from opnfv_testapi.cmd import server
@@ -66,9 +63,12 @@ class TestBase(testing.AsyncHTTPTestCase):
         return self.create_help(self.basePath, req, *args)
 
     def create_help(self, uri, req, *args):
+        return self.post_direct_url(self._update_uri(uri, *args), req)
+
+    def post_direct_url(self, url, req):
         if req and not isinstance(req, str) and hasattr(req, 'format'):
             req = req.format()
-        res = self.fetch(self._update_uri(uri, *args),
+        res = self.fetch(url,
                          method='POST',
                          body=json.dumps(req),
                          headers=self.headers)
@@ -92,21 +92,35 @@ class TestBase(testing.AsyncHTTPTestCase):
                          headers=self.headers)
         return self._get_return(res, self.list_res)
 
-    def update(self, new=None, *args):
-        if new:
+    def update_direct_url(self, url, new=None):
+        if new and hasattr(new, 'format'):
             new = new.format()
-        res = self.fetch(self._get_uri(*args),
+        res = self.fetch(url,
                          method='PUT',
                          body=json.dumps(new),
                          headers=self.headers)
         return self._get_return(res, self.update_res)
 
-    def delete(self, *args):
-        res = self.fetch(self._get_uri(*args),
-                         method='DELETE',
-                         headers=self.headers)
+    def update(self, new=None, *args):
+        return self.update_direct_url(self._get_uri(*args), new)
+
+    def delete_direct_url(self, url, body):
+        if body:
+            res = self.fetch(url,
+                             method='DELETE',
+                             body=json.dumps(body),
+                             headers=self.headers,
+                             allow_nonstandard_methods=True)
+        else:
+            res = self.fetch(url,
+                             method='DELETE',
+                             headers=self.headers)
+
         return res.code, res.body
 
+    def delete(self, *args):
+        return self.delete_direct_url(self._get_uri(*args), None)
+
     @staticmethod
     def _get_valid_args(*args):
         new_args = tuple(['%s' % arg for arg in args if arg is not None])
@@ -132,7 +146,10 @@ class TestBase(testing.AsyncHTTPTestCase):
     def _get_return(self, res, cls):
         code = res.code
         body = res.body
-        return code, self._get_return_body(code, body, cls)
+        if body:
+            return code, self._get_return_body(code, body, cls)
+        else:
+            return code, None
 
     @staticmethod
     def _get_return_body(code, body, cls):
@@ -12,7 +12,7 @@ import unittest
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import pod_models
 from opnfv_testapi.tests.unit import executor
-from opnfv_testapi.tests.unit import test_base as base
+from opnfv_testapi.tests.unit.resources import test_base as base
 
 
 class TestPodBase(base.TestBase):
@@ -85,5 +85,6 @@ class TestPodGet(TestPodBase):
             else:
                 self.assert_get_body(pod, self.req_e)
 
+
 if __name__ == '__main__':
     unittest.main()
@@ -4,7 +4,7 @@ import unittest
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import project_models
 from opnfv_testapi.tests.unit import executor
-from opnfv_testapi.tests.unit import test_base as base
+from opnfv_testapi.tests.unit.resources import test_base as base
 
 
 class TestProjectBase(base.TestBase):
@@ -132,5 +132,6 @@ class TestProjectDelete(TestProjectBase):
         code, body = self.get(self.req_d.name)
         self.assertEqual(code, httplib.NOT_FOUND)
 
+
 if __name__ == '__main__':
     unittest.main()
@@ -7,17 +7,18 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import copy
-from datetime import datetime, timedelta
 import httplib
 import unittest
+from datetime import datetime, timedelta
+import json
 
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import pod_models
 from opnfv_testapi.resources import project_models
 from opnfv_testapi.resources import result_models
 from opnfv_testapi.resources import testcase_models
-from opnfv_testapi.tests.unit import test_base as base
 from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit.resources import test_base as base
 
 
 class Details(object):
@@ -60,9 +61,9 @@ class TestResultBase(base.TestBase):
         self.scenario = 'odl-l2'
         self.criteria = 'passed'
         self.trust_indicator = result_models.TI(0.7)
-        self.start_date = "2016-05-23 07:16:09.477097"
-        self.stop_date = "2016-05-23 07:16:19.477097"
-        self.update_date = "2016-05-24 07:16:19.477097"
+        self.start_date = str(datetime.now())
+        self.stop_date = str(datetime.now() + timedelta(minutes=1))
+        self.update_date = str(datetime.now() + timedelta(days=1))
         self.update_step = -0.05
         super(TestResultBase, self).setUp()
         self.details = Details(timestart='0', duration='9s', status='OK')
@@ -131,6 +132,22 @@ class TestResultBase(base.TestBase):
         _, res = self.create_d()
         return res.href.split('/')[-1]
 
+    def upload(self, req):
+        if req and not isinstance(req, str) and hasattr(req, 'format'):
+            req = req.format()
+        res = self.fetch(self.basePath + '/upload',
+                         method='POST',
+                         body=json.dumps(req),
+                         headers=self.headers)
+
+        return self._get_return(res, self.create_res)
+
+
+class TestResultUpload(TestResultBase):
+    @executor.upload(httplib.BAD_REQUEST, message.key_error('file'))
+    def test_filenotfind(self):
+        return None
+
 
 class TestResultCreate(TestResultBase):
     @executor.create(httplib.BAD_REQUEST, message.no_body())
@@ -208,9 +225,9 @@ class TestResultCreate(TestResultBase):
 class TestResultGet(TestResultBase):
     def setUp(self):
         super(TestResultGet, self).setUp()
+        self.req_10d_before = self._create_changed_date(days=-10)
         self.req_d_id = self._create_d()
         self.req_10d_later = self._create_changed_date(days=10)
-        self.req_10d_before = self._create_changed_date(days=-10)
 
     @executor.get(httplib.OK, 'assert_res')
     def test_getOne(self):
@@ -256,9 +273,9 @@ class TestResultGet(TestResultBase):
     def test_queryPeriodNotInt(self):
         return self._set_query('period=a')
 
-    @executor.query(httplib.OK, '_query_last_one', 1)
+    @executor.query(httplib.OK, '_query_period_one', 1)
     def test_queryPeriodSuccess(self):
-        return self._set_query('period=1')
+        return self._set_query('period=5')
 
     @executor.query(httplib.BAD_REQUEST, message.must_int('last'))
     def test_queryLastNotInt(self):
@@ -268,7 +285,17 @@ class TestResultGet(TestResultBase):
     def test_queryLast(self):
         return self._set_query('last=1')
 
-    @executor.query(httplib.OK, '_query_last_one', 1)
+    @executor.query(httplib.OK, '_query_success', 4)
+    def test_queryPublic(self):
+        self._create_public_data()
+        return self._set_query('')
+
+    @executor.query(httplib.OK, '_query_success', 1)
+    def test_queryPrivate(self):
+        self._create_private_data()
+        return self._set_query('public=false')
+
+    @executor.query(httplib.OK, '_query_period_one', 1)
     def test_combination(self):
         return self._set_query('pod',
                                'project',
@@ -279,7 +306,7 @@ class TestResultGet(TestResultBase):
                                'scenario',
                                'trust_indicator',
                                'criteria',
-                               'period=1')
+                               'period=5')
 
     @executor.query(httplib.OK, '_query_success', 0)
     def test_notFound(self):
@@ -294,6 +321,14 @@ class TestResultGet(TestResultBase):
                                'criteria',
                                'period=1')
 
+    @executor.query(httplib.OK, '_query_success', 1)
+    def test_filterErrorStartdate(self):
+        self._create_error_start_date(None)
+        self._create_error_start_date('None')
+        self._create_error_start_date('null')
+        self._create_error_start_date('')
+        return self._set_query('period=5')
+
     def _query_success(self, body, number):
         self.assertEqual(number, len(body.results))
 
@@ -301,6 +336,16 @@ class TestResultGet(TestResultBase):
         self.assertEqual(number, len(body.results))
         self.assert_res(body.results[0], self.req_10d_later)
 
+    def _query_period_one(self, body, number):
+        self.assertEqual(number, len(body.results))
+        self.assert_res(body.results[0], self.req_d)
+
+    def _create_error_start_date(self, start_date):
+        req = copy.deepcopy(self.req_d)
+        req.start_date = start_date
+        self.create(req)
+        return req
+
     def _create_changed_date(self, **kwargs):
         req = copy.deepcopy(self.req_d)
         req.start_date = datetime.now() + timedelta(**kwargs)
@@ -309,16 +354,29 @@ class TestResultGet(TestResultBase):
         self.create(req)
         return req
 
+    def _create_public_data(self, **kwargs):
+        req = copy.deepcopy(self.req_d)
+        req.public = 'true'
+        self.create(req)
+        return req
+
+    def _create_private_data(self, **kwargs):
+        req = copy.deepcopy(self.req_d)
+        req.public = 'false'
+        self.create(req)
+        return req
+
     def _set_query(self, *args):
         def get_value(arg):
             return self.__getattribute__(arg) \
                 if arg != 'trust_indicator' else self.trust_indicator.current
         uri = ''
         for arg in args:
-            if '=' in arg:
-                uri += arg + '&'
-            else:
-                uri += '{}={}&'.format(arg, get_value(arg))
+            if arg:
+                if '=' in arg:
+                    uri += arg + '&'
+                else:
+                    uri += '{}={}&'.format(arg, get_value(arg))
         return uri[0: -1]
 
 
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_scenario.py
new file mode 100644 (file)
index 0000000..1367fc6
--- /dev/null
@@ -0,0 +1,449 @@
+import functools
+import httplib
+import json
+import os
+
+from datetime import datetime
+
+from opnfv_testapi.common import message
+import opnfv_testapi.resources.scenario_models as models
+from opnfv_testapi.tests.unit.resources import test_base as base
+
+
+def _none_default(check, default):
+    return check if check else default
+
+
+class TestScenarioBase(base.TestBase):
+    def setUp(self):
+        super(TestScenarioBase, self).setUp()
+        self.get_res = models.Scenario
+        self.list_res = models.Scenarios
+        self.basePath = '/api/v1/scenarios'
+        self.req_d = self._load_request('scenario-c1.json')
+        self.req_2 = self._load_request('scenario-c2.json')
+
+    def tearDown(self):
+        pass
+
+    def assert_body(self, project, req=None):
+        pass
+
+    @staticmethod
+    def _load_request(f_req):
+        abs_file = os.path.join(os.path.dirname(__file__), f_req)
+        with open(abs_file, 'r') as f:
+            loader = json.load(f)
+            f.close()
+        return loader
+
+    def create_return_name(self, req):
+        _, res = self.create(req)
+        return res.href.split('/')[-1]
+
+    def assert_res(self, code, scenario, req=None):
+        self.assertEqual(code, httplib.OK)
+        if req is None:
+            req = self.req_d
+        self.assertIsNotNone(scenario._id)
+        self.assertIsNotNone(scenario.creation_date)
+        self.assertEqual(scenario, models.Scenario.from_dict(req))
+
+    @staticmethod
+    def set_query(*args):
+        uri = ''
+        for arg in args:
+            uri += arg + '&'
+        return uri[0: -1]
+
+    def get_and_assert(self, name):
+        code, body = self.get(name)
+        self.assert_res(code, body, self.req_d)
+
+
+class TestScenarioCreate(TestScenarioBase):
+    def test_withoutBody(self):
+        (code, body) = self.create()
+        self.assertEqual(code, httplib.BAD_REQUEST)
+
+    def test_emptyName(self):
+        req_empty = models.ScenarioCreateRequest('')
+        (code, body) = self.create(req_empty)
+        self.assertEqual(code, httplib.BAD_REQUEST)
+        self.assertIn(message.missing('name'), body)
+
+    def test_noneName(self):
+        req_none = models.ScenarioCreateRequest(None)
+        (code, body) = self.create(req_none)
+        self.assertEqual(code, httplib.BAD_REQUEST)
+        self.assertIn(message.missing('name'), body)
+
+    def test_success(self):
+        (code, body) = self.create_d()
+        self.assertEqual(code, httplib.OK)
+        self.assert_create_body(body)
+
+    def test_alreadyExist(self):
+        self.create_d()
+        (code, body) = self.create_d()
+        self.assertEqual(code, httplib.FORBIDDEN)
+        self.assertIn(message.exist_base, body)
+
+
+class TestScenarioGet(TestScenarioBase):
+    def setUp(self):
+        super(TestScenarioGet, self).setUp()
+        self.scenario_1 = self.create_return_name(self.req_d)
+        self.scenario_2 = self.create_return_name(self.req_2)
+
+    def test_getByName(self):
+        self.get_and_assert(self.scenario_1)
+
+    def test_getAll(self):
+        self._query_and_assert(query=None, reqs=[self.req_d, self.req_2])
+
+    def test_queryName(self):
+        query = self.set_query('name=nosdn-nofeature-ha')
+        self._query_and_assert(query, reqs=[self.req_d])
+
+    def test_queryInstaller(self):
+        query = self.set_query('installer=apex')
+        self._query_and_assert(query, reqs=[self.req_d])
+
+    def test_queryVersion(self):
+        query = self.set_query('version=master')
+        self._query_and_assert(query, reqs=[self.req_d])
+
+    def test_queryProject(self):
+        query = self.set_query('project=functest')
+        self._query_and_assert(query, reqs=[self.req_d, self.req_2])
+
+    # close due to random fail, open again after solve it in another patch
+    # def test_queryCombination(self):
+    #     query = self._set_query('name=nosdn-nofeature-ha',
+    #                             'installer=apex',
+    #                             'version=master',
+    #                             'project=functest')
+    #
+    #     self._query_and_assert(query, reqs=[self.req_d])
+
+    def _query_and_assert(self, query, found=True, reqs=None):
+        code, body = self.query(query)
+        if not found:
+            self.assertEqual(code, httplib.OK)
+            self.assertEqual(0, len(body.scenarios))
+        else:
+            self.assertEqual(len(reqs), len(body.scenarios))
+            for req in reqs:
+                for scenario in body.scenarios:
+                    if req['name'] == scenario.name:
+                        self.assert_res(code, scenario, req)
+
+
+class TestScenarioDelete(TestScenarioBase):
+    def test_notFound(self):
+        code, body = self.delete('notFound')
+        self.assertEqual(code, httplib.NOT_FOUND)
+
+    def test_success(self):
+        scenario = self.create_return_name(self.req_d)
+        code, _ = self.delete(scenario)
+        self.assertEqual(code, httplib.OK)
+        code, _ = self.get(scenario)
+        self.assertEqual(code, httplib.NOT_FOUND)
+
+
+class TestScenarioUpdate(TestScenarioBase):
+    def setUp(self):
+        super(TestScenarioUpdate, self).setUp()
+        self.scenario = self.create_return_name(self.req_d)
+        self.scenario_2 = self.create_return_name(self.req_2)
+        self.update_url = ''
+        self.scenario_url = '/api/v1/scenarios/{}'.format(self.scenario)
+        self.installer = self.req_d['installers'][0]['installer']
+        self.version = self.req_d['installers'][0]['versions'][0]['version']
+        self.locate_project = 'installer={}&version={}&project={}'.format(
+            self.installer,
+            self.version,
+            'functest')
+
+    def update_url_fixture(item):
+        def _update_url_fixture(xstep):
+            def wrapper(self, *args, **kwargs):
+                self.update_url = '{}/{}'.format(self.scenario_url, item)
+                locator = None
+                if item in ['projects', 'owner']:
+                    locator = 'installer={}&version={}'.format(
+                        self.installer,
+                        self.version)
+                elif item in ['versions']:
+                    locator = 'installer={}'.format(
+                        self.installer)
+                elif item in ['rename']:
+                    self.update_url = self.scenario_url
+
+                if locator:
+                    self.update_url = '{}?{}'.format(self.update_url, locator)
+
+                xstep(self, *args, **kwargs)
+            return wrapper
+        return _update_url_fixture
+
+    def update_partial(operate, expected):
+        def _update_partial(set_update):
+            @functools.wraps(set_update)
+            def wrapper(self):
+                update = set_update(self)
+                code, body = getattr(self, operate)(update)
+                getattr(self, expected)(code)
+            return wrapper
+        return _update_partial
+
+    @update_partial('_add', '_success')
+    def test_addScore(self):
+        add = models.ScenarioScore(date=str(datetime.now()), score='11/12')
+        projects = self.req_d['installers'][0]['versions'][0]['projects']
+        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
+        functest['scores'].append(add.format())
+        self.update_url = '{}/scores?{}'.format(self.scenario_url,
+                                                self.locate_project)
+
+        return add
+
+    @update_partial('_add', '_success')
+    def test_addTrustIndicator(self):
+        add = models.ScenarioTI(date=str(datetime.now()), status='gold')
+        projects = self.req_d['installers'][0]['versions'][0]['projects']
+        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
+        functest['trust_indicators'].append(add.format())
+        self.update_url = '{}/trust_indicators?{}'.format(self.scenario_url,
+                                                          self.locate_project)
+
+        return add
+
+    @update_partial('_add', '_success')
+    def test_addCustoms(self):
+        adds = ['odl', 'parser', 'vping_ssh']
+        projects = self.req_d['installers'][0]['versions'][0]['projects']
+        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
+        functest['customs'] = list(set(functest['customs'] + adds))
+        self.update_url = '{}/customs?{}'.format(self.scenario_url,
+                                                 self.locate_project)
+        return adds
+
+    @update_partial('_update', '_success')
+    def test_updateCustoms(self):
+        updates = ['odl', 'parser', 'vping_ssh']
+        projects = self.req_d['installers'][0]['versions'][0]['projects']
+        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
+        functest['customs'] = updates
+        self.update_url = '{}/customs?{}'.format(self.scenario_url,
+                                                 self.locate_project)
+
+        return updates
+
+    @update_partial('_delete', '_success')
+    def test_deleteCustoms(self):
+        deletes = ['vping_ssh']
+        projects = self.req_d['installers'][0]['versions'][0]['projects']
+        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
+        functest['customs'] = ['healthcheck']
+        self.update_url = '{}/customs?{}'.format(self.scenario_url,
+                                                 self.locate_project)
+
+        return deletes
+
+    @update_url_fixture('projects')
+    @update_partial('_add', '_success')
+    def test_addProjects_succ(self):
+        add = models.ScenarioProject(project='qtip').format()
+        self.req_d['installers'][0]['versions'][0]['projects'].append(add)
+        return [add]
+
+    @update_url_fixture('projects')
+    @update_partial('_add', '_conflict')
+    def test_addProjects_already_exist(self):
+        add = models.ScenarioProject(project='functest').format()
+        return [add]
+
+    @update_url_fixture('projects')
+    @update_partial('_add', '_bad_request')
+    def test_addProjects_bad_schema(self):
+        add = models.ScenarioProject(project='functest').format()
+        add['score'] = None
+        return [add]
+
+    @update_url_fixture('projects')
+    @update_partial('_update', '_success')
+    def test_updateProjects_succ(self):
+        update = models.ScenarioProject(project='qtip').format()
+        self.req_d['installers'][0]['versions'][0]['projects'] = [update]
+        return [update]
+
+    @update_url_fixture('projects')
+    @update_partial('_update', '_conflict')
+    def test_updateProjects_duplicated(self):
+        update = models.ScenarioProject(project='qtip').format()
+        return [update, update]
+
+    @update_url_fixture('projects')
+    @update_partial('_update', '_bad_request')
+    def test_updateProjects_bad_schema(self):
+        update = models.ScenarioProject(project='functest').format()
+        update['score'] = None
+        return [update]
+
+    @update_url_fixture('projects')
+    @update_partial('_delete', '_success')
+    def test_deleteProjects(self):
+        deletes = ['functest']
+        projects = self.req_d['installers'][0]['versions'][0]['projects']
+        self.req_d['installers'][0]['versions'][0]['projects'] = filter(
+            lambda f: f['project'] != 'functest',
+            projects)
+        return deletes
+
+    @update_url_fixture('owner')
+    @update_partial('_update', '_success')
+    def test_changeOwner(self):
+        new_owner = 'new_owner'
+        update = models.ScenarioChangeOwnerRequest(new_owner).format()
+        self.req_d['installers'][0]['versions'][0]['owner'] = new_owner
+        return update
+
+    @update_url_fixture('versions')
+    @update_partial('_add', '_success')
+    def test_addVersions_succ(self):
+        add = models.ScenarioVersion(version='Euphrates').format()
+        self.req_d['installers'][0]['versions'].append(add)
+        return [add]
+
+    @update_url_fixture('versions')
+    @update_partial('_add', '_conflict')
+    def test_addVersions_already_exist(self):
+        add = models.ScenarioVersion(version='master').format()
+        return [add]
+
+    @update_url_fixture('versions')
+    @update_partial('_add', '_bad_request')
+    def test_addVersions_bad_schema(self):
+        add = models.ScenarioVersion(version='euphrates').format()
+        add['notexist'] = None
+        return [add]
+
+    @update_url_fixture('versions')
+    @update_partial('_update', '_success')
+    def test_updateVersions_succ(self):
+        update = models.ScenarioVersion(version='euphrates').format()
+        self.req_d['installers'][0]['versions'] = [update]
+        return [update]
+
+    @update_url_fixture('versions')
+    @update_partial('_update', '_conflict')
+    def test_updateVersions_duplicated(self):
+        update = models.ScenarioVersion(version='euphrates').format()
+        return [update, update]
+
+    @update_url_fixture('versions')
+    @update_partial('_update', '_bad_request')
+    def test_updateVersions_bad_schema(self):
+        update = models.ScenarioVersion(version='euphrates').format()
+        update['not_owner'] = 'Iam'
+        return [update]
+
+    @update_url_fixture('versions')
+    @update_partial('_delete', '_success')
+    def test_deleteVersions(self):
+        deletes = ['master']
+        versions = self.req_d['installers'][0]['versions']
+        self.req_d['installers'][0]['versions'] = filter(
+            lambda f: f['version'] != 'master',
+            versions)
+        return deletes
+
+    @update_url_fixture('installers')
+    @update_partial('_add', '_success')
+    def test_addInstallers_succ(self):
+        add = models.ScenarioInstaller(installer='daisy').format()
+        self.req_d['installers'].append(add)
+        return [add]
+
+    @update_url_fixture('installers')
+    @update_partial('_add', '_conflict')
+    def test_addInstallers_already_exist(self):
+        add = models.ScenarioInstaller(installer='apex').format()
+        return [add]
+
+    @update_url_fixture('installers')
+    @update_partial('_add', '_bad_request')
+    def test_addInstallers_bad_schema(self):
+        add = models.ScenarioInstaller(installer='daisy').format()
+        add['not_exist'] = 'not_exist'
+        return [add]
+
+    @update_url_fixture('installers')
+    @update_partial('_update', '_success')
+    def test_updateInstallers_succ(self):
+        update = models.ScenarioInstaller(installer='daisy').format()
+        self.req_d['installers'] = [update]
+        return [update]
+
+    @update_url_fixture('installers')
+    @update_partial('_update', '_conflict')
+    def test_updateInstallers_duplicated(self):
+        update = models.ScenarioInstaller(installer='daisy').format()
+        return [update, update]
+
+    @update_url_fixture('installers')
+    @update_partial('_update', '_bad_request')
+    def test_updateInstallers_bad_schema(self):
+        update = models.ScenarioInstaller(installer='daisy').format()
+        update['not_exist'] = 'not_exist'
+        return [update]
+
+    @update_url_fixture('installers')
+    @update_partial('_delete', '_success')
+    def test_deleteInstallers(self):
+        deletes = ['apex']
+        installers = self.req_d['installers']
+        self.req_d['installers'] = filter(
+            lambda f: f['installer'] != 'apex',
+            installers)
+        return deletes
+
+    @update_url_fixture('rename')
+    @update_partial('_update', '_success')
+    def test_renameScenario(self):
+        new_name = 'new_scenario_name'
+        update = models.ScenarioUpdateRequest(name=new_name)
+        self.req_d['name'] = new_name
+        return update
+
+    @update_url_fixture('rename')
+    @update_partial('_update', '_forbidden')
+    def test_renameScenario_exist(self):
+        new_name = self.req_d['name']
+        update = models.ScenarioUpdateRequest(name=new_name)
+        return update
+
+    def _add(self, update_req):
+        return self.post_direct_url(self.update_url, update_req)
+
+    def _update(self, update_req):
+        return self.update_direct_url(self.update_url, update_req)
+
+    def _delete(self, update_req):
+        return self.delete_direct_url(self.update_url, update_req)
+
+    def _success(self, status):
+        self.assertEqual(status, httplib.OK)
+        self.get_and_assert(self.req_d['name'])
+
+    def _forbidden(self, status):
+        self.assertEqual(status, httplib.FORBIDDEN)
+
+    def _bad_request(self, status):
+        self.assertEqual(status, httplib.BAD_REQUEST)
+
+    def _conflict(self, status):
+        self.assertEqual(status, httplib.CONFLICT)
@@ -13,8 +13,8 @@ import unittest
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import project_models
 from opnfv_testapi.resources import testcase_models
-from opnfv_testapi.tests.unit import test_base as base
 from opnfv_testapi.tests.unit import executor
+from opnfv_testapi.tests.unit.resources import test_base as base
 
 
 class TestCaseBase(base.TestBase):
@@ -10,14 +10,14 @@ from tornado import web
 
 from opnfv_testapi.common import message
 from opnfv_testapi.resources import project_models
-from opnfv_testapi.router import url_mappings
 from opnfv_testapi.tests.unit import executor
 from opnfv_testapi.tests.unit import fake_pymongo
-from opnfv_testapi.tests.unit import test_base as base
+from opnfv_testapi.tests.unit.resources import test_base as base
 
 
 class TestToken(base.TestBase):
     def get_app(self):
+        from opnfv_testapi.router import url_mappings
         return web.Application(
             url_mappings.mappings,
             db=fake_pymongo,
@@ -109,5 +109,6 @@ class TestTokenUpdateProject(TestToken):
     def _update_success(self, request, body):
         self.assertIn(request.name, body)
 
+
 if __name__ == '__main__':
     unittest.main()
@@ -11,7 +11,7 @@ import unittest
 
 from opnfv_testapi.resources import models
 from opnfv_testapi.tests.unit import executor
-from opnfv_testapi.tests.unit import test_base as base
+from opnfv_testapi.tests.unit.resources import test_base as base
 
 
 class TestVersionBase(base.TestBase):
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
deleted file mode 100644 (file)
index b232bc1..0000000
+++ /dev/null
@@ -1,360 +0,0 @@
-from copy import deepcopy
-from datetime import datetime
-import functools
-import httplib
-import json
-import os
-
-from opnfv_testapi.common import message
-import opnfv_testapi.resources.scenario_models as models
-from opnfv_testapi.tests.unit import test_base as base
-
-
-class TestScenarioBase(base.TestBase):
-    def setUp(self):
-        super(TestScenarioBase, self).setUp()
-        self.get_res = models.Scenario
-        self.list_res = models.Scenarios
-        self.basePath = '/api/v1/scenarios'
-        self.req_d = self._load_request('scenario-c1.json')
-        self.req_2 = self._load_request('scenario-c2.json')
-
-    def tearDown(self):
-        pass
-
-    def assert_body(self, project, req=None):
-        pass
-
-    @staticmethod
-    def _load_request(f_req):
-        abs_file = os.path.join(os.path.dirname(__file__), f_req)
-        with open(abs_file, 'r') as f:
-            loader = json.load(f)
-            f.close()
-        return loader
-
-    def create_return_name(self, req):
-        _, res = self.create(req)
-        return res.href.split('/')[-1]
-
-    def assert_res(self, code, scenario, req=None):
-        self.assertEqual(code, httplib.OK)
-        if req is None:
-            req = self.req_d
-        self.assertIsNotNone(scenario._id)
-        self.assertIsNotNone(scenario.creation_date)
-
-        scenario == models.Scenario.from_dict(req)
-
-    @staticmethod
-    def _set_query(*args):
-        uri = ''
-        for arg in args:
-            uri += arg + '&'
-        return uri[0: -1]
-
-    def _get_and_assert(self, name, req=None):
-        code, body = self.get(name)
-        self.assert_res(code, body, req)
-
-
-class TestScenarioCreate(TestScenarioBase):
-    def test_withoutBody(self):
-        (code, body) = self.create()
-        self.assertEqual(code, httplib.BAD_REQUEST)
-
-    def test_emptyName(self):
-        req_empty = models.ScenarioCreateRequest('')
-        (code, body) = self.create(req_empty)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
-
-    def test_noneName(self):
-        req_none = models.ScenarioCreateRequest(None)
-        (code, body) = self.create(req_none)
-        self.assertEqual(code, httplib.BAD_REQUEST)
-        self.assertIn(message.missing('name'), body)
-
-    def test_success(self):
-        (code, body) = self.create_d()
-        self.assertEqual(code, httplib.OK)
-        self.assert_create_body(body)
-
-    def test_alreadyExist(self):
-        self.create_d()
-        (code, body) = self.create_d()
-        self.assertEqual(code, httplib.FORBIDDEN)
-        self.assertIn(message.exist_base, body)
-
-
-class TestScenarioGet(TestScenarioBase):
-    def setUp(self):
-        super(TestScenarioGet, self).setUp()
-        self.scenario_1 = self.create_return_name(self.req_d)
-        self.scenario_2 = self.create_return_name(self.req_2)
-
-    def test_getByName(self):
-        self._get_and_assert(self.scenario_1, self.req_d)
-
-    def test_getAll(self):
-        self._query_and_assert(query=None, reqs=[self.req_d, self.req_2])
-
-    def test_queryName(self):
-        query = self._set_query('name=nosdn-nofeature-ha')
-        self._query_and_assert(query, reqs=[self.req_d])
-
-    def test_queryInstaller(self):
-        query = self._set_query('installer=apex')
-        self._query_and_assert(query, reqs=[self.req_d])
-
-    def test_queryVersion(self):
-        query = self._set_query('version=master')
-        self._query_and_assert(query, reqs=[self.req_d])
-
-    def test_queryProject(self):
-        query = self._set_query('project=functest')
-        self._query_and_assert(query, reqs=[self.req_d, self.req_2])
-
-    def test_queryCombination(self):
-        query = self._set_query('name=nosdn-nofeature-ha',
-                                'installer=apex',
-                                'version=master',
-                                'project=functest')
-
-        self._query_and_assert(query, reqs=[self.req_d])
-
-    def _query_and_assert(self, query, found=True, reqs=None):
-        code, body = self.query(query)
-        if not found:
-            self.assertEqual(code, httplib.OK)
-            self.assertEqual(0, len(body.scenarios))
-        else:
-            self.assertEqual(len(reqs), len(body.scenarios))
-            for req in reqs:
-                for scenario in body.scenarios:
-                    if req['name'] == scenario.name:
-                        self.assert_res(code, scenario, req)
-
-
-class TestScenarioUpdate(TestScenarioBase):
-    def setUp(self):
-        super(TestScenarioUpdate, self).setUp()
-        self.scenario = self.create_return_name(self.req_d)
-        self.scenario_2 = self.create_return_name(self.req_2)
-
-    def _execute(set_update):
-        @functools.wraps(set_update)
-        def magic(self):
-            update, scenario = set_update(self, deepcopy(self.req_d))
-            self._update_and_assert(update, scenario)
-        return magic
-
-    def _update(expected):
-        def _update(set_update):
-            @functools.wraps(set_update)
-            def wrap(self):
-                update, scenario = set_update(self, deepcopy(self.req_d))
-                code, body = self.update(update, self.scenario)
-                getattr(self, expected)(code, scenario)
-            return wrap
-        return _update
-
-    @_update('_success')
-    def test_renameScenario(self, scenario):
-        new_name = 'nosdn-nofeature-noha'
-        scenario['name'] = new_name
-        update_req = models.ScenarioUpdateRequest(field='name',
-                                                  op='update',
-                                                  locate={},
-                                                  term={'name': new_name})
-        return update_req, scenario
-
-    @_update('_forbidden')
-    def test_renameScenario_exist(self, scenario):
-        new_name = self.scenario_2
-        scenario['name'] = new_name
-        update_req = models.ScenarioUpdateRequest(field='name',
-                                                  op='update',
-                                                  locate={},
-                                                  term={'name': new_name})
-        return update_req, scenario
-
-    @_update('_bad_request')
-    def test_renameScenario_noName(self, scenario):
-        new_name = self.scenario_2
-        scenario['name'] = new_name
-        update_req = models.ScenarioUpdateRequest(field='name',
-                                                  op='update',
-                                                  locate={},
-                                                  term={})
-        return update_req, scenario
-
-    @_execute
-    def test_addInstaller(self, scenario):
-        add = models.ScenarioInstaller(installer='daisy', versions=list())
-        scenario['installers'].append(add.format())
-        update = models.ScenarioUpdateRequest(field='installer',
-                                              op='add',
-                                              locate={},
-                                              term=add.format())
-        return update, scenario
-
-    @_execute
-    def test_deleteInstaller(self, scenario):
-        scenario['installers'] = filter(lambda f: f['installer'] != 'apex',
-                                        scenario['installers'])
-
-        update = models.ScenarioUpdateRequest(field='installer',
-                                              op='delete',
-                                              locate={'installer': 'apex'})
-        return update, scenario
-
-    @_execute
-    def test_addVersion(self, scenario):
-        add = models.ScenarioVersion(version='danube', projects=list())
-        scenario['installers'][0]['versions'].append(add.format())
-        update = models.ScenarioUpdateRequest(field='version',
-                                              op='add',
-                                              locate={'installer': 'apex'},
-                                              term=add.format())
-        return update, scenario
-
-    @_execute
-    def test_deleteVersion(self, scenario):
-        scenario['installers'][0]['versions'] = filter(
-            lambda f: f['version'] != 'master',
-            scenario['installers'][0]['versions'])
-
-        update = models.ScenarioUpdateRequest(field='version',
-                                              op='delete',
-                                              locate={'installer': 'apex',
-                                                      'version': 'master'})
-        return update, scenario
-
-    @_execute
-    def test_changeOwner(self, scenario):
-        scenario['installers'][0]['versions'][0]['owner'] = 'lucy'
-
-        update = models.ScenarioUpdateRequest(field='owner',
-                                              op='update',
-                                              locate={'installer': 'apex',
-                                                      'version': 'master'},
-                                              term={'owner': 'lucy'})
-        return update, scenario
-
-    @_execute
-    def test_addProject(self, scenario):
-        add = models.ScenarioProject(project='qtip').format()
-        scenario['installers'][0]['versions'][0]['projects'].append(add)
-        update = models.ScenarioUpdateRequest(field='project',
-                                              op='add',
-                                              locate={'installer': 'apex',
-                                                      'version': 'master'},
-                                              term=add)
-        return update, scenario
-
-    @_execute
-    def test_deleteProject(self, scenario):
-        scenario['installers'][0]['versions'][0]['projects'] = filter(
-            lambda f: f['project'] != 'functest',
-            scenario['installers'][0]['versions'][0]['projects'])
-
-        update = models.ScenarioUpdateRequest(field='project',
-                                              op='delete',
-                                              locate={
-                                                  'installer': 'apex',
-                                                  'version': 'master',
-                                                  'project': 'functest'})
-        return update, scenario
-
-    @_execute
-    def test_addCustoms(self, scenario):
-        add = ['odl', 'parser', 'vping_ssh']
-        projects = scenario['installers'][0]['versions'][0]['projects']
-        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
-        functest['customs'] = ['healthcheck', 'odl', 'parser', 'vping_ssh']
-        update = models.ScenarioUpdateRequest(field='customs',
-                                              op='add',
-                                              locate={
-                                                  'installer': 'apex',
-                                                  'version': 'master',
-                                                  'project': 'functest'},
-                                              term=add)
-        return update, scenario
-
-    @_execute
-    def test_deleteCustoms(self, scenario):
-        projects = scenario['installers'][0]['versions'][0]['projects']
-        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
-        functest['customs'] = ['healthcheck']
-        update = models.ScenarioUpdateRequest(field='customs',
-                                              op='delete',
-                                              locate={
-                                                  'installer': 'apex',
-                                                  'version': 'master',
-                                                  'project': 'functest'},
-                                              term=['vping_ssh'])
-        return update, scenario
-
-    @_execute
-    def test_addScore(self, scenario):
-        add = models.ScenarioScore(date=str(datetime.now()), score='11/12')
-        projects = scenario['installers'][0]['versions'][0]['projects']
-        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
-        functest['scores'].append(add.format())
-        update = models.ScenarioUpdateRequest(field='score',
-                                              op='add',
-                                              locate={
-                                                  'installer': 'apex',
-                                                  'version': 'master',
-                                                  'project': 'functest'},
-                                              term=add.format())
-        return update, scenario
-
-    @_execute
-    def test_addTi(self, scenario):
-        add = models.ScenarioTI(date=str(datetime.now()), status='gold')
-        projects = scenario['installers'][0]['versions'][0]['projects']
-        functest = filter(lambda f: f['project'] == 'functest', projects)[0]
-        functest['trust_indicators'].append(add.format())
-        update = models.ScenarioUpdateRequest(field='trust_indicator',
-                                              op='add',
-                                              locate={
-                                                  'installer': 'apex',
-                                                  'version': 'master',
-                                                  'project': 'functest'},
-                                              term=add.format())
-        return update, scenario
-
-    def _update_and_assert(self, update_req, new_scenario, name=None):
-        code, _ = self.update(update_req, self.scenario)
-        self.assertEqual(code, httplib.OK)
-        self._get_and_assert(_none_default(name, self.scenario),
-                             new_scenario)
-
-    def _success(self, status, new_scenario):
-        self.assertEqual(status, httplib.OK)
-        self._get_and_assert(new_scenario.get('name'), new_scenario)
-
-    def _forbidden(self, status, new_scenario):
-        self.assertEqual(status, httplib.FORBIDDEN)
-
-    def _bad_request(self, status, new_scenario):
-        self.assertEqual(status, httplib.BAD_REQUEST)
-
-
-class TestScenarioDelete(TestScenarioBase):
-    def test_notFound(self):
-        code, body = self.delete('notFound')
-        self.assertEqual(code, httplib.NOT_FOUND)
-
-    def test_success(self):
-        scenario = self.create_return_name(self.req_d)
-        code, _ = self.delete(scenario)
-        self.assertEqual(code, httplib.OK)
-        code, _ = self.get(scenario)
-        self.assertEqual(code, httplib.NOT_FOUND)
-
-
-def _none_default(check, default):
-    return check if check else default
index 83f389a..6125c95 100644 (file)
@@ -94,11 +94,18 @@ class DocParser(object):
 
     def _parse_type(self, **kwargs):
         arg = kwargs.get('arg', None)
-        body = self._get_body(**kwargs)
-        self.params.setdefault(arg, {}).update({
-            'name': arg,
-            'dataType': body
-        })
+        code = self._parse_epytext_para('code', **kwargs)
+        link = self._parse_epytext_para('link', **kwargs)
+        if code is None:
+            self.params.setdefault(arg, {}).update({
+                'name': arg,
+                'type': link
+            })
+        elif code == 'list':
+            self.params.setdefault(arg, {}).update({
+                'type': 'array',
+                'items': {'type': link}
+            })
 
     def _parse_in(self, **kwargs):
         arg = kwargs.get('arg', None)
index 43f69d7..44ccb46 100644 (file)
@@ -1,4 +1,6 @@
 OPENID = 'openid'
+ROLE = 'role'
+DEFAULT_ROLE = 'user'
 
 # OpenID parameters
 OPENID_MODE = 'openid.mode'
index 6a9d94e..4623952 100644 (file)
@@ -1,11 +1,12 @@
 from six.moves.urllib import parse
+from tornado import gen
+from tornado import web
 
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
+from opnfv_testapi.db import api as dbapi
 from opnfv_testapi.ui.auth import base
 from opnfv_testapi.ui.auth import constants as const
 
-CONF = config.Config()
-
 
 class SigninHandler(base.BaseHandler):
     def get(self):
@@ -31,20 +32,30 @@ class SigninHandler(base.BaseHandler):
 
 
 class SigninReturnHandler(base.BaseHandler):
+    @web.asynchronous
+    @gen.coroutine
     def get(self):
         if self.get_query_argument(const.OPENID_MODE) == 'cancel':
             self._auth_failure('Authentication canceled.')
 
         openid = self.get_query_argument(const.OPENID_CLAIMED_ID)
-        user_info = {
+        role = const.DEFAULT_ROLE
+        new_user_info = {
             'openid': openid,
             'email': self.get_query_argument(const.OPENID_NS_SREG_EMAIL),
-            'fullname': self.get_query_argument(const.OPENID_NS_SREG_FULLNAME)
+            'fullname': self.get_query_argument(const.OPENID_NS_SREG_FULLNAME),
+            const.ROLE: role
         }
+        user = yield dbapi.db_find_one(self.table, {'openid': openid})
+        if not user:
+            dbapi.db_save(self.table, new_user_info)
+        else:
+            role = user.get(const.ROLE)
 
-        self.db_save(self.table, user_info)
-        if not self.get_secure_cookie('openid'):
-            self.set_secure_cookie('openid', openid)
+        self.clear_cookie(const.OPENID)
+        self.clear_cookie(const.ROLE)
+        self.set_secure_cookie(const.OPENID, openid)
+        self.set_secure_cookie(const.ROLE, role)
         self.redirect(url=CONF.ui_url)
 
     def _auth_failure(self, message):
@@ -57,9 +68,8 @@ class SigninReturnHandler(base.BaseHandler):
 class SignoutHandler(base.BaseHandler):
     def get(self):
         """Handle signout request."""
-        openid = self.get_secure_cookie(const.OPENID)
-        if openid:
-            self.clear_cookie(const.OPENID)
+        self.clear_cookie(const.OPENID)
+        self.clear_cookie(const.ROLE)
         params = {'openid_logout': CONF.osid_openid_logout_endpoint}
         url = parse.urljoin(CONF.ui_url,
                             '/#/logout?' + parse.urlencode(params))
index 140bca5..955cdee 100644 (file)
@@ -2,6 +2,7 @@ from tornado import gen
 from tornado import web
 
 from opnfv_testapi.common import raises
+from opnfv_testapi.db import api as dbapi
 from opnfv_testapi.ui.auth import base
 
 
@@ -12,12 +13,12 @@ class ProfileHandler(base.BaseHandler):
         openid = self.get_secure_cookie('openid')
         if openid:
             try:
-                user = yield self.db_find_one({'openid': openid})
+                user = yield dbapi.db_find_one(self.table, {'openid': openid})
                 self.finish_request({
                     "openid": user.get('openid'),
                     "email": user.get('email'),
                     "fullname": user.get('fullname'),
-                    "is_admin": False
+                    "role": user.get('role', 'user')
                 })
             except Exception:
                 pass
index bba7a86..5b2c922 100644 (file)
@@ -1,10 +1,10 @@
 from opnfv_testapi.resources.handlers import GenericApiHandler
-from opnfv_testapi.common import config
+from opnfv_testapi.common.config import CONF
 
 
 class RootHandler(GenericApiHandler):
     def get_template_path(self):
-        return config.Config().static_path
+        return CONF.static_path
 
     def get(self):
         self.render('testapi-ui/index.html')
index 955ffc8..4b6f75c 100644 (file)
@@ -2,9 +2,9 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-pbr>=1.6
-setuptools>=16.0
-tornado>=3.1,<=4.3
+pbr>=2.0.0,!=2.1.0  # Apache-2.0
+setuptools>=16.0,!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2  # PSF/ZPL
+tornado>=3.1,<=4.3  # Apache-2.0
 epydoc>=0.3.1
-six>=1.9.0
-motor
+six>=1.9.0  # MIT
+motor  # Apache-2.0
diff --git a/utils/test/testapi/run_test.sh b/utils/test/testapi/run_test.sh
deleted file mode 100755 (executable)
index 1e05dd6..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-set -o errexit
-
-# Get script directory
-SCRIPTDIR=`dirname $0`
-
-echo "Running unit tests..."
-
-# Creating virtual environment
-if [ ! -z $VIRTUAL_ENV ]; then
-    venv=$VIRTUAL_ENV
-else
-    venv=$SCRIPTDIR/.venv
-    virtualenv $venv
-fi
-source $venv/bin/activate
-
-# Install requirements
-pip install -r $SCRIPTDIR/requirements.txt
-pip install -r $SCRIPTDIR/test-requirements.txt
-
-find . -type f -name "*.pyc" -delete
-
-nosetests --with-xunit \
-    --with-coverage \
-    --cover-erase \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/cmd \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/common \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/resources \
-    --cover-package=$SCRIPTDIR/opnfv_testapi/router \
-    --cover-xml \
-    --cover-html \
-    $SCRIPTDIR/opnfv_testapi/tests
-
-exit_code=$?
-
-deactivate
-
-exit $exit_code
index 15dda96..dd52373 100644 (file)
@@ -1,9 +1,20 @@
-import setuptools
+import os
+import subprocess
 
+import setuptools
 
 __author__ = 'serena'
 
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
+
+dirpath = os.path.dirname(os.path.abspath(__file__))
+subprocess.call(['ln', '-s',
+                 '{}/3rd_party/static'.format(dirpath),
+                 '{}/opnfv_testapi/static'.format(dirpath)])
 
 setuptools.setup(
-    setup_requires=['pbr>=1.8'],
+    setup_requires=['pbr==2.0.0'],
     pbr=True)
index 645687b..233f465 100644 (file)
@@ -2,7 +2,9 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-mock
-pytest
-coverage
-nose>=1.3.1
+coverage>=4.0,!=4.4  # Apache-2.0
+mock>=2.0  # BSD
+nose  # LGPL
+pytest  # MIT
+pytest-cov  # MIT
+pytest-mock  # MIT
index 81c9dfa..d300f1a 100644 (file)
@@ -4,7 +4,7 @@
 # and then run "tox" from this directory.
 
 [tox]
-envlist = py27,pep8
+envlist = pep8,py27
 skipsdist = True
 sitepackages = True
 
@@ -16,9 +16,11 @@ deps =
   -rtest-requirements.txt
 commands=
   py.test \
-    --basetemp={envtmpdir} \
-    --cov \
-    {posargs}
+  --basetemp={envtmpdir} \
+  --cov \
+  --cov-report term-missing \
+  --cov-report xml \
+  {posargs}
 setenv=
   HOME = {envtmpdir}
   PYTHONPATH = {toxinidir}
index 7e0dd55..9c24377 100644 (file)
@@ -40,5 +40,6 @@ def backup(args):
     cmd = ['mongodump', '-o', '%s' % out]
     execute(cmd, args)
 
+
 if __name__ == '__main__':
     main(backup, parser)
index ba4334a..f759592 100644 (file)
@@ -85,5 +85,6 @@ def update(args):
     rename_fields(fields_old2New)
     rename_collections(collections_old2New)
 
+
 if __name__ == '__main__':
     main(update, parser)
diff --git a/utils/upload-artifact.sh b/utils/upload-artifact.sh
new file mode 100644 (file)
index 0000000..b66cdb7
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Orange and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -e
+set -o pipefail
+
+export PATH=$PATH:/usr/local/bin/
+
+# 2 paramters
+# - $1: the source directory where the files to be uploaded are located
+# - $2: the target on artifact http://artifact.opnfv.org/<project>/$2
+#   if not set, default value is <project>/docs
+project=$PROJECT
+if [ -z "$2" ]
+  then
+      artifact_dir="$project/docs"
+  else
+      artifact_dir="$project/$2"
+fi
+DIRECTORY="$1"
+
+
+# check that the API doc directory does exist before pushing it to artifact
+if [ ! -d "$DIRECTORY" ]; then
+    echo "Directory to be uploaded "$DIRECTORY" does not exist"
+    exit 1
+fi
+set +e
+gsutil&>/dev/null
+if [ $? != 0 ]; then
+    echo "Not possible to push results to artifact: gsutil not installed"
+    exit 1
+else
+    gsutil ls gs://artifacts.opnfv.org/"$project"/ &>/dev/null
+    if [ $? != 0 ]; then
+        echo "Not possible to push results to artifact: gsutil not installed."
+        exit 1
+    else
+        echo "Uploading file(s) to artifact $artifact_dir"
+        gsutil -m cp -r "$DIRECTORY"/* gs://artifacts.opnfv.org/"$artifact_dir"/ >/dev/null 2>&1
+    fi
+fi