Merge "Backup Pharos Dashboard Data"
authorTrevor Bramwell <tbramwell@linuxfoundation.org>
Wed, 10 Jan 2018 18:25:52 +0000 (18:25 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Wed, 10 Jan 2018 18:25:52 +0000 (18:25 +0000)
39 files changed:
INFO
INFO.yaml [new file with mode: 0644]
jjb/apex/apex-verify-jobs.yml
jjb/apex/apex.yml
jjb/apex/apex.yml.j2
jjb/apex/scenarios.yaml.hidden
jjb/armband/armband-ci-jobs.yml
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-deploy.sh
jjb/compass4nfv/compass-verify-jobs.yml
jjb/container4nfv/container4nfv-project.yml
jjb/daisy4nfv/daisy-daily-jobs.yml
jjb/daisy4nfv/daisy-deploy.sh
jjb/doctor/doctor.yml
jjb/dovetail/dovetail-project-jobs.yml
jjb/dovetail/dovetail-run.sh
jjb/fuel/fuel-daily-jobs.yml
jjb/functest/functest-daily-jobs.yml
jjb/global/releng-macros.yml
jjb/global/slave-params.yml
jjb/qtip/qtip-experimental-jobs.yml
jjb/qtip/qtip-verify-jobs.yml
jjb/releng/compass4nfv-docker.yml
jjb/releng/functest-docker.yml
jjb/releng/opnfv-docker.sh
jjb/releng/opnfv-docker.yml
jjb/releng/opnfv-repo-archiver.sh
jjb/releng/opnfv-utils.yml
jjb/xci/bifrost-verify-jobs.yml
jjb/xci/bifrost-verify.sh
jjb/xci/osa-periodic-jobs.yml
jjb/xci/xci-verify-jobs.yml
jjb/yardstick/yardstick-project-jobs.yml
modules/opnfv/deployment/daisy/__init__.py [new file with mode: 0644]
modules/opnfv/deployment/daisy/adapter.py [new file with mode: 0644]
modules/opnfv/deployment/factory.py
modules/opnfv/utils/ssh_utils.py
utils/create_pod_file.py
utils/push-test-logs.sh

diff --git a/INFO b/INFO
index 18c8cf2..d9051ab 100644 (file)
--- a/INFO
+++ b/INFO
@@ -16,7 +16,6 @@ Fatih Degirmenci (Ericsson, fatih.degirmenci@ericsson.com)
 Aric Gardner (Linux Foundation, agardner@linuxfoundation.org)
 Tim Rozet (Red Hat, trozet@redhat.com)
 Morgan Richomme (Orange, morgan.richomme@orange.com)
-Matthew Lijun (Huawei, matthew.lijun@huawei.com)
 Jose Lausuch (Ericsson, jose.lausuch@ericsson.com)
 Ryota Mibu (NEC, r-mibu@cq.jp.nec.com)
 Mei Mei (Huawei, meimei@huawei.com)
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644 (file)
index 0000000..3bb3cbe
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,125 @@
+---
+project: 'Release Engineering (Releng)'
+project_creation_date: '2015-06-14'
+project_category: 'Integration & Testing'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_releng_ptl
+    name: 'Fatih Degirmenci'
+    email: 'fatih.degirmenci@ericsson.com'
+    id: 'fdegir'
+    company: 'Ericsson'
+    timezone: 'Europe/Stockholm'
+primary_contact: *opnfv_releng_ptl
+issue_tracking:
+    type: 'jira'
+    url: 'https://jira.opnfv.org/projects/RELENG'
+    key: 'RELENG'
+mailing_list:
+    type: 'mailman2'
+    url: 'opnfv-tech-discuss@lists.opnfv.org'
+    tag: '[releng]'
+realtime_discussion:
+    type: 'irc'
+    server: 'freenode.net'
+    channel: '#lf-releng'
+meetings:
+    - type: 'gotomeeting+irc'
+      agenda: 'https://wiki.opnfv.org/display/INF/Infra+Working+Group'
+      url: 'https://global.gotomeeting.com/join/819733085'
+      server: 'freenode.net'
+      channel: '#opnfv-meeting'
+      repeats: 'weekly'
+      time: '16:00 UTC'
+repositories:
+    - 'releng'
+    - 'releng-anteater'
+    - 'releng-testresults'
+    - 'releng-utils'
+    - 'releng-xci'
+committers:
+    - <<: *opnfv_releng_ptl
+    - name: 'Aric Gardner'
+      email: 'agardner@linuxfoundation.org'
+      company: 'The Linux Foundation'
+      id: 'agardner'
+      timezone: 'Canada/Atlantic'
+    - name: 'Tim Rozet'
+      email: 'trozet@redhat.com'
+      company: 'Red Hat'
+      id: 'trozet'
+      timezone: 'America/New_York'
+    - name: 'Morgan Richomme'
+      email: 'morgan.richomme@orange.com'
+      company: 'Orange'
+      id: 'mrichomme'
+      timezone: 'Europe/Paris'
+    - name: 'Jose Lausuch'
+      company: 'SUSE'
+      email: 'jose.lausuch@ericsson.com'
+      id: 'jose.lausuch'
+      timezone: 'Europe/Madrid'
+    - name: 'Ryota Mibu'
+      company: 'NEC'
+      email: 'r-mibu@cq.jp.nec.com'
+      id: 'r-mibu'
+      timezone: 'Asia/Tokyo'
+    - name: 'Mei Mei'
+      company: 'Huawei'
+      email: 'meimei@huawei.com'
+      id: 'm00133142'
+      timezone: 'Asia/Shanghai'
+    - name: 'Trevor Bramwell'
+      company: 'The Linux Foundation'
+      email: 'tbramwell@linuxfoundation.org'
+      id: 'bramwelt'
+      timezone: 'America/Los_Angeles'
+    - name: 'Serena Feng'
+      company: 'ZTE'
+      email: 'feng.xiaowei@zte.com.cn'
+      id: 'SerenaFeng'
+      timezone: 'Asia/Shanghai'
+    - name: 'Yolanda Robla Mota'
+      company: 'Red Hat'
+      email: 'yroblamo@redhat.com'
+      id: 'yrobla'
+      timezone: 'America/New_York'
+    - name: 'Markos Chandras'
+      company: 'SUSE'
+      email: 'mchandras@suse.de'
+      id: 'mchandras'
+      timezone: 'Europe/Berlin'
+    - name: 'Luke Hinds'
+      company: 'Red Hat'
+      email: 'lhinds@redhat.com'
+      id: 'lukehinds'
+      timezone: 'Europe/London'
+tsc:
+    approval: 'http://ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-07-14-14.00.html'
+    changes:
+        - type: 'removal'
+          name: 'Guy Rodrigue Koffi'
+          link: ''
+        - type: 'removal'
+          name: 'Victor Laza'
+          link: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-02-16-14.59.html'
+        - type: 'promotion'
+          name: 'Mei Mei'
+          link: 'http://lists.opnfv.org/pipermail/opnfv-tsc/2016-March/002228.html'
+        - type: 'removal'
+          name: 'Peter Banzi'
+          link: ''
+        - type: 'promotion'
+          name: 'Trevor Bramwell'
+          link: 'http://lists.opnfv.org/pipermail/opnfv-tech-discuss/2016-July/011659.html'
+        - type: 'promotion'
+          name: 'Serena Feng'
+          link: ''
+        - type: 'promotion'
+          name: 'Yolanda Robla Mota'
+          link: ''
+        - type: 'promotion'
+          name: 'Markos'
+          link: ''
+        - type: 'promotion'
+          name: 'Luke Hinds'
+          link: ''
index c57ac1d..88c1b17 100644 (file)
                   pattern: 'apex/*'
                 - compare-type: ANT
                   pattern: 'build/**'
+                - compare-type: ANT
+                  pattern: 'ci/**'
                 - compare-type: ANT
                   pattern: 'lib/**'
                 - compare-type: ANT
index 43e234c..6714d6a 100644 (file)
@@ -35,7 +35,7 @@
           baremetal-slave: 'apex-baremetal-master'
           verify-scenario: 'os-odl-nofeature-ha'
           scenario_stream: 'euphrates'
-          disable_daily: false
+          disable_daily: true
       - danube: &danube
           branch: 'stable/danube'
           gs-pathname: '/danube'
           <<: *master
       - 'os-odl-bgpvpn-ha':
           <<: *master
+      - 'os-odl-bgpvpn-noha':
+          <<: *master
       - 'os-ovn-nofeature-noha':
           <<: *master
       - 'os-nosdn-fdio-noha':
               predefined-parameters:
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               kill-phase-on: NEVER
-              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/"
               abort-all-job: false
               git-revision: false
       - multijob:
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
+            - name: 'apex-os-odl-bgpvpn-noha-baremetal-master'
+              node-parameters: false
+              current-parameters: false
+              predefined-parameters: |
+                OPNFV_CLEAN=yes
+              kill-phase-on: NEVER
+              abort-all-job: true
+              git-revision: false
             - name: 'apex-os-ovn-nofeature-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
index 6cd9de2..27a854d 100644 (file)
@@ -35,7 +35,7 @@
           baremetal-slave: 'apex-baremetal-master'
           verify-scenario: 'os-odl-nofeature-ha'
           scenario_stream: 'euphrates'
-          disable_daily: false
+          disable_daily: true
       - danube: &danube
           branch: 'stable/danube'
           gs-pathname: '/danube'
               predefined-parameters:
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               kill-phase-on: NEVER
-              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|nosdn-kvm|odl_l3-fdio)-ha/"
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/"
               abort-all-job: false
               git-revision: false
       - multijob:
index 98b698d..789ca7f 100644 (file)
@@ -4,6 +4,7 @@ master:
   - 'os-odl-nofeature-ha'
   - 'os-odl-nofeature-noha'
   - 'os-odl-bgpvpn-ha'
+  - 'os-odl-bgpvpn-noha'
   - 'os-ovn-nofeature-noha'
   - 'os-nosdn-fdio-noha'
   - 'os-nosdn-fdio-ha'
index 0202ef0..b2fa62f 100644 (file)
                     build-step-failure-threshold: 'never'
                     failure-threshold: 'never'
                     unstable-threshold: 'FAILURE'
-      # 1.dovetail only master, based on D release
-      # 2.here the stream means the SUT stream,
-      #   dovetail stream is defined in its own job
-      # 3.only proposed_tests testsuite here(refstack, ha, ipv6, bgpvpn)
-      # 4.not used for release criteria or compliance,
+      # 1.here the stream means the SUT stream, dovetail stream is defined in its own job
+      # 2.only debug testsuite here(refstack, ha, vping, ipv6, tempest, bgpvpn)
+      # 3.not used for release criteria or compliance,
       #   only to debug the dovetail tool bugs with arm pods
-      # 5.only run against scenario os-(nosdn|odl)-(nofeature-bgpvpn)-ha
-      - conditional-step:
-          condition-kind: and
-          condition-operands:
-            - condition-kind: regex-match
-              regex: os-(nosdn|odl)-(nofeature|bgpvpn)-ha
-              label: '{scenario}'
-            - condition-kind: regex-match
-              regex: 'danube'
-              label: '{stream}'
-          steps:
-            - trigger-builds:
-                - project: 'dovetail-{installer}-{pod}-proposed_tests-master'
-                  current-parameters: false
-                  predefined-parameters:
-                    DEPLOY_SCENARIO={scenario}
-                  block: true
-                  same-node: true
-                  block-thresholds:
-                    build-step-failure-threshold: 'never'
-                    failure-threshold: 'never'
-                    unstable-threshold: 'FAILURE'
+      - trigger-builds:
+          - project: 'dovetail-{installer}-{pod}-proposed_tests-{stream}'
+            current-parameters: false
+            predefined-parameters:
+              DEPLOY_SCENARIO={scenario}
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
       # Armband uses Fuel's log collection project job, no need to duplicate
       - conditional-step:
           condition-kind: not
index 4adfc2a..0790b83 100644 (file)
@@ -13,7 +13,7 @@
       gs-pathname: ''
       ppa-pathname: '/{stream}'
       disabled: false
-      openstack-version: ocata
+      openstack-version: pike
     euphrates: &euphrates
       stream: euphrates
       branch: 'stable/{stream}'
@@ -54,7 +54,7 @@
       #        master
       # -------------------------------
       - baremetal-centos:
-          slave-label: 'intel-pod8'
+          slave-label: 'intel-pod17'
           os-version: 'centos7'
           <<: *master
       # -------------------------------
       - 'os-nosdn-ovs_dpdk-noha':
           disabled: false
           auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
+      - 'os-nosdn-bar-ha':
+          disabled: false
+          auto-trigger-name: 'compass-{scenario}-{pod}-{stream}-trigger'
 
     jobs:
       - 'compass-{scenario}-{pod}-daily-{stream}'
               unstable-threshold: 'FAILURE'
       # dovetail only master by now, not sync with A/B/C branches
       # here the stream means the SUT stream, dovetail stream is defined in its own job
-      # only run on os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha scenario
-      # run against SUT master branch, dovetail docker image with latest tag
-      # run against SUT danube branch, dovetail docker image with latest tag(Monday and Sunday)
-      # run against SUT danube branch, dovetail docker image with cvp.X.X.X tag(Tuesday, Thursday, Friday and Saturday)
+      # only run on os-(nosdn|odl_l2|odl_l3)-nofeature-ha scenario
+      # run against SUT master/euphrates branch, dovetail docker image with latest tag(Monday, Tuesday)
+      # run against SUT master/euphrates branch, dovetail docker image with cvp.X.X.X tag(Thursday, Friday)
+      # run against SUT danube branch, dovetail docker image with cvp.X.X.X tag on huawei-pod7
       - conditional-step:
           condition-kind: and
           condition-operands:
             - condition-kind: regex-match
-              regex: danube
-              label: '{stream}'
-            - condition-kind: regex-match
-              regex: os-(nosdn|odl_l2|odl_l3)-nofeature-ha
+              regex: os-(nosdn|odl_l3)-nofeature-ha
               label: '{scenario}'
             - condition-kind: day-of-week
               day-selector: select-days
               days:
                 MON: true
-                SUN: true
+                TUES: true
               use-build-time: true
           steps:
             - trigger-builds:
           condition-kind: and
           condition-operands:
             - condition-kind: regex-match
-              regex: danube
-              label: '{stream}'
-            - condition-kind: regex-match
-              regex: os-(nosdn|odl_l2|odl_l3)-nofeature-ha
+              regex: os-(nosdn|odl_l3)-nofeature-ha
               label: '{scenario}'
             - condition-kind: day-of-week
               day-selector: select-days
               days:
-                TUES: true
-                WED: true
                 THURS: true
                 FRI: true
-                SAT: true
               use-build-time: true
           steps:
             - trigger-builds:
                     build-step-failure-threshold: 'never'
                     failure-threshold: 'never'
                     unstable-threshold: 'FAILURE'
-      - conditional-step:
-          condition-kind: and
-          condition-operands:
-            - condition-kind: regex-match
-              regex: os-(nosdn|odl_l2|odl_l3)-nofeature-ha
-              label: '{scenario}'
-            - condition-kind: regex-match
-              regex: master
-              label: '{stream}'
-          steps:
-            - trigger-builds:
-                - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
-                  current-parameters: false
-                  predefined-parameters:
-                    DEPLOY_SCENARIO={scenario}
-                  block: true
-                  same-node: true
-                  block-thresholds:
-                    build-step-failure-threshold: 'never'
-                    failure-threshold: 'never'
-                    unstable-threshold: 'FAILURE'
       - conditional-step:
           condition-kind: and
           condition-operands:
       - build-name:
           name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
       - timeout:
-          timeout: 240
+          timeout: 360
           abort: true
       - fix-workspace-permissions
 
     name: 'compass-k8-nosdn-nofeature-ha-baremetal-centos-master-trigger'
     triggers:
       - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-bar-ha-baremetal-centos-master-trigger'
+    triggers:
+      - timed: ''  # '0 19 * * *'
 
 # ----------------------------
 # noha-baremetal-centos-master
     name: 'compass-k8-nosdn-nofeature-ha-huawei-pod7-danube-trigger'
     triggers:
       - timed: ''
+- trigger:
+    name: 'compass-os-nosdn-bar-ha-huawei-pod7-danube-trigger'
+    triggers:
+      - timed: ''  # '0 19 * * *'
 
 # ----------------------------
 # noha-huawei-pod7-danube
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
-      - timed: '0 20 * * *'
+      - timed: '0 20 2-30/2 * *'
 - trigger:
     name: 'compass-os-nosdn-openo-ha-baremetal-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-baremetal-master-trigger'
     triggers:
-      - timed: '0 18 * * *'
+      - timed: '0 18 1-29/2 * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-baremetal-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-master-trigger'
     triggers:
-      - timed: '0 12 * * *'
+      - timed: '0 12 2-30/2 * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-baremetal-master-trigger'
     triggers:
-      - timed: '0 14 * * *'
+      - timed: '0 14 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-ovs_dpdk-ha-baremetal-master-trigger'
     triggers:
-      - timed: '0 16 * * *'
+      - timed: '0 16 2-30/2 * *'
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-baremetal-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl-sfc-ha-baremetal-master-trigger'
     triggers:
-      - timed: '0 10 * * *'
+      - timed: '0 10 1-29/2 * *'
+- trigger:
+    name: 'compass-os-nosdn-bar-ha-baremetal-master-trigger'
+    triggers:
+      - timed: '0 2 2-30/2 * *'
 
 # ---------------------
 # noha-baremetal-master
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
-      - timed: '0 1 * * *'
+      - timed: '0 1 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-openo-ha-baremetal-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
-      - timed: '0 21 * * *'
+      - timed: '0 21 2-30/2 * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-euphrates-trigger'
     triggers:
-      - timed: '0 5 * * *'
+      - timed: '0 5 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-baremetal-euphrates-trigger'
     triggers:
-      - timed: '0 13 * * *'
+      - timed: '0 13 2-30/2 * *'
 - trigger:
     name: 'compass-os-nosdn-ovs_dpdk-ha-baremetal-euphrates-trigger'
     triggers:
-      - timed: '0 9 * * *'
+      - timed: '0 9 1-29/2 * *'
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-baremetal-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl-sfc-ha-baremetal-euphrates-trigger'
     triggers:
-      - timed: '0 17 * * *'
+      - timed: '0 17 2-30/2 * *'
+- trigger:
+    name: 'compass-os-nosdn-bar-ha-baremetal-euphrates-trigger'
+    triggers:
+      - timed: '0 21 1-29/2 * *'
 
 # ---------------------
 # noha-baremetal-euphrates
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-virtual-master-trigger'
     triggers:
-      - timed: '0 19 * * *'
+      - timed: '0 19 2-30/2 * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-master-trigger'
     triggers:
-      - timed: '30 12 * * *'
+      - timed: '30 12 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-master-trigger'
     triggers:
-      - timed: '0 13 * * *'
+      - timed: '0 13 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-ovs_dpdk-ha-virtual-master-trigger'
     triggers:
-      - timed: '0 17 * * *'
+      - timed: '0 17 2-30/2 * *'
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-virtual-master-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl-sfc-ha-virtual-master-trigger'
     triggers:
-      - timed: '0 16 * * *'
+      - timed: '0 16 2-30/2 * *'
+- trigger:
+    name: 'compass-os-nosdn-bar-ha-virtual-master-trigger'
+    triggers:
+      - timed: '0 17 1-29/2 * *'
 
 # -------------------
 # noha-virtual-master
 - trigger:
     name: 'compass-os-nosdn-kvm-noha-virtual-master-trigger'
     triggers:
-      - timed: '30 13 * * *'
+      - timed: '30 13 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-nofeature-noha-virtual-master-trigger'
     triggers:
-      - timed: '0 14 * * *'
+      - timed: '0 14 2-30/2 * *'
 - trigger:
     name: 'compass-os-odl_l3-nofeature-noha-virtual-master-trigger'
     triggers:
-      - timed: '0 15 * * *'
+      - timed: '0 15 1-29/2 * *'
 - trigger:
     name: 'compass-os-odl_l2-moon-noha-virtual-master-trigger'
     triggers:
-      - timed: '0 18 * * *'
+      - timed: '0 18 2-30/2 * *'
 - trigger:
     name: 'compass-os-odl-sfc-noha-virtual-master-trigger'
     triggers:
-      - timed: '0 20 * * *'
+      - timed: '0 20 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-ovs_dpdk-noha-virtual-master-trigger'
     triggers:
-      - timed: '0 11 * * *'
+      - timed: '0 11 2-30/2 * *'
 
 # -----------------
 # ha-virtual-euphrates
 - trigger:
     name: 'compass-os-nosdn-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 23 * * *'
+      - timed: '0 23 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-openo-ha-virtual-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l3-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 22 * * *'
+      - timed: '0 22 2-30/2 * *'
 - trigger:
     name: 'compass-os-onos-nofeature-ha-virtual-euphrates-trigger'
     triggers:
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 20 * * *'
+      - timed: '0 20 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-kvm-ha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 16 * * *'
+      - timed: '0 16 2-30/2 * *'
 - trigger:
     name: 'compass-os-nosdn-ovs_dpdk-ha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 14 * * *'
+      - timed: '0 14 1-29/2 * *'
 - trigger:
     name: 'compass-os-odl-sfc-ha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 18 * * *'
+      - timed: '0 18 2-30/2 * *'
 - trigger:
     name: 'compass-k8-nosdn-nofeature-ha-virtual-euphrates-trigger'
     triggers:
-      - timed: '5 1 * * *'
+      - timed: '5 1 2-30/2 * *'
+- trigger:
+    name: 'compass-os-nosdn-bar-ha-virtual-euphrates-trigger'
+    triggers:
+      - timed: '0 19 1-29/2 * *'
 
 # -------------------
 # noha-virtual-euphrates
 - trigger:
     name: 'compass-os-nosdn-kvm-noha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 15 * * *'
+      - timed: '0 15 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-nofeature-noha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 17 * * *'
+      - timed: '0 17 2-30/2 * *'
 - trigger:
     name: 'compass-os-odl_l3-nofeature-noha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 23 * * *'
+      - timed: '0 23 1-29/2 * *'
 - trigger:
     name: 'compass-os-odl_l2-moon-noha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 21 * * *'
+      - timed: '0 21 2-30/2 * *'
 - trigger:
     name: 'compass-os-odl-sfc-noha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 19 * * *'
+      - timed: '0 19 1-29/2 * *'
 - trigger:
     name: 'compass-os-nosdn-ovs_dpdk-noha-virtual-euphrates-trigger'
     triggers:
-      - timed: '0 12 * * *'
+      - timed: '0 12 2-30/2 * *'
index ad069a5..ac649b9 100644 (file)
@@ -45,10 +45,6 @@ else
     export NETWORK_CONF_FILE=network.yml
 fi
 
-if [[ "$NODE_NAME" =~ "intel-pod8" ]]; then
-    export OS_MGMT_NIC=em4
-fi
-
 if [[ "$NODE_NAME" =~ "-virtual" ]]; then
     export NETWORK_CONF=$CONFDIR/vm_environment/$NODE_NAME/${NETWORK_CONF_FILE}
     export DHA_CONF=$CONFDIR/vm_environment/${DEPLOY_SCENARIO}.yml
@@ -58,7 +54,11 @@ if [[ "$NODE_NAME" =~ "-virtual" ]]; then
         export VIRT_NUMBER=2
     fi
 else
-    export INSTALL_NIC=eth1
+    if [[ "$NODE_NAME" =~ "intel-pod17" ]]; then
+        export INSTALL_NIC=eno2
+    else
+        export INSTALL_NIC=eth1
+    fi
     export NETWORK_CONF=$CONFDIR/hardware_environment/$NODE_NAME/${NETWORK_CONF_FILE}
     export DHA_CONF=$CONFDIR/hardware_environment/$NODE_NAME/${DEPLOY_SCENARIO}.yml
 fi
index 6927145..7024dad 100644 (file)
           disabled: false
           openstack-version: 'ocata'
           branch-type: 'master'
-      - danube:
+      - euphrates:
           branch: 'stable/{stream}'
           gs-pathname: '/{stream}'
           ppa-pathname: '/{stream}'
           disabled: false
-          openstack-version: 'newton'
-          branch-type: 'branch'
+          openstack-version: 'ocata'
+          branch-type: 'master'
 
     distro:
       - 'xenial':
@@ -75,7 +75,7 @@
     wrappers:
       - ssh-agent-wrapper
       - timeout:
-          timeout: 240
+          timeout: 360
           fail: true
       - fix-workspace-permissions
 
index 58070e1..03bbb65 100644 (file)
@@ -10,7 +10,8 @@
 
     jobs:
       - 'container4nfv-verify-{stream}'
-      - 'container4nfv-daily-{stream}'
+      - 'container4nfv-daily-upload-{stream}'
+      - 'container4nfv-daily-deploy-{stream}'
 
     stream:
       - master:
           cd $WORKSPACE/ci
           ./build.sh
 
+- job-template:
+    name: 'container4nfv-daily-upload-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    scm:
+      - git-scm
+
+    wrappers:
+      - fix-workspace-permissions
+
+    parameters:
+      - project-parameter:
+          project: '{project}'
+          branch: '{branch}'
+      - 'opnfv-build-ubuntu-defaults'
+      - 'container4nfv-defaults':
+          gs-pathname: '{gs-pathname}'
+
+    builders:
+      - shell: |
+          cd $WORKSPACE/ci
+          ./upload.sh
 
 - job-template:
-    name: 'container4nfv-daily-{stream}'
+    name: 'container4nfv-daily-deploy-{stream}'
 
     project-type: freestyle
 
       - shell: |
           cd $WORKSPACE/ci
           ./deploy.sh
+
+###################
+# parameter macros
+###################
+- parameter:
+    name: 'container4nfv-defaults'
+    parameters:
+      - string:
+          name: GS_URL
+          default: artifacts.opnfv.org/$PROJECT{gs-pathname}
+          description: "URL to Google Storage."
index 090d2e1..e61272c 100644 (file)
 - trigger:
     name: 'daisy-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-      - timed: '0 18 * * *'
+      - timed: '0 12 * * *'
 # Basic NOHA Scenarios
 - trigger:
     name: 'daisy-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
 - trigger:
     name: 'daisy-os-odl-nofeature-ha-baremetal-daily-master-trigger'
     triggers:
-      - timed: '0 12 * * *'
+      - timed: '0 18 * * *'
 
 # ----------------------------------------------
 # Triggers for job running on daisy-virtual against master branch
index 803ff5b..1723fd1 100755 (executable)
@@ -19,14 +19,22 @@ fi
 
 # clone the securedlab repo
 cd $WORKSPACE
-SECURELAB_DIR=/var/tmp/opnfv-securedlab
 
-echo "Cloning securedlab repo $BRANCH to $SECURELAB_DIR"
-rm -rf $SECURELAB_DIR
-git clone ssh://jenkins-zte@gerrit.opnfv.org:29418/securedlab --quiet \
-    --branch $BRANCH $SECURELAB_DIR
+# There are no PDFs in euphrates branch of pharos repo.
+if [[  "$BRANCH" =~ "euphrates" ]]; then
+    CONFIG_REPO_NAME=securedlab
+else
+    CONFIG_REPO_NAME=pharos
+fi
+
+LABS_DIR=/var/tmp/opnfv-${CONFIG_REPO_NAME}
+
+echo "Cloning ${CONFIG_REPO_NAME} repo $BRANCH to $LABS_DIR"
+rm -rf $LABS_DIR
+git clone ssh://jenkins-zte@gerrit.opnfv.org:29418/${CONFIG_REPO_NAME} \
+    --quiet --branch $BRANCH $LABS_DIR
 
-DEPLOY_COMMAND="sudo -E ./ci/deploy/deploy.sh -L $SECURELAB_DIR \
+DEPLOY_COMMAND="sudo -E ./ci/deploy/deploy.sh -L $LABS_DIR \
                 -l $LAB_NAME -p $POD_NAME -B $BRIDGE -s $DEPLOY_SCENARIO"
 
 # log info to console
index e0de9aa..5612401 100644 (file)
     task:
       - verify:
           auto-trigger-name: 'doctor-verify'
-          is-python: false
-      - python-verify:
-          auto-trigger-name: 'doctor-verify'
-          is-python: true
 
     exclude:
       - installer: 'apex'
       - git-scm-gerrit
 
     triggers:
-      - gerrit:
-          server-name: 'gerrit.opnfv.org'
-          trigger-on:
-            - patchset-created-event:
-                exclude-drafts: 'false'
-                exclude-trivial-rebase: 'false'
-                exclude-no-code-change: 'false'
-            - draft-published-event
-            - comment-added-contains-event:
-                comment-contains-value: 'recheck'
-            - comment-added-contains-event:
-                comment-contains-value: 'reverify'
-          projects:
-            - project-compare-type: 'ANT'
-              project-pattern: '{project}'
-              branches:
-                - branch-compare-type: 'ANT'
-                  branch-pattern: '**/{branch}'
-              file-paths:
-                - compare-type: ANT
-                  pattern: 'doctor_tests/**'
+      - 'doctor-verify':
+          project: '{project}'
+          branch: '{branch}'
+          files: 'doctor_tests/**'
+          is-skip-vote: false
 
     builders:
       - shell: |
     publishers:
       - 'doctor-verify-unit-test-publishers-macro'
 
-
 - job-template:
     name: 'doctor-{task}-{inspector}-{stream}'
 
       - string:
           name: TESTCASE_OPTIONS
           # yamllint disable rule:line-length
-          default: '-e INSPECTOR_TYPE={inspector} -e PYTHON_ENABLE={is-python} -v $WORKSPACE:/home/opnfv/repos/doctor'
+          default: '-e INSPECTOR_TYPE={inspector} -v $WORKSPACE:/home/opnfv/repos/doctor'
           # yamllint enable rule:line-length
           description: 'Addtional parameters specific to test case(s)'
       # functest-parameter
           project: '{project}'
           branch: '{branch}'
           files: 'doctor_tests/**'
+          is-skip-vote: true
 
     builders:
       - shell: |
 - builder:
     name: 'doctor-verify-unit-test-builders-macro'
     builders:
-      - shell: "[ -e tests/run.sh ] && bash -n ./tests/run.sh"
+      - shell: "tox -e pep8"
+
 - builder:
     name: 'doctor-verify-installer-inspector-builders-macro'
     builders:
       - archive:
           artifacts: 'functest_results/$FUNCTEST_SUITE_NAME.log'
       - email-jenkins-admins-on-failure
+
 - publisher:
     name: 'doctor-verify-unit-test-publishers-macro'
     publishers:
       - email-jenkins-admins-on-failure
+      - archive:
+          artifacts: '.tox/'
+
 
 #####################################
 # trigger macros
                 - compare-type: ANT
                   pattern: '{files}'
           skip-vote:
-            successful: true
-            failed: true
-            unstable: true
-            notbuilt: true
+            successful: '{is-skip-vote}'
+            failed: '{is-skip-vote}'
+            unstable: '{is-skip-vote}'
+            notbuilt: '{is-skip-vote}'
index c38ec96..1accffc 100644 (file)
       - master:
           branch: '{stream}'
           disabled: false
+      - danube:
+          branch: 'stable/{stream}'
+          gs-pathname: '/{stream}'
+          disabled: false
 
 ################################
 # job templates
index e084e4b..e50242b 100755 (executable)
@@ -13,6 +13,9 @@
 set -e
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
+DEPLOY_TYPE=baremetal
+[[ $BUILD_TAG =~ "virtual" ]] && DEPLOY_TYPE=virt
+
 DOVETAIL_HOME=${WORKSPACE}/cvp
 [ -d ${DOVETAIL_HOME} ] && sudo rm -rf ${DOVETAIL_HOME}
 
@@ -21,6 +24,8 @@ mkdir -p ${DOVETAIL_HOME}
 DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
 mkdir -p ${DOVETAIL_CONFIG}
 
+ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+
 sshkey=""
 # The path of openrc.sh is defined in fetch_os_creds.sh
 OPENRC=${DOVETAIL_CONFIG}/env_config.sh
@@ -73,13 +78,17 @@ if [[ -f $OPENRC ]]; then
             exit 1
         fi
     fi
-    cat $OPENRC
 else
     echo "ERROR: cannot find file $OPENRC. Please check if it is existing."
     sudo ls -al ${DOVETAIL_CONFIG}
     exit 1
 fi
 
+if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "fuel" ]]; then
+    sed -i "s#/etc/ssl/certs/mcp_os_cacert#${CACERT}#g" ${OPENRC}
+fi
+cat $OPENRC
+
 if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
     cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
 nodes:
@@ -92,6 +101,19 @@ nodes:
 EOF
 fi
 
+if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
+    fuel_ctl_ssh_options="${ssh_options} -i ${SSH_KEY}"
+    ssh_user="ubuntu"
+    fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt --out yaml 'ctl*' pillar.get _param:openstack_control_address | \
+                awk '{print \$2; exit}'") &> /dev/null
+    cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
+nodes:
+- {ip: ${fuel_ctl_ip}, name: node1, key_filename: /root/.ssh/id_rsa, role: controller, user: ${ssh_user}}
+
+EOF
+fi
+
 if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
     set +e
 
@@ -109,6 +131,8 @@ if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
         options="-u root -p r00tme"
     elif [[ ${INSTALLER_TYPE} == apex ]]; then
         options="-u stack -k /root/.ssh/id_rsa"
+    elif [[ ${INSTALLER_TYPE} == daisy ]]; then
+        options="-u root -p r00tme"
     else
         echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
         echo "HA test cases may not run properly."
@@ -135,11 +159,13 @@ else
     echo "HA test cases may not run properly."
 fi
 
-ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-
 if [ "$INSTALLER_TYPE" == "fuel" ]; then
-    echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
-    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
+    if [[ "${SUT_BRANCH}" =~ "danube" ]]; then
+        echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+        sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
+    else
+        cp ${SSH_KEY} ${DOVETAIL_CONFIG}/id_rsa
+    fi
 fi
 
 if [ "$INSTALLER_TYPE" == "apex" ]; then
@@ -147,6 +173,12 @@ if [ "$INSTALLER_TYPE" == "apex" ]; then
     sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
 fi
 
+if [ "$INSTALLER_TYPE" == "daisy" ]; then
+    echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
+    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
+
 image_path=${HOME}/opnfv/dovetail/images
 if [[ ! -d ${image_path} ]]; then
     mkdir -p ${image_path}
@@ -174,20 +206,26 @@ docker_volume="-v /var/run/docker.sock:/var/run/docker.sock"
 dovetail_home_volume="-v ${DOVETAIL_HOME}:${DOVETAIL_HOME}"
 
 # Pull the image with correct tag
-echo "Dovetail: Pulling image opnfv/dovetail:${DOCKER_TAG}"
-docker pull opnfv/dovetail:$DOCKER_TAG >$redirect
+DOCKER_REPO='opnfv/dovetail'
+if [ "$(uname -m)" = 'aarch64' ]; then
+    DOCKER_REPO="${DOCKER_REPO}_$(uname -m)"
+    DOCKER_TAG="latest"
+fi
+
+echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
+docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect
 
 env4bgpvpn="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}"
 
 cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
-     ${sshkey} ${env4bgpvpn} opnfv/dovetail:${DOCKER_TAG} /bin/bash"
+     ${sshkey} ${env4bgpvpn} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
 echo "Dovetail: running docker run command: ${cmd}"
 ${cmd} >${redirect}
 sleep 5
-container_id=$(docker ps | grep "opnfv/dovetail:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+container_id=$(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | awk '{print $1}' | head -1)
 echo "Container ID=${container_id}"
 if [ -z ${container_id} ]; then
-    echo "Cannot find opnfv/dovetail container ID ${container_id}. Please check if it is existing."
+    echo "Cannot find ${DOCKER_REPO} container ID ${container_id}. Please check if it is existing."
     docker ps -a
     exit 1
 fi
@@ -195,11 +233,25 @@ echo "Container Start: docker start ${container_id}"
 docker start ${container_id}
 sleep 5
 docker ps >${redirect}
-if [ $(docker ps | grep "opnfv/dovetail:${DOCKER_TAG}" | wc -l) == 0 ]; then
-    echo "The container opnfv/dovetail with ID=${container_id} has not been properly started. Exiting..."
+if [ $(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | wc -l) == 0 ]; then
+    echo "The container ${DOCKER_REPO} with ID=${container_id} has not been properly started. Exiting..."
     exit 1
 fi
 
+if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
+    source_cmd="source ${OPENRC}"
+    get_public_url_cmd="openstack --insecure endpoint list --service keystone --interface public | sed -n 4p | awk '{print \$14}'"
+    public_url=$(sudo docker exec "$container_id" /bin/bash -c "${source_cmd} && ${get_public_url_cmd}")
+    sed -i 's#OS_AUTH_URL=.*#OS_AUTH_URL='"${public_url}"'#g' ${OPENRC}
+    sed -i 's/internal/public/g' ${OPENRC}
+    if [[ ${public_url} =~ 'v2' ]]; then
+        sed -i "s/OS_IDENTITY_API_VERSION=3/OS_IDENTITY_API_VERSION=2.0/g" ${OPENRC}
+        sed -i '/OS_PROJECT_DOMAIN_NAME/d' ${OPENRC}
+        sed -i '/OS_USER_DOMAIN_NAME/d' ${OPENRC}
+    fi
+    cat ${OPENRC}
+fi
+
 # Modify tempest_conf.yaml file
 tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
 if [[ ${INSTALLER_TYPE} == 'compass' || ${INSTALLER_TYPE} == 'apex' ]]; then
index 5dc8a72..902e754 100644 (file)
       - zte-pod1:
           slave-label: zte-pod1
           <<: *master
-      - zte-pod3:
-          slave-label: zte-pod3
-          <<: *master
       - zte-pod1:
           slave-label: zte-pod1
           <<: *euphrates
-      - zte-pod3:
-          slave-label: zte-pod3
-          <<: *euphrates
-      - zte-pod1:
-          slave-label: zte-pod1
-          <<: *danube
     # -------------------------------
     #       scenarios
     # -------------------------------
@@ -94,6 +85,8 @@
           auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
       - 'os-onos-nofeature-noha':
           auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+      - 'os-ovn-nofeature-noha':
+          auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
       - 'os-nosdn-kvm-noha':
           auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
       - 'os-nosdn-ovs-noha':
                     build-step-failure-threshold: 'never'
                     failure-threshold: 'never'
                     unstable-threshold: 'FAILURE'
-      # 1.dovetail only has master, based on D release
-      # 2.here the stream means the SUT stream, dovetail stream is defined in its own job
-      # 3.only debug testsuite here(refstack, ha, ipv6, bgpvpn)
-      # 4.not used for release criteria or compliance,
-      #   only to debug the dovetail tool bugs with bgpvpn and nosdn-nofeature
-      # 5.only run against scenario os-odl-bgpvpn-ha(regex used here, can extend to more scenarios future)
-      # 6.ZTE pod1, os-nosdn-nofeature-ha and os-odl-bgpvpn-ha, run against danube
-      - conditional-step:
-          condition-kind: and
-          condition-operands:
-            - condition-kind: regex-match
-              regex: os-(nosdn-nofeature|odl_l2-bgpvpn)-ha
-              label: '{scenario}'
-            - condition-kind: regex-match
-              regex: 'danube'
-              label: '{stream}'
-          steps:
-            - trigger-builds:
-                - project: 'dovetail-fuel-{pod}-proposed_tests-master'
-                  current-parameters: false
-                  predefined-parameters:
-                    DEPLOY_SCENARIO={scenario}
-                  block: true
-                  same-node: true
-                  block-thresholds:
-                    build-step-failure-threshold: 'never'
-                    failure-threshold: 'never'
-                    unstable-threshold: 'FAILURE'
+      # 1.here the stream means the SUT stream, dovetail stream is defined in its own job
 +      # 2. only debug testsuites here (refstack, ha, vping, ipv6, tempest, bgpvpn)
+      # 3.not used for release criteria or compliance, only to debug the dovetail tool bugs
+      # 4.ZTE pod1, os-nosdn-nofeature-ha and os-odl-bgpvpn-ha, run against danube
+      - trigger-builds:
+          - project: 'dovetail-fuel-{pod}-proposed_tests-{stream}'
+            current-parameters: false
+            predefined-parameters:
+              DEPLOY_SCENARIO={scenario}
+            block: true
+            same-node: true
+            block-thresholds:
+              build-step-failure-threshold: 'never'
+              failure-threshold: 'never'
+              unstable-threshold: 'FAILURE'
       - conditional-step:
           condition-kind: not
           condition-operand:
     name: 'fuel-os-onos-nofeature-noha-baremetal-daily-master-trigger'
     triggers:
       - timed: ''
+- trigger:
+    name: 'fuel-os-ovn-nofeature-noha-baremetal-daily-master-trigger'
+    triggers:
+      - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-kvm-noha-baremetal-daily-master-trigger'
     triggers:
     name: 'fuel-os-onos-nofeature-noha-baremetal-daily-euphrates-trigger'
     triggers:
       - timed: ''
+- trigger:
+    name: 'fuel-os-ovn-nofeature-noha-baremetal-daily-euphrates-trigger'
+    triggers:
+      - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-kvm-noha-baremetal-daily-euphrates-trigger'
     triggers:
     name: 'fuel-os-onos-nofeature-noha-virtual-daily-master-trigger'
     triggers:
       - timed: ''  # '5 23 * * *'
+- trigger:
+    name: 'fuel-os-ovn-nofeature-noha-virtual-daily-master-trigger'
+    triggers:
+      - timed: '5 23 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm-noha-virtual-daily-master-trigger'
     triggers:
     name: 'fuel-os-onos-nofeature-noha-virtual-daily-euphrates-trigger'
     triggers:
       - timed: ''  # '0 23 * * *'
+- trigger:
+    name: 'fuel-os-ovn-nofeature-noha-virtual-daily-euphrates-trigger'
+    triggers:
+      - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-kvm-noha-virtual-daily-euphrates-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-zte-pod1-daily-master-trigger'
     triggers:
-      - timed: '0 10 * * *'
+      - timed: '0 22 * * *'
 - trigger:
     name: 'fuel-os-odl-nofeature-ha-zte-pod1-daily-master-trigger'
     triggers:
     name: 'fuel-os-onos-nofeature-noha-zte-pod1-daily-master-trigger'
     triggers:
       - timed: ''
+- trigger:
+    name: 'fuel-os-ovn-nofeature-noha-zte-pod1-daily-master-trigger'
+    triggers:
+      - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-kvm-noha-zte-pod1-daily-master-trigger'
     triggers:
     triggers:
       - timed: ''
 # ----------------------------------------------
-# ZTE POD3 Triggers running against master branch
-# ----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''    # '0 10 * * *'
-- trigger:
-    name: 'fuel-os-odl-nofeature-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-odl-nofeature-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-master-trigger'
-    triggers:
-      - timed: ''
-# ----------------------------------------------
 # ZTE POD1 Triggers running against euphrates branch
 # ----------------------------------------------
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
-      - timed: ''
+      - timed: '0 10 * * *'
 - trigger:
     name: 'fuel-os-odl-nofeature-ha-zte-pod1-daily-euphrates-trigger'
     triggers:
     name: 'fuel-os-onos-nofeature-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
       - timed: ''
+- trigger:
+    name: 'fuel-os-ovn-nofeature-noha-zte-pod1-daily-euphrates-trigger'
+    triggers:
+      - timed: ''
 - trigger:
     name: 'fuel-os-nosdn-kvm-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-euphrates-trigger'
     triggers:
       - timed: ''
-# ----------------------------------------------
-# ZTE POD3 Triggers running against euphrates branch
-# ----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''  # '0 18 * * *'
-- trigger:
-    name: 'fuel-os-odl-nofeature-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''  # '0 2 * * *'
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-odl-nofeature-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-euphrates-trigger'
-    triggers:
-      - timed: ''
-# -----------------------------------------------
-# ZTE POD1 Triggers running against danube branch
-# -----------------------------------------------
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-odl-nofeature-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-ha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-# NOHA Scenarios
-- trigger:
-    name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-odl-nofeature-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-sfc-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-onos-nofeature-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-ovs-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
-- trigger:
-    name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger'
-    triggers:
-      - timed: ''
index 2d5d397..79e5c15 100644 (file)
       - 'suite':
           job-timeout: 60
       - 'daily':
-          job-timeout: 240
+          job-timeout: 300
       - 'arm-daily':
-          job-timeout: 240
+          job-timeout: 300
 
     jobs:
       - 'functest-{installer}-{pod}-{testsuite}-{stream}'
index 86f4c42..aeeb246 100644 (file)
           submodule:
             recursive: true
             timeout: 20
+
+- scm:
+    name: git-scm-openstack
+    scm:
+      - git: &git-scm-openstack-defaults
+          url: '$GIT_BASE'
+          branches:
+            - 'origin/$BRANCH'
+          timeout: 15
+
 - trigger:
     name: 'daily-trigger-disabled'
     triggers:
index ae0ae1e..376d09b 100644 (file)
           name: GIT_BASE
           default: https://gerrit.opnfv.org/gerrit/$PROJECT
 
+- parameter:
+    name: 'intel-pod17-defaults'
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: 'Slave name on Jenkins'
+          allowed-slaves:
+            - intel-pod17
+          default-slaves:
+            - intel-pod17
+      - string:
+          name: GIT_BASE
+          default: https://gerrit.opnfv.org/gerrit/$PROJECT
+
 - parameter:
     name: 'huawei-virtual5-defaults'
     parameters:
index db2fb8b..103069f 100644 (file)
@@ -7,7 +7,7 @@
     name: qtip-experimental-jobs
     project: qtip
     jobs:
-      - 'qtip-experimental-{stream}'
+      - 'qtip-experimental-{pod}-{stream}'
     stream:
       - master:
           branch: '{stream}'
           gs-pathname: '/{stream}'
           disabled: false
 
+    pod:
+      - zte-virtual6:
+          installer: fuel
+          pod: zte-virtual6
+
 ################################
 ## job templates
 #################################
 
 - job-template:
-    name: 'qtip-experimental-{stream}'
+    name: 'qtip-experimental-{pod}-{stream}'
 
     disabled: '{obj:disabled}'
 
@@ -31,9 +36,8 @@
       - project-parameter:
           project: '{project}'
           branch: '{branch}'
-      # Pin the tests on zte-pod6 with MCP deployment
-      - fuel-defaults
-      - zte-virtual6-defaults
+      - '{installer}-defaults'
+      - '{pod}-defaults'
     scm:
       - git-scm-gerrit
 
index 047d7f2..783c92b 100644 (file)
@@ -8,8 +8,6 @@
     project: qtip
     jobs:
       - 'qtip-verify-{stream}'
-      - 'qtip-verify-notebook-{stream}'
-      - 'qtip-merged-notebook-{stream}'
     stream:
       - master:
           branch: '{stream}'
@@ -67,7 +65,8 @@
       - publish-coverage
       - email-jenkins-admins-on-failure
 
-# upload juypter notebook to artifacts for review
 +# Upload jupyter notebook to artifacts for review
+# TODO(yujunz): deal with *.ipynb deletion
 - job-template:
     name: 'qtip-verify-notebook-{stream}'
 
 
           mkdir -p $local_path
 
-          git diff HEAD~1 --name-only | grep -E ".+\.ipynb$" | xargs -I '{}' cp '{}' $local_path
+          git diff HEAD~1 --name-status | grep -E "[AM]\t.+\.ipynb$" | awk '{print $2}' \
+            | xargs -I '{}' cp '{}' $local_path
           gsutil -m cp -r "$local_path" "gs://$gs_base/"
 
           echo "Document link(s):" >> gerrit_comment.txt
index 299908d..db2e427 100644 (file)
@@ -26,8 +26,7 @@
       - 'cobbler'
       - 'db'
       - 'deck'
-      - 'tasks-k8s'
-      - 'tasks-osa'
+      - 'tasks-base'
 
     # settings for jobs run in multijob phases
     build-job-settings: &build-job-settings
 
     builders:
       - multijob:
-          name: 'build compass-tasks images'
+          name: 'build compass-tasks-base images'
           execution-type: PARALLEL
           projects:
-            - name: 'compass-tasks-build-amd64-{stream}'
+            - name: 'compass-tasks-base-build-amd64-{stream}'
               <<: *build-job-settings
       - multijob:
           name: 'build all compass images'
@@ -94,9 +93,7 @@
               <<: *build-job-settings
             - name: 'compass-deck-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'compass-tasks-k8s-build-amd64-{stream}'
-              <<: *build-job-settings
-            - name: 'compass-tasks-osa-build-amd64-{stream}'
+            - name: 'compass-tasks-build-amd64-{stream}'
               <<: *build-job-settings
 
     publishers:
index acf381f..92dd54e 100644 (file)
@@ -58,8 +58,8 @@
     # yamllint enable rule:key-duplicates
     jobs:
       - "functest-docker-{stream}"
-      - "functest-{image}-build-{arch_tag}-{stream}"
-      - "functest-{image}-manifest-{stream}"
+      - "functest-{image}-docker-build-{arch_tag}-{stream}"
+      - "functest-{image}-docker-manifest-{stream}"
 
 ########################
 # job templates
           name: 'build functest-core images'
           execution-type: PARALLEL
           projects:
-            - name: 'functest-core-build-amd64-{stream}'
+            - name: 'functest-core-docker-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-core-build-arm64-{stream}'
+            - name: 'functest-core-docker-build-arm64-{stream}'
               <<: *build-job-settings
       - multijob:
           name: 'publish functest-core manifests'
           execution-type: PARALLEL
           projects:
-            - name: 'functest-core-manifest-{stream}'
+            - name: 'functest-core-docker-manifest-{stream}'
               <<: *manifest-job-settings
       - multijob:
           name: 'build all functest images'
           condition: SUCCESSFUL
           execution-type: PARALLEL
           projects:
-            - name: 'functest-healthcheck-build-amd64-{stream}'
+            - name: 'functest-healthcheck-docker-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-healthcheck-build-arm64-{stream}'
+            - name: 'functest-healthcheck-docker-build-arm64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-features-build-amd64-{stream}'
+            - name: 'functest-features-docker-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-features-build-arm64-{stream}'
+            - name: 'functest-features-docker-build-arm64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-components-build-amd64-{stream}'
+            - name: 'functest-components-docker-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-components-build-arm64-{stream}'
+            - name: 'functest-components-docker-build-arm64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-parser-build-amd64-{stream}'
+            - name: 'functest-parser-docker-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-parser-build-arm64-{stream}'
+            - name: 'functest-parser-docker-build-arm64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-smoke-build-amd64-{stream}'
+            - name: 'functest-smoke-docker-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-smoke-build-arm64-{stream}'
+            - name: 'functest-smoke-docker-build-arm64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-vnf-build-amd64-{stream}'
+            - name: 'functest-vnf-docker-build-amd64-{stream}'
               <<: *build-job-settings
-            - name: 'functest-restapi-build-amd64-{stream}'
+            - name: 'functest-restapi-docker-build-amd64-{stream}'
               <<: *build-job-settings
       - multijob:
           name: 'publish all manifests'
           condition: SUCCESSFUL
           execution-type: PARALLEL
           projects:
-            - name: 'functest-healthcheck-manifest-{stream}'
+            - name: 'functest-healthcheck-docker-manifest-{stream}'
               <<: *manifest-job-settings
-            - name: 'functest-features-manifest-{stream}'
+            - name: 'functest-features-docker-manifest-{stream}'
               <<: *manifest-job-settings
-            - name: 'functest-components-manifest-{stream}'
+            - name: 'functest-components-docker-manifest-{stream}'
               <<: *manifest-job-settings
-            - name: 'functest-parser-manifest-{stream}'
+            - name: 'functest-parser-docker-manifest-{stream}'
               <<: *manifest-job-settings
-            - name: 'functest-smoke-manifest-{stream}'
+            - name: 'functest-smoke-docker-manifest-{stream}'
               <<: *manifest-job-settings
-            - name: 'functest-vnf-manifest-{stream}'
+            - name: 'functest-vnf-docker-manifest-{stream}'
               <<: *manifest-job-settings
-            - name: 'functest-restapi-manifest-{stream}'
+            - name: 'functest-restapi-docker-manifest-{stream}'
               <<: *manifest-job-settings
 
     publishers:
       - 'functest-arm64-recipients'
 
 - job-template:
-    name: 'functest-{image}-build-{arch_tag}-{stream}'
+    name: 'functest-{image}-docker-build-{arch_tag}-{stream}'
     disabled: '{obj:disabled}'
     parameters:
       - job-parameters:
           exit $?
 
 - job-template:
-    name: 'functest-{image}-manifest-{stream}'
+    name: 'functest-{image}-docker-manifest-{stream}'
 
     parameters:
       - project-parameter:
index ec7b3fd..7f646f1 100644 (file)
@@ -61,7 +61,9 @@ done
 # Remove the existing containers and images before building
 remove_containers_images
 
-cd "$WORKSPACE/$DOCKER_DIR" || exit 1
+DOCKER_PATH=$WORKSPACE/$DOCKER_DIR
+
+cd $DOCKER_PATH || exit 1
 HOST_ARCH="$(uname -m)"
 #If there is a patch for other arch then x86, apply the patch and
 #replace Dockerfile file
@@ -107,7 +109,8 @@ echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
 echo "--------------------------------------------------------"
 echo
 cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BUILD_BRANCH
-    -f $DOCKERFILE ."
+    $ARCH_BUILD_ARG
+    -f $DOCKERFILE $DOCKER_PATH"
 
 echo ${cmd}
 ${cmd}
index 8250bfe..3351a5e 100644 (file)
           project: 'releng-anteater'
           <<: *master
           <<: *other-receivers
+      - 'barometer':
+          project: 'barometer'
+          <<: *master
+          <<: *other-receivers
       - 'bottlenecks':
           project: 'bottlenecks'
           <<: *master
@@ -57,6 +61,8 @@
           <<: *other-receivers
       - 'qtip':
           project: 'qtip'
+          dockerdir: '.'
+          dockerfile: 'docker/Dockerfile.local'
           <<: *master
           <<: *other-receivers
       - 'storperf-master':
           project: 'yardstick'
           <<: *master
           <<: *other-receivers
-      # projects with jobs for Danube
-      - 'qtip':
-          project: 'qtip'
+      # projects with jobs for euphrates
+      - 'bottlenecks':
+          project: 'bottlenecks'
           <<: *euphrates
           <<: *other-receivers
-      - 'yardstick':
-          project: 'yardstick'
+      - 'nfvbench':
+          project: 'nfvbench'
           <<: *euphrates
           <<: *other-receivers
-      # projects with jobs for euphrates
-      - 'bottlenecks':
-          project: 'bottlenecks'
+      - 'qtip':
+          project: 'qtip'
           <<: *euphrates
           <<: *other-receivers
       - 'storperf-master':
           arch_tag: 'x86_64'
           <<: *euphrates
           <<: *storperf-receivers
-      - 'nfvbench':
-          project: 'nfvbench'
+      - 'yardstick':
+          project: 'yardstick'
           <<: *euphrates
           <<: *other-receivers
+      # projects with jobs for danube
+      - 'dovetail':
+          project: 'dovetail'
+          <<: *danube
+          <<: *other-receivers
 
     # yamllint enable rule:key-duplicates
     jobs:
index 2408c2a..6fa0aef 100644 (file)
@@ -61,6 +61,8 @@ echo "--------------------------------------"
 gsutil cp "$WORKSPACE/opnfv-archive-$DATE.tar.gz" \
     "gs://opnfv-archive/opnfv-archive-$DATE.tar.gz" 2>&1
 
+echo "https://storage.googleapis.com/opnfv-archive/opnfv-archive-$DATE.tar.gz" > archive-link.txt
+
 rm -f opnfv-archive-$DATE.tar.gz
 
 echo "Finished"
index 80cd08f..6f77cd2 100644 (file)
@@ -24,8 +24,8 @@
           description: Slaves to prune docker images
           default-slaves:
             - arm-build2
+            - ericsson-build3
             - ericsson-build4
-            - ericsson-build5
             - lf-build2
           allowed-multiselect: true
           ignore-offline-nodes: true
       - shell:
           !include-raw-escape: opnfv-repo-archiver.sh
 
+    publishers:
+      - email-ext:
+          content-type: 'text'
+          failure: false
+          always: true
+          body:
+            ${{FILE,path="archive-link.txt"}}
+          reply-to: >
+            helpdesk@opnfv.org
+          recipients: >
+            swinslow@linuxfoundation.org
+
+
 - job-template:
     name: 'check-status-of-slaves'
 
index bbce974..e4c2d0e 100644 (file)
             - comment-added-contains-event:
                 comment-contains-value: 'recheck'
           silent-start: true
+          custom-url: '* $JOB_NAME $BUILD_URL'
           projects:
             - project-compare-type: 'PLAIN'
               project-pattern: 'openstack/bifrost'
index 263f544..198f2e1 100755 (executable)
@@ -25,9 +25,6 @@ cd ~/bifrost
 # provision 3 VMs; xcimaster, controller, and compute
 ./scripts/bifrost-provision.sh
 
-# list the provisioned VMs
-source env-vars
-ironic node-list
 sudo -H -E virsh list
 EOF
 chmod a+x bifrost_test.sh
@@ -44,6 +41,6 @@ export XCI_UPDATE_CLEAN_VM_OS=true
 
 ./xci/scripts/vm/start-new-vm.sh $VM_DISTRO
 
-rsync -a $WORKSPACE/ ${VM_DISTRO}_xci_vm:~/bifrost
+rsync -a -e "ssh -F $HOME/.ssh/xci-vm-config" $WORKSPACE/ ${VM_DISTRO}_xci_vm:~/bifrost
 
 ssh -F $HOME/.ssh/xci-vm-config ${VM_DISTRO}_xci_vm "cd ~/bifrost/releng-xci && ./bifrost_test.sh"
index 26c1575..64daa69 100644 (file)
@@ -2,7 +2,7 @@
 - project:
     name: 'opnfv-osa-periodic'
 
-    project: 'releng-xci'
+    project: openstack-ansible
     # -------------------------------
     # branches
     # -------------------------------
     # distros
     # -------------------------------
     distro:
-      - 'xenial':
+      - ubuntu:
+          disabled: false
+      - centos:
+          disabled: false
+      - opensuse:
           disabled: false
-      - 'centos7':
-          disabled: true
-      - 'suse':
-          disabled: true
     # -------------------------------
     # type
     # -------------------------------
     type:
       - virtual
     # -------------------------------
-    # phases
+    # periodic deploy & test phases
     # -------------------------------
     phase:
       - 'deploy'
     # jobs
     # -------------------------------
     jobs:
-      - 'osa-periodic-{distro}-{type}-{stream}'
-      - 'osa-periodic-{phase}-{type}-{stream}'
-
+      - 'xci-osa-periodic-{distro}-{type}-{stream}'
+      - 'xci-osa-periodic-{distro}-{phase}-{type}-{stream}'
 # -------------------------------
 # job templates
 # -------------------------------
 - job-template:
-    name: 'osa-periodic-{distro}-{type}-{stream}'
+    name: 'xci-osa-periodic-{distro}-{type}-{stream}'
 
     project-type: multijob
 
     disabled: '{obj:disabled}'
 
-    concurrent: false
+    concurrent: true
 
     properties:
       - logrotate-default
       - build-blocker:
           use-build-blocker: true
           blocking-jobs:
-            - 'xci-verify-.*'
-            - 'bifrost-verify-.*'
-            - 'bifrost-periodic-.*'
-            - 'osa-verify-.*'
-            - 'osa-periodic-.*'
+            - 'xci-verify-{distro}-.*'
+            - 'bifrost-verify-{distro}-.*'
+            - 'bifrost-periodic-{distro}-.*'
+            - 'xci-osa-verify-{distro}-.*'
+            - 'xci-osa-periodic-{distro}-.*'
           block-level: 'NODE'
+      - throttle:
+          max-per-node: 2
+          max-total: 10
+          categories:
+            - xci-verify-virtual
+          option: category
 
     wrappers:
       - ssh-agent-wrapper
       - fix-workspace-permissions
 
     scm:
-      - git-scm-osa
-
-    triggers:
-      - pollscm:
-          cron: "@midnight"
-          ignore-post-commit-hooks: true
+      - git-scm-openstack
 
     parameters:
       - project-parameter:
           branch: '{branch}'
       - label:
           name: SLAVE_LABEL
-          default: 'xci-virtual-{distro}'
+          default: 'xci-virtual'
+      - string:
+          name: OPENSTACK_OSA_VERSION
+          default: 'master'
+      - string:
+          name: CLEAN_DIB_IMAGES
+          default: 'true'
+      - string:
+          name: GIT_BASE
+          default: 'https://git.openstack.org/openstack/$PROJECT'
+          description: 'Git URL to use on this Jenkins Slave'
 
     builders:
       - description-setter:
           name: deploy
           condition: SUCCESSFUL
           projects:
-            - name: 'osa-periodic-deploy-{type}-{stream}'
+            - name: 'xci-osa-periodic-{distro}-deploy-{type}-{stream}'
               current-parameters: true
               predefined-parameters: |
                 DISTRO={distro}
                 DEPLOY_SCENARIO=os-nosdn-nofeature-noha
-              git-revision: true
+                OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
+                CLEAN_DIB_IMAGES=$CLEAN_DIB_IMAGES
+                BRANCH=$BRANCH
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: true
+              git-revision: true
       - multijob:
           name: healthcheck
           condition: SUCCESSFUL
           projects:
-            - name: 'osa-periodic-healthcheck-{type}-{stream}'
+            - name: 'xci-osa-periodic-{distro}-healthcheck-{type}-{stream}'
               current-parameters: true
               predefined-parameters: |
                 DISTRO={distro}
                 DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+                OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
+                CLEAN_DIB_IMAGES=$CLEAN_DIB_IMAGES
                 FUNCTEST_MODE=tier
                 FUNCTEST_TIER=healthcheck
+                BRANCH=$BRANCH
               node-parameters: true
               kill-phase-on: NEVER
-              abort-all-job: false
+              abort-all-job: true
 
 - job-template:
-    name: 'osa-periodic-{phase}-{type}-{stream}'
+    name: 'xci-osa-periodic-{distro}-{phase}-{type}-{stream}'
 
     disabled: false
 
       - build-blocker:
           use-build-blocker: true
           blocking-jobs:
-            - 'xci-verify-deploy-.*'
-            - 'xci-verify-healthcheck-.*'
-            - 'bifrost-verify-.*'
-            - 'bifrost-periodic-.*'
-            - 'osa-verify-deploy-.*'
-            - 'osa-verify-halthcheck-.*'
-            - 'osa-periodic-deploy-.*'
-            - 'osa-periodic-healthcheck-.*'
+            - '.*-bifrost-verify-.*'
+            - '.*-bifrost-periodic-.*'
           block-level: 'NODE'
 
     parameters:
       - project-parameter:
           project: '{project}'
           branch: '{branch}'
-      - label:
-          name: SLAVE_LABEL
-          default: 'xci-virtual-{distro}'
-      - string:
-          name: OPENSTACK_OSA_VERSION
-          default: 'master'
       - string:
           name: DISTRO
-          default: 'xenial'
+          default: 'ubuntu'
       - string:
           name: DEPLOY_SCENARIO
           default: 'os-nosdn-nofeature-noha'
       - string:
-          name: XCI_FLAVOR
-          default: 'mini'
-      - string:
-          name: XCI_LOOP
-          default: 'periodic'
-      - string:
-          name: OPNFV_RELENG_DEV_PATH
-          default: $WORKSPACE/releng-xci
+          name: OPENSTACK_OSA_VERSION
+          default: 'master'
       - string:
           name: FUNCTEST_MODE
           default: 'tier'
           name: FUNCTEST_SUITE_NAME
           default: 'healthcheck'
       - string:
-          name: FORCE_MASTER
+          name: XCI_FLAVOR
+          default: 'mini'
+      - string:
+          name: CLEAN_DIB_IMAGES
           default: 'true'
+      - string:
+          name: INSTALLER_TYPE
+          default: 'osa'
       - string:
           name: GIT_BASE
-          default: https://gerrit.opnfv.org/gerrit/$PROJECT
-
-    scm:
-      - git-scm-osa
+          default: 'https://git.openstack.org/openstack/$PROJECT'
+          description: 'Git URL to use on this Jenkins Slave'
 
     wrappers:
       - ssh-agent-wrapper
           timeout: 240
       - fix-workspace-permissions
 
+    scm:
+      - git-scm-openstack
+
     builders:
       - description-setter:
           description: "Built on $NODE_NAME"
-      - 'osa-periodic-{phase}-macro'
+      - 'xci-osa-periodic-{phase}-macro'
 
 # -------------------------------
 # builder macros
 # -------------------------------
 - builder:
-    name: 'osa-periodic-deploy-macro'
+    name: 'xci-osa-periodic-deploy-macro'
     builders:
       - shell: |
           #!/bin/bash
 
-          # here we will
-          # - clone releng-xci repo as the jobs are running against openstack gerrit
-          #   and we need to clone releng-xci ourselves to $OPNFV_RELENG_DEV_PATH
-          # - run sources-branch-updater.sh from osa to update/pin the role versions
-          #   at the time this job gets triggered against osa master in case if the
-          #   deployment succeeds and we decide to bump version used by xci
-          # - copy generated role versions into $OPNFV_RELENG_DEV_PATH/xci/file
-          # - start the deployment by executing xci-deploy.sh as usual
-          #
-          # we might also need to pin versions of openstack services as well.
+          cd $WORKSPACE
+
+          # The start-new-vm.sh script will copy the entire releng-xci directory
+          # so let's prepare the test script now so it can be copied by the script.
+          # Please do not move it elsewhere or you would have to move it to the VM
+          # yourself.
+          cat > xci_test.sh<<EOF
+          #!/bin/bash
+          export DISTRO=$DISTRO
+          export DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+          export OPENSTACK_OSA_VERSION=$OPENSTACK_OSA_VERSION
+          export FUNCTEST_MODE=$FUNCTEST_MODE
+          export FUNCTEST_SUITE_NAME=$FUNCTEST_SUITE_NAME
+          export XCI_FLAVOR=$XCI_FLAVOR
+          export CLEAN_DIB_IMAGES=$CLEAN_DIB_IMAGES
+          export OPNFV_RELENG_DEV_PATH=/home/devuser/releng-xci/
+          export INSTALLER_TYPE=$INSTALLER_TYPE
+          export GIT_BASE=$GIT_BASE
+          export JENKINS_HOME=$JENKINS_HOME
+
+          cd xci
+          ./xci-deploy.sh
+          EOF
+          chmod a+x xci_test.sh
+
+          export XCI_BUILD_CLEAN_VM_OS=false
+          export XCI_UPDATE_CLEAN_VM_OS=true
+
+          ./xci/scripts/vm/start-new-vm.sh $DISTRO
+      - shell: |
+          #!/bin/bash
+
+          ssh -F $HOME/.ssh/xci-vm-config ${DISTRO}_xci_vm "cd releng-xci && ./xci_test.sh"
 
-          echo "Hello World!"
 
 - builder:
-    name: 'osa-periodic-healthcheck-macro'
+    name: 'xci-osa-periodic-healthcheck-macro'
     builders:
       - shell: |
           #!/bin/bash
 
           echo "Hello World!"
-# -------------------------------
-# scm macro
-# -------------------------------
-- scm:
-    name: git-scm-osa
-    scm:
-      - git:
-          url: https://review.openstack.org/p/openstack/openstack-ansible.git
-          branches:
-            - master
-          timeout: 15
+      - shell: |
+          #!/bin/bash
+
+          sudo virsh destroy ${DISTRO}_xci_vm
+          sudo virsh undefine ${DISTRO}_xci_vm
+
+# this will be enabled once the xci is prepared
+# - builder:
+#    name: 'xci-verify-healthcheck-macro'
+#    builders:
+#        - shell:
+#            !include-raw: ../../utils/fetch_os_creds.sh
+#        - shell:
+#            !include-raw: ../functest/functest-alpine.sh
index 93ca187..fffd5c2 100644 (file)
@@ -68,8 +68,8 @@
             - 'xci-verify-{distro}-.*'
             - 'bifrost-verify-{distro}-.*'
             - 'bifrost-periodic-{distro}-.*'
-            - 'osa-verify-{distro}-.*'
-            - 'osa-periodic-{distro}-.*'
+            - 'xci-osa-verify-{distro}-.*'
+            - 'xci-osa-periodic-{distro}-.*'
           block-level: 'NODE'
       - throttle:
           max-per-node: 2
               branches:
                 - branch-compare-type: 'ANT'
                   branch-pattern: '**/{branch}'
-              disable-strict-forbidden-file-verification: 'true'
-              file-paths:
-                - compare-type: ANT
-                  pattern: 'bifrost/**'
-                - compare-type: ANT
-                  pattern: 'xci/**'
+              disable-strict-forbidden-file-verification: 'false'
               forbidden-file-paths:
                 - compare-type: ANT
-                  pattern: 'prototypes/**'
-                - compare-type: ANT
-                  pattern: 'upstream/**'
-                - compare-type: ANT
-                  pattern: '**/README.rst'
-                - compare-type: ANT
-                  pattern: 'docs/**'
+                  pattern: 'xci/scripts/vm/**'
           readable-message: true
+          custom-url: '* $JOB_NAME $BUILD_URL'
           skip-vote:
             successful: '{obj:successful}'
             failed: '{obj:failed}'
               current-parameters: true
               predefined-parameters: |
                 DISTRO={distro}
-                DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+                DEPLOY_SCENARIO=os-nosdn-nofeature
                 CLEAN_DIB_IMAGES=$CLEAN_DIB_IMAGES
                 GERRIT_BRANCH=$GERRIT_BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
               current-parameters: true
               predefined-parameters: |
                 DISTRO={distro}
-                DEPLOY_SCENARIO=os-nosdn-nofeature-noha
+                DEPLOY_SCENARIO=os-nosdn-nofeature
                 CLEAN_DIB_IMAGES=$CLEAN_DIB_IMAGES
                 FUNCTEST_MODE=tier
                 FUNCTEST_TIER=healthcheck
       - build-blocker:
           use-build-blocker: true
           blocking-jobs:
-            - 'bifrost-verify-.*'
-            - 'bifrost-periodic-.*'
+            - '.*-bifrost-verify-.*'
+            - '.*-bifrost-periodic-.*'
             - 'osa-verify-.*'
             - 'osa-periodic-.*'
           block-level: 'NODE'
           default: 'ubuntu'
       - string:
           name: DEPLOY_SCENARIO
-          default: 'os-nosdn-nofeature-noha'
+          default: 'os-nosdn-nofeature'
       - string:
           name: FUNCTEST_MODE
           default: 'tier'
index 7486d8a..e2fee29 100644 (file)
           fail: true
 
     builders:
-      - yardstick-unit-tests-and-docs-build
+      - yardstick-unit-tests-python-27
+      - yardstick-unit-tests-python-3
+      - yardstick-functional-tests-python-27
+      - yardstick-functional-tests-python-3
+      - yardstick-coverage-tests
+      - yardstick-pep8-tests
 
 - job-template:
     name: 'yardstick-merge-{stream}'
           fail: true
 
     builders:
-      - yardstick-unit-tests-and-docs-build
+      - yardstick-unit-tests-python-27
+      - yardstick-unit-tests-python-3
+      - yardstick-functional-tests-python-27
+      - yardstick-functional-tests-python-3
+      - yardstick-coverage-tests
+      - yardstick-pep8-tests
 
 ################################
 # job builders
 ################################
 
 - builder:
-    name: yardstick-unit-tests-and-docs-build
+    name: yardstick-unit-tests-python-27
     builders:
       - shell: |
           #!/bin/bash
 
           sudo apt-get install -y build-essential python-dev python3-dev
 
-          echo "Running unit tests..."
+          echo "Running unit tests in Python 2.7 ..."
           cd $WORKSPACE
-          tox
+          tox -epy27
+
+- builder:
+    name: yardstick-unit-tests-python-3
+    builders:
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+
+          sudo apt-get install -y build-essential python-dev python3-dev
+
+          echo "Running unit tests in Python 3 ..."
+          cd $WORKSPACE
+          tox -epy3
+
+- builder:
+    name: yardstick-functional-tests-python-27
+    builders:
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+
+          sudo apt-get install -y build-essential python-dev python3-dev
+
+          echo "Running functional tests in Python 2.7 ..."
+          cd $WORKSPACE
+          tox -efunctional
+
+- builder:
+    name: yardstick-functional-tests-python-3
+    builders:
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+
+          sudo apt-get install -y build-essential python-dev python3-dev
+
+          echo "Running functional tests in Python 3 ..."
+          cd $WORKSPACE
+          tox -efunctional-py3
+
+- builder:
+    name: yardstick-coverage-tests
+    builders:
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+
+          sudo apt-get install -y build-essential python-dev python3-dev
+
+          echo "Running coverage tests ..."
+          cd $WORKSPACE
+          tox -ecoverage
+
+- builder:
+    name: yardstick-pep8-tests
+    builders:
+      - shell: |
+          #!/bin/bash
+          set -o errexit
+          set -o pipefail
+
+          sudo apt-get install -y build-essential python-dev python3-dev
+
+          echo "Running style guidelines (PEP8) tests ..."
+          cd $WORKSPACE
+          tox -epep8
diff --git a/modules/opnfv/deployment/daisy/__init__.py b/modules/opnfv/deployment/daisy/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/modules/opnfv/deployment/daisy/adapter.py b/modules/opnfv/deployment/daisy/adapter.py
new file mode 100644 (file)
index 0000000..5634e24
--- /dev/null
@@ -0,0 +1,202 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+from opnfv.deployment import manager
+from opnfv.utils import opnfv_logger as logger
+from opnfv.utils import ssh_utils
+
+logger = logger.Logger(__name__).getLogger()
+
+
class DaisyAdapter(manager.DeploymentHandler):
    """Deployment handler for the Daisy installer.

    Discovers clusters and nodes by running the Daisy CLI on the
    installer node (through ``self.installer_node.run_cmd``) and parsing
    its table output.
    """

    def __init__(self, installer_ip, installer_user, installer_pwd):
        super(DaisyAdapter, self).__init__(installer='daisy',
                                           installer_ip=installer_ip,
                                           installer_user=installer_user,
                                           installer_pwd=installer_pwd,
                                           pkey_file=None)

    def _get_clusters(self):
        """Return the deployed clusters as a list of dicts.

        Each dict has the keys ``id``, ``status``, ``name`` and ``nodes``.
        Returns None when ``daisy cluster-list`` reports no environments
        (callers such as get_deployment_status rely on this).
        """
        clusters = []
        cmd = 'source /root/daisyrc_admin; daisy cluster-list | grep -v "+--"'
        output = self.installer_node.run_cmd(cmd)
        lines = output.rsplit('\n')
        if len(lines) < 2:
            logger.info("No environments found in the deployment.")
            return None

        # First line is the table header; locate each column index so the
        # row parsing below survives column reordering in the CLI output.
        fields = lines[0].rsplit('|')

        index_id = -1
        index_status = -1
        index_name = -1
        index_nodes = -1

        for i in range(len(fields)):
            if "ID" in fields[i]:
                index_id = i
            elif "Status" in fields[i]:
                index_status = i
            elif "Name" in fields[i]:
                index_name = i
            elif "Nodes" in fields[i]:
                index_nodes = i

        # One dict per data row (renamed from `dict`, which shadowed the
        # builtin).
        for i in range(1, len(lines)):
            fields = lines[i].rsplit('|')
            cluster_info = {"id": fields[index_id].strip(),
                            "status": fields[index_status].strip(),
                            "name": fields[index_name].strip(),
                            "nodes": fields[index_nodes].strip()}
            clusters.append(cluster_info)

        return clusters

    def get_nodes(self, options=None):
        """Return the list of DaisyNode objects in the deployment.

        options may contain a 'cluster' key to restrict the result to a
        single cluster id. Results are cached on ``self.nodes`` by the
        base class; the cache is reused when present.
        """
        if hasattr(self, 'nodes') and len(self.nodes) > 0:
            if options and 'cluster' in options and options['cluster']:
                nodes = []
                for node in self.nodes:
                    if str(node.info['cluster']) == str(options['cluster']):
                        nodes.append(node)
                return nodes
            else:
                return self.nodes

        # _get_clusters() returns None when no environments exist; guard
        # against iterating None (previously raised TypeError here).
        clusters = self._get_clusters() or []
        nodes = []
        for cluster in clusters:
            if options and 'cluster' in options and options['cluster']:
                if cluster["id"] != options['cluster']:
                    continue
            cmd = 'source /root/daisyrc_admin; daisy host-list ' \
                  '--cluster-id {} | grep -v "+--"'.format(cluster["id"])
            output = self.installer_node.run_cmd(cmd)
            lines = output.rsplit('\n')
            if len(lines) < 2:
                logger.info("No nodes found in the cluster {}".format(
                    cluster["id"]))
                continue

            # Header parsing, same strategy as _get_clusters().
            fields = lines[0].rsplit('|')
            index_id = -1
            index_status = -1
            index_name = -1

            for i in range(len(fields)):
                if "ID" in fields[i]:
                    index_id = i
                elif "Role_status" in fields[i]:
                    index_status = i
                elif "Name" in fields[i]:
                    index_name = i

            for i in range(1, len(lines)):
                fields = lines[i].rsplit('|')
                node_id = fields[index_id].strip().encode()
                status_node = fields[index_status].strip().encode().lower()
                name = fields[index_name].strip().encode()
                # Node names look like "host-10-20-11-2"; the dotted IP is
                # reassembled from everything after the first dash.
                ip = ".".join(name.split("-")[1:])

                cmd_role = 'source /root/daisyrc_admin; ' \
                           'daisy host-detail {} | grep "^| role"'.format(
                               node_id)
                output_role = self.installer_node.run_cmd(cmd_role)
                role_all = output_role.rsplit('|')[2].strip().encode()
                roles = []
                # "COMPUTER" is Daisy's spelling of the compute role.
                if 'COMPUTER' in role_all:
                    roles.append(manager.Role.COMPUTE)
                if 'CONTROLLER_LB' in role_all or 'CONTROLLER_HA' in role_all:
                    roles.append(manager.Role.CONTROLLER)

                ssh_client = None
                if status_node == 'active':
                    status = manager.NodeStatus.STATUS_OK
                    # Hop through the installer node; Daisy deploys a DSA
                    # key for root on the target nodes.
                    proxy = {'ip': self.installer_ip,
                             'username': self.installer_user,
                             'password': self.installer_pwd,
                             'pkey_file': '/root/.ssh/id_dsa'}
                    ssh_client = ssh_utils.get_ssh_client(hostname=ip,
                                                          username='root',
                                                          proxy=proxy)
                else:
                    status = manager.NodeStatus.STATUS_INACTIVE

                node = DaisyNode(node_id, ip, name, status, roles, ssh_client)
                nodes.append(node)
        return nodes

    def get_openstack_version(self):
        """Return the OpenStack version string from the first active
        controller, or None if no active controller is found."""
        cmd = 'docker exec nova_api nova-manage version 2>/dev/null'
        version = None
        for node in self.nodes:
            if node.is_controller() and node.is_active():
                version = node.run_cmd(cmd)
                break
        return version

    def get_sdn_version(self):
        """Return the OpenDaylight version string, or None when no
        active controller runs an opendaylight container."""
        version = None
        for node in self.nodes:
            if manager.Role.CONTROLLER in node.roles and node.is_active():
                cmd = 'docker inspect --format=\'{{.Name}}\' `docker ps -q`'
                output = node.run_cmd(cmd)
                if '/opendaylight' in output.rsplit('\n'):
                    cmd2 = 'docker exec opendaylight ' \
                           'sudo yum info opendaylight 2>/dev/null ' \
                           '| grep Version | tail -1'
                    odl_ver = node.run_cmd(cmd2)
                    if odl_ver:
                        version = 'OpenDaylight: ' + odl_ver.split(' ')[-1]
                    break
        return version

    def get_deployment_status(self):
        """Return the status of the first cluster, or 'unknown' when no
        clusters exist."""
        clusters = self._get_clusters()
        if clusters is None or len(clusters) == 0:
            return 'unknown'
        else:
            return clusters[0]['status']
+
+
class DaisyNode(manager.Node):
    """A single node (controller or compute) in a Daisy deployment."""

    def __init__(self,
                 id,
                 ip,
                 name,
                 status,
                 roles=None,
                 ssh_client=None,
                 info=None):
        super(DaisyNode, self).__init__(id, ip, name, status,
                                        roles, ssh_client, info)

    def is_odl(self):
        '''
        Returns if the node is an opendaylight
        '''
        # Only an active controller can host the ODL container.
        if manager.Role.CONTROLLER not in self.roles or not self.is_active():
            return False
        cmd = 'docker inspect --format=\'{{.Name}}\' `docker ps -q`'
        containers = self.run_cmd(cmd)
        return '/opendaylight' in containers.rsplit('\n')

    def get_ovs_info(self):
        '''
        Returns the ovs version installed
        '''
        if not self.is_active():
            return None
        cmd = 'docker exec openvswitch_vswitchd ' \
              'ovs-vsctl --version | head -1 | awk \'{print $NF}\''
        return self.run_cmd(cmd)
index e14783f..2788e5e 100644 (file)
@@ -12,6 +12,7 @@ from opnfv.deployment.apex import adapter as apex_adapter
 from opnfv.deployment.compass import adapter as compass_adapter
 from opnfv.deployment.fuel import adapter as fuel_adapter
 from opnfv.deployment.osa import adapter as osa_adapter
+from opnfv.deployment.daisy import adapter as daisy_adapter
 from opnfv.utils import opnfv_logger as logger
 
 logger = logger.Logger(__name__).getLogger()
@@ -51,6 +52,10 @@ class Factory(object):
             return osa_adapter.OSAAdapter(installer_ip=installer_ip,
                                           installer_user=installer_user,
                                           pkey_file=pkey_file)
+        elif installer.lower() == "daisy":
+            return daisy_adapter.DaisyAdapter(installer_ip=installer_ip,
+                                              installer_user=installer_user,
+                                              installer_pwd=installer_pwd)
         else:
             raise Exception("Installer adapter is not implemented for "
                             "the given installer.")
index 4c5ff5c..175a380 100644 (file)
@@ -49,9 +49,11 @@ def get_ssh_client(hostname,
             client = paramiko.SSHClient()
         else:
             client = ProxyHopClient()
+            proxy_pkey_file = proxy.get('pkey_file', '/root/.ssh/id_rsa')
             client.configure_jump_host(proxy['ip'],
                                        proxy['username'],
-                                       proxy['password'])
+                                       proxy['password'],
+                                       proxy_pkey_file)
         if client is None:
             raise Exception('Could not connect to client')
 
@@ -115,6 +117,8 @@ class ProxyHopClient(paramiko.SSHClient):
                             jh_ssh_key='/root/.ssh/id_rsa'):
         self.proxy_ip = jh_ip
         self.proxy_ssh_key = jh_ssh_key
+        self.local_ssh_key = os.path.join(os.getcwd(),
+                                          jh_ssh_key.split('/')[-1])
         self.proxy_ssh = paramiko.SSHClient()
         self.proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.proxy_ssh.connect(jh_ip,
@@ -138,8 +142,12 @@ class ProxyHopClient(paramiko.SSHClient):
                                     self.local_ssh_key)
             if get_file_res is None:
                 raise Exception('Could\'t fetch SSH key from jump host')
-            proxy_key = (paramiko.RSAKey
-                         .from_private_key_file(self.local_ssh_key))
+            if self.proxy_ssh_key.split('/')[-1] == 'id_dsa':
+                proxy_key = (paramiko.DSSKey
+                             .from_private_key_file(self.local_ssh_key))
+            else:
+                proxy_key = (paramiko.RSAKey
+                             .from_private_key_file(self.local_ssh_key))
 
             self.proxy_channel = self.proxy_transport.open_channel(
                 "direct-tcpip",
index def5ecc..a60ece4 100644 (file)
@@ -92,6 +92,9 @@ def create_file(handler, INSTALLER_TYPE):
     if args.INSTALLER_TYPE == 'compass':
         for item in node_list:
             item['password'] = 'root'
+    elif args.INSTALLER_TYPE == 'daisy':
+        for item in node_list:
+            item['key_filename'] = '/root/.ssh/id_dsa'
     else:
         for item in node_list:
             item['key_filename'] = args.sshkey
index 238c4c6..0fcea0d 100644 (file)
@@ -23,7 +23,7 @@ dir_result="${HOME}/opnfv/$project/results/${branch}"
 node_list=(\
 'lf-pod1' 'lf-pod2' 'intel-pod2' 'intel-pod12' \
 'lf-virtual2' 'lf-virtual3' \
-'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' \
+'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' 'intel-pod18' \
 'ericsson-pod1' 'ericsson-pod2' \
 'ericsson-virtual1' 'ericsson-virtual2'  'ericsson-virtual3' \
 'ericsson-virtual4' 'ericsson-virtual5' 'ericsson-virtual12' \