Merge "Update opnfv-docs Merge Job Trigger and Parameter"
author Trevor Bramwell <tbramwell@linuxfoundation.org>
Thu, 20 Oct 2016 16:03:02 +0000 (16:03 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Thu, 20 Oct 2016 16:03:02 +0000 (16:03 +0000)
jjb/apex/apex-deploy.sh
jjb/apex/apex-upload-artifact.sh
jjb/armband/armband-ci-jobs.yml
jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
jjb/functest/functest-ci-jobs.yml
jjb/opnfv/slave-params.yml
jjb/yardstick/yardstick-ci-jobs.yml
prototypes/bifrost/scripts/destroy-env.sh
prototypes/puppet-infracloud/deploy_on_baremetal.md [new file with mode: 0644]

diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index 72fa6f6..e21387a 100755 (executable)
@@ -3,7 +3,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-APEX_PKGS="common undercloud opendaylight-sfc onos"
+APEX_PKGS="common undercloud onos"
 IPV6_FLAG=False
 
 # log info to console
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index 0dd112b..f54e4c5 100755 (executable)
@@ -49,13 +49,13 @@ echo "ISO Upload Complete!"
 RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
 RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
 VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
     RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
 done
 SRPM_INSTALL_PATH=$BUILD_DIRECTORY
 SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
 VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
     SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
 done
 }
diff --git a/jjb/armband/armband-ci-jobs.yml b/jjb/armband/armband-ci-jobs.yml
index d4fa5da..2122959 100644 (file)
             slave-label: arm-pod2
             installer: fuel
             <<: *colorado
+        - arm-pod3:
+            slave-label: arm-pod3
+            installer: fuel
+            <<: *colorado
 #--------------------------------
 #        master
 #--------------------------------
             slave-label: arm-pod2
             installer: fuel
             <<: *master
+        - arm-pod3:
+            slave-label: arm-pod3
+            installer: fuel
+            <<: *master
 #--------------------------------
 #       scenarios
 #--------------------------------
     name: 'fuel-os-odl_l2-sfc-noha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
+#----------------------------------------------------------
+# Enea Armband POD 3 Triggers running against master branch
+#----------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+#---------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against colorado branch
+#---------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
diff --git a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
index e81e300..7c47d9f 100644 (file)
@@ -1,9 +1,7 @@
 - project:
     name: 'daisy4nfv-verify-jobs'
 
-    project: 'daisy4nfv'
-
-    installer: 'daisy4nfv'
+    project: 'daisy'
 #####################################
 # branch definitions
 #####################################
 # builder macros
 #####################################
 - builder:
-    name: 'daisy4nfv-verify-basic-macro'
+    name: 'daisy-verify-basic-macro'
     builders:
         - shell:
             !include-raw: ./daisy4nfv-basic.sh
 
 - builder:
-    name: 'daisy4nfv-verify-build-macro'
+    name: 'daisy-verify-build-macro'
     builders:
         - shell:
             !include-raw: ./daisy4nfv-build.sh
 
 - builder:
-    name: 'daisy4nfv-verify-deploy-virtual-macro'
+    name: 'daisy-verify-deploy-virtual-macro'
     builders:
         - shell:
             !include-raw: ./daisy4nfv-virtual-deploy.sh
 
 - builder:
-    name: 'daisy4nfv-verify-smoke-test-macro'
+    name: 'daisy-verify-smoke-test-macro'
     builders:
         - shell: |
             #!/bin/bash
diff --git a/jjb/functest/functest-ci-jobs.yml b/jjb/functest/functest-ci-jobs.yml
index 3487793..afeb1f9 100644 (file)
             slave-label: '{pod}'
             installer: fuel
             <<: *master
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *master
         - zte-pod1:
             slave-label: '{pod}'
             installer: fuel
             slave-label: '{pod}'
             installer: fuel
             <<: *colorado
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *colorado
 # PODs for verify jobs triggered by each patch upload
         - ool-virtual1:
             slave-label: '{pod}'
diff --git a/jjb/opnfv/slave-params.yml b/jjb/opnfv/slave-params.yml
index 7eca41a..b46960f 100644 (file)
             name: LAB_CONFIG_URL
             default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
             description: 'Base URI to the configuration directory'
+- parameter:
+    name: 'arm-pod3-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - arm-pod3
+            default-slaves:
+                - arm-pod3
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: LAB_CONFIG_URL
+            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+            description: 'Base URI to the configuration directory'
 - parameter:
     name: 'intel-virtual6-defaults'
     parameters:
diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml
index 962ea47..c10daab 100644 (file)
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *colorado
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *colorado
         - orange-pod2:
             slave-label: '{pod}'
             installer: joid
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
+- parameter:
+    name: 'yardstick-params-arm-pod3'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+
 - parameter:
     name: 'yardstick-params-virtual'
     parameters:
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
index 6746457..cdc55df 100755 (executable)
@@ -46,7 +46,7 @@ fi
 rm -rf /var/lib/libvirt/images/*.qcow2
 
 echo "restarting services"
-service dnsmasq restart
+service dnsmasq restart || true
 service libvirtd restart
 service ironic-api restart
 service ironic-conductor start
diff --git a/prototypes/puppet-infracloud/deploy_on_baremetal.md b/prototypes/puppet-infracloud/deploy_on_baremetal.md
new file mode 100644 (file)
index 0000000..334dff4
--- /dev/null
@@ -0,0 +1,57 @@
+How to deploy Infra Cloud on baremetal
+======================================
+
+Install bifrost controller
+--------------------------
+The first step in deploying Infra Cloud is to install the bifrost controller. It can be virtualized; it does not need to run on baremetal.
+To achieve that, first create a virtual machine with libvirt, with the proper network setup: the VM needs to share one physical interface (the PXE boot one) with the controller and compute servers.
+Please follow documentation on: [https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md](https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md) to get sample templates and instructions for creating the bifrost VM.
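+
+For illustration, one way to share the PXE interface is to bridge it on the host and attach the bifrost VM to that bridge. A minimal sketch follows; the interface and bridge names (eth1, br-pxe) are placeholders for your environment:
+
+```bash
+# Hypothetical example: bridge the physical PXE NIC (eth1) into br-pxe.
+brctl addbr br-pxe
+brctl addif br-pxe eth1
+ip link set br-pxe up
+
+# Define a libvirt network backed by that bridge and enable it.
+cat > br-pxe.xml <<EOF
+<network>
+  <name>br-pxe</name>
+  <forward mode="bridge"/>
+  <bridge name="br-pxe"/>
+</network>
+EOF
+virsh net-define br-pxe.xml
+virsh net-start br-pxe
+virsh net-autostart br-pxe
+```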
+
+Once the **baremetal** VM is up, you can log in over ssh and install bifrost there. To proceed, follow these steps (consolidated into a shell sketch below):
+
+ 1. Change to the root user and install git
+ 2. Clone the releng project (cd /opt, git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. cd /opt/releng/prototypes/puppet-infracloud
+ 4. Copy hiera to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 5. Ensure the hostname is properly set (hostnamectl set-hostname baremetal.opnfvlocal, hostname -f)
+ 6. Install puppet and the modules (./install_puppet.sh, ./install_modules.sh)
+ 7. Apply puppet to install bifrost (puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules)
+
+ With these steps, you will have a bifrost controller up and running.
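+
+The same steps, consolidated as a shell sketch (run as root; assumes a systemd host where hostnamectl is available):
+
+```bash
+# Fetch the releng project and put the baremetal hiera data in place.
+cd /opt
+git clone https://gerrit.opnfv.org/gerrit/releng
+cd /opt/releng/prototypes/puppet-infracloud
+cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml
+
+# Set the hostname and verify the FQDN.
+hostnamectl set-hostname baremetal.opnfvlocal
+hostname -f
+
+# Install puppet plus modules, then apply the site manifest.
+./install_puppet.sh
+./install_modules.sh
+puppet apply manifests/site.pp \
+    --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+```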
+
+Deploy baremetal servers
+--------------------------
+Once the bifrost controller is ready, use it to start deploying the baremetal servers.
+On the same bifrost VM, follow these steps (see the sketch after the list):
+
+ 1. Source the bifrost env vars: source /opt/stack/bifrost/env-vars
+ 2. Export the baremetal servers inventory: export BIFROST_INVENTORY_SOURCE=/opt/stack/baremetal.json
+ 3. Enroll the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 4. Deploy the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 5. Wait until they reach the **active** state; check with: ironic node-list
+
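+The same steps as a shell sketch (assuming the playbooks are run from the bifrost playbooks directory, where inventory/ lives):
+
+```bash
+source /opt/stack/bifrost/env-vars
+export BIFROST_INVENTORY_SOURCE=/opt/stack/baremetal.json
+
+# Assumption: cwd is the directory containing inventory/ and the playbooks.
+ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml \
+    -e @/etc/bifrost/bifrost_global_vars
+ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml \
+    -e @/etc/bifrost/bifrost_global_vars
+
+# Re-run until every node reports the "active" provision state.
+ironic node-list
+```
+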
+If a server needs to be redeployed, you can reset it and deploy it again, as sketched below:
+
+ 1. ironic node-set-provision-state <name_of_server> deleted
+ 2. Wait, checking with ironic node-list, until the server is in the **available** state
+ 3. Deploy it again: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
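+
+For example, to recycle one node (the node name here is hypothetical):
+
+```bash
+ironic node-set-provision-state compute00.opnfvlocal deleted
+# Poll until the node reports "available", then deploy again.
+ironic node-list
+ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml \
+    -e @/etc/bifrost/bifrost_global_vars
+```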
+
+Deploy InfraCloud on the servers
+--------------------------------
+Once all the servers are in the **active** state, they can be accessed over ssh, and the InfraCloud manifests can be applied on them to deploy a controller and a compute node.
+On each of them, follow these steps (consolidated in the sketch below):
+
+ 1. ssh from the bifrost controller to each server's external IP: ssh root@172.30.13.90
+ 2. cd /opt and clone the releng project (git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. cd /opt/releng/prototypes/puppet-infracloud and copy hiera to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 4. Install the modules: ./install_modules.sh
+ 5. Apply puppet: puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
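+
+Consolidated sketch, to run on each server (the IP below is the example from step 1):
+
+```bash
+ssh root@172.30.13.90
+cd /opt
+git clone https://gerrit.opnfv.org/gerrit/releng
+cd /opt/releng/prototypes/puppet-infracloud
+cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml
+./install_modules.sh
+puppet apply manifests/site.pp \
+    --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+```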
+
+Once this has been done on the controller and the compute, you will have a working cloud. To start working with it, follow these steps (see the sketch after the list):
+
+ 1. Ensure that controller00.opnfvlocal resolves properly to the external IP (this is already done on the bifrost controller)
+ 2. Copy releng/prototypes/puppet-infracloud/creds/clouds.yaml to $HOME/.config/openstack/clouds.yaml
+ 3. Install python-openstackclient
+ 4. Specify the cloud you want to use: export OS_CLOUD=opnfvlocal
+ 5. Now you can start operating in your cloud with the openstack client: openstack flavor list
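+
+A sketch of those client-side steps (installing the client via pip here; any install method works):
+
+```bash
+mkdir -p $HOME/.config/openstack
+cp releng/prototypes/puppet-infracloud/creds/clouds.yaml \
+    $HOME/.config/openstack/clouds.yaml
+pip install python-openstackclient
+export OS_CLOUD=opnfvlocal
+openstack flavor list
+```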
+