Merge "Removing _l3 from Apex scenario names in master"
[releng.git] / jjb / apex / apex-deploy.sh
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail

APEX_PKGS="common undercloud" # removed onos for danube
IPV6_FLAG=False
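
# Expected from the Jenkins job environment (nounset aborts the script on any
# missing variable): WORKSPACE, BUILD_DIRECTORY, ARTIFACT_NAME, GS_URL,
# DEPLOY_SCENARIO, OPNFV_CLEAN and JOB_NAME (plus GERRIT_EVENT_COMMENT_TEXT
# for gate jobs).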

# log info to console
echo "Starting the Apex deployment."
echo "--------------------------------------------------------"
echo

if ! rpm -q wget > /dev/null; then
  sudo yum -y install wget
fi

if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
    # Build is from a verify or promote job, so use local build artifacts (not RPMs)
    cd "$WORKSPACE/../$BUILD_DIRECTORY"
    WORKSPACE=$(pwd)
    echo "WORKSPACE modified to $WORKSPACE"
    cd "$WORKSPACE/ci"
24 elif [[ ! "$ARTIFACT_NAME" == "latest" ]]; then
25     # if artifact name is passed the pull a
26     # specific artifact from artifacts.opnfv.org
27     # artifact specified should be opnfv-apex-<version>.noarch.rpm
28     RPM_INSTALL_PATH=$GS_URL
29     RPM_LIST=$RPM_INSTALL_PATH/$ARTIFACT_NAME
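    # e.g. with a hypothetical ARTIFACT_NAME=opnfv-apex-4.0-20170308.noarch.rpm,
    # RPM_LIST becomes $GS_URL/opnfv-apex-4.0-20170308.noarch.rpm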
else
    # Use latest RPMs
    if [[ $BUILD_DIRECTORY == *apex-build* ]]; then
      # Triggered from a daily job, so RPMs should be in a local directory
      BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
      echo "BUILD DIRECTORY modified to $BUILD_DIRECTORY"

      if [[ -f ${BUILD_DIRECTORY}/../opnfv.properties ]]; then
        # if opnfv.properties exists then use the
        # local build. Source the file so we get local OPNFV vars
        source ${BUILD_DIRECTORY}/../opnfv.properties
        RPM_INSTALL_PATH=${BUILD_DIRECTORY}/noarch
        RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
      else
        echo "BUILD_DIRECTORY is from a daily job, so will not use latest from URL"
        echo "Check that the slave has opnfv.properties in $BUILD_DIRECTORY"
        exit 1
      fi
    else
      # use the latest from artifacts.opnfv.org
      # get latest.properties to obtain the link to the latest artifact
      if ! wget -O $WORKSPACE/opnfv.properties http://$GS_URL/latest.properties; then
        echo "ERROR: Unable to find latest.properties at ${GS_URL}...exiting"
        exit 1
      fi
      # source the file so we get OPNFV vars
      source $WORKSPACE/opnfv.properties
      RPM_INSTALL_PATH=$(echo "http://"$OPNFV_RPM_URL | sed 's/\/'"$(basename $OPNFV_RPM_URL)"'//')
      RPM_LIST=${RPM_INSTALL_PATH}/$(basename $OPNFV_RPM_URL)
    fi
fi
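
# At this point, non-verify/promote jobs have RPM_INSTALL_PATH and RPM_LIST
# pointing at the Apex RPMs to install (local build or artifacts.opnfv.org).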

# rename odl_l3 to odl only for master
# this can be removed once all the odl_l3 references
# are updated to odl after the danube jobs are removed
if [[ "$BUILD_DIRECTORY" == *master* ]]; then
    DEPLOY_SCENARIO=${DEPLOY_SCENARIO/odl_l3/odl}
fi
if [ -z "$DEPLOY_SCENARIO" ]; then
  echo "Deploy scenario not set!"
  exit 1
elif [[ "$DEPLOY_SCENARIO" == *gate* ]]; then
  echo "Detecting Gating scenario..."
  if [ -z "${GERRIT_EVENT_COMMENT_TEXT:-}" ]; then
    echo "ERROR: Gate job triggered without comment!"
    exit 1
  else
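    # e.g. a Gerrit comment containing "start-gate-scenario: os-odl-nofeature-ha"
    # yields DEPLOY_SCENARIO=os-odl-nofeature-ha (scenario name illustrative)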
    # '|| true' keeps errexit/pipefail from aborting here when the pattern is
    # absent, so the empty check below can report a useful error instead
    DEPLOY_SCENARIO=$(echo ${GERRIT_EVENT_COMMENT_TEXT} | grep start-gate-scenario | grep -Eo 'os-.*$' || true)
    if [ -z "$DEPLOY_SCENARIO" ]; then
      echo "ERROR: Unable to detect scenario in Gerrit Comment!"
      echo "Format of comment to trigger gate should be 'start-gate-scenario: <scenario>'"
      exit 1
    else
      echo "Gate scenario detected: ${DEPLOY_SCENARIO}"
    fi
  fi
fi

# use local build for verify and promote
if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
    if [ ! -e "${WORKSPACE}/build/lib" ]; then
      ln -s ${WORKSPACE}/lib ${WORKSPACE}/build/lib
    fi
    DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
    NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
    DEPLOY_CMD="$(pwd)/deploy.sh"
    IMAGES="${WORKSPACE}/.build/"
    BASE="${WORKSPACE}/build"
    LIB="${WORKSPACE}/lib"
    # Make sure python34 deps are installed
    for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
      if ! rpm -q ${dep_pkg} > /dev/null; then
        if ! sudo yum install -y ${dep_pkg}; then
          echo "Failed to install ${dep_pkg}"
          exit 1
        fi
      fi
    done

    # Make sure jinja2 is installed
    for python_pkg in jinja2; do
      if ! python3.4 -c "import $python_pkg"; then
        echo "$python_pkg package not found for python3.4, attempting to install..."
        if ! sudo easy_install-3.4 $python_pkg; then
          echo "Failed to install $python_pkg package for python3.4"
          exit 1
        fi
      fi
    done

    # Make sure the ipxe-roms-qemu package is updated to the latest version.
    # This package is needed for multi-virtio-NIC PXE boot in virtual environments.
    sudo yum update -y ipxe-roms-qemu

    if [ -z "${PYTHONPATH:-}" ]; then
        export PYTHONPATH=${WORKSPACE}/lib/python
    else
        export PYTHONPATH=$PYTHONPATH:${WORKSPACE}/lib/python
    fi
# use RPMs
else
    # find version of RPM
    VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-([0-9]{8}|[a-z]+-[0-9]\.[0-9]+)')
    # build the RPM list, which already includes the base Apex RPM
    for pkg in ${APEX_PKGS}; do
        RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}.noarch.rpm"
    done
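    # Illustrative example (hypothetical version string): for
    # opnfv-apex-4.0-20170308.noarch.rpm, VERSION_EXTENSION=4.0-20170308 and
    # RPM_LIST expands to the base RPM plus
    # opnfv-apex-common-4.0-20170308.noarch.rpm and
    # opnfv-apex-undercloud-4.0-20170308.noarch.rpm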

    # remove old / install new RPMs
    if rpm -q opnfv-apex > /dev/null; then
      INSTALLED_RPMS=$(rpm -qa | grep apex)
      if [ -n "$INSTALLED_RPMS" ]; then
        sudo yum remove -y ${INSTALLED_RPMS}
      fi
    fi

    if ! sudo yum install -y $RPM_LIST; then
      echo "Unable to install new RPMs: $RPM_LIST"
      exit 1
    fi

    DEPLOY_CMD=opnfv-deploy
    DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
    NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
    IMAGES="/var/opt/opnfv/images"
    BASE="/var/opt/opnfv"
    LIB="/var/opt/opnfv/lib"
fi

# prepend env vars to the deploy cmd
DEPLOY_CMD="BASE=${BASE} IMAGES=${IMAGES} LIB=${LIB} ${DEPLOY_CMD}"

if [ "$OPNFV_CLEAN" == 'yes' ]; then
  if sudo test -e '/root/inventory/pod_settings.yaml'; then
    clean_opts='-i /root/inventory/pod_settings.yaml'
  else
    clean_opts=''
  fi
  if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
    sudo BASE=${BASE} LIB=${LIB} ./clean.sh ${clean_opts}
  else
    sudo BASE=${BASE} LIB=${LIB} opnfv-clean ${clean_opts}
  fi
fi

if echo ${DEPLOY_SCENARIO} | grep -q ipv6; then
  IPV6_FLAG=True
  DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} | sed 's/-ipv6//')
  echo "INFO: IPV6 Enabled"
fi
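# e.g. a hypothetical os-odl-nofeature-ha-ipv6 scenario becomes
# os-odl-nofeature-ha with IPV6_FLAG=True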

echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
DEPLOY_FILE="${DEPLOY_SETTINGS_DIR}/${DEPLOY_SCENARIO}.yaml"

if [ ! -e "$DEPLOY_FILE" ]; then
  echo "ERROR: Required settings file missing: Deploy settings file ${DEPLOY_FILE}"
  exit 1
fi

188 if [[ "$JOB_NAME" == *virtual* ]]; then
189   # settings for virtual deployment
190   DEPLOY_CMD="${DEPLOY_CMD} -v"
191   if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
192     DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 14 --virtual-compute-ram 8"
193   fi
194   if [[ "$JOB_NAME" == *csit* ]]; then
195     DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
196   fi
197   if [[ "$JOB_NAME" == *promote* ]]; then
198     DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
199   fi
200 else
201   # settings for bare metal deployment
202   NETWORK_SETTINGS_DIR="/root/network"
203   INVENTORY_FILE="/root/inventory/pod_settings.yaml"
204
205 # (trozet) According to FDS folks uio_pci_generic works with UCS-B
206 # and there appears to be a bug with vfio-pci
207   # if fdio on baremetal, then we are using UCS enic and
208   # need to use vfio-pci instead of uio generic
209 #  if [[ "$DEPLOY_SCENARIO" == *fdio* ]]; then
210 #    TMP_DEPLOY_FILE="${WORKSPACE}/${DEPLOY_SCENARIO}.yaml"
211 #    cp -f ${DEPLOY_FILE} ${TMP_DEPLOY_FILE}
212 #    sed -i 's/^\(\s*uio-driver:\).*$/\1 vfio-pci/g' ${TMP_DEPLOY_FILE}
213 #    DEPLOY_FILE=${TMP_DEPLOY_FILE}
214 #  fi
215
216   if ! sudo test -e "$INVENTORY_FILE"; then
217     echo "ERROR: Required settings file missing: Inventory settings file ${INVENTORY_FILE}"
218     exit 1
219   fi
220   # include inventory file for bare metal deployment
221   DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
222 fi

if [ "$IPV6_FLAG" == "True" ]; then
  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
elif echo ${DEPLOY_SCENARIO} | grep -q fdio; then
  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
else
  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
fi

# Check that the network settings file exists
if ! sudo test -e "$NETWORK_FILE"; then
  echo "ERROR: Required settings file missing: Network Settings file ${NETWORK_FILE}"
  exit 1
fi

# start deployment
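# Fully expanded, the command below looks roughly like (illustrative values,
# bare metal RPM path shown):
#   sudo BASE=/var/opt/opnfv IMAGES=/var/opt/opnfv/images LIB=/var/opt/opnfv/lib \
#     opnfv-deploy -i /root/inventory/pod_settings.yaml \
#     -d /etc/opnfv-apex/os-odl-nofeature-ha.yaml \
#     -n /root/network/network_settings.yaml --debug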
sudo ${DEPLOY_CMD} -d ${DEPLOY_FILE} -n ${NETWORK_FILE} --debug

if [[ "$JOB_NAME" == *csit* ]]; then
  echo "CSIT job: setting host route for floating ip routing"
  # csit route to allow the docker container to reach floating ips
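  # (the 255.255.255.128 netmask below makes this the 192.168.37.128/25
  # network, presumably the floating IP range, routed via the undercloud VM)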
  UNDERCLOUD=$(sudo virsh domifaddr undercloud | grep -Eo '([0-9]+\.){3}[0-9]+')
  if sudo route | grep 192.168.37.128 > /dev/null; then
    sudo route del -net 192.168.37.128 netmask 255.255.255.128
  fi
  sudo route add -net 192.168.37.128 netmask 255.255.255.128 gw ${UNDERCLOUD}
fi

echo
echo "--------------------------------------------------------"
echo "Done!"