[Dovetail] Changing the path of the ssh key in pod.yaml for yardstick
[releng.git] / jjb / dovetail / dovetail-run.sh
index 0c389e9..df681dd 100755 (executable)
@@ -24,6 +24,11 @@ mkdir -p ${DOVETAIL_HOME}
 DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
 mkdir -p ${DOVETAIL_CONFIG}
 
+DOVETAIL_IMAGES=${DOVETAIL_HOME}/images
+mkdir -p ${DOVETAIL_IMAGES}
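+# guest images are staged here, under DOVETAIL_HOME, which is shared with the container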
+
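+# skip SSH host key recording/verification for all ssh/scp calls below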
+ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+
 sshkey=""
 # The path of openrc.sh is defined in fetch_os_creds.sh
 OPENRC=${DOVETAIL_CONFIG}/env_config.sh
@@ -76,6 +81,7 @@ if [[ -f $OPENRC ]]; then
             exit 1
         fi
     fi
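+    # persist EXTERNAL_NETWORK so everything that sources the openrc can use it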
+    echo "export EXTERNAL_NETWORK=${EXTERNAL_NETWORK}" >> ${OPENRC}
 else
     echo "ERROR: cannot find file $OPENRC. Please check if it is existing."
     sudo ls -al ${DOVETAIL_CONFIG}
@@ -88,13 +94,37 @@ fi
 cat $OPENRC
 
 if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
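+    # read node1's IPMI credentials from the compass4nfv hardware config for this pod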
+    compass_repo=${WORKSPACE}/compass4nfv/
+    git clone https://github.com/opnfv/compass4nfv.git ${compass_repo} >/dev/null
+    sudo pip install shyaml
+    scenario_file=${compass_repo}/deploy/conf/hardware_environment/$NODE_NAME/os-nosdn-nofeature-ha.yml
+    ipmiIp=$(cat ${scenario_file} | shyaml get-value hosts.0.ipmiIp)
+    ipmiPass=$(cat ${scenario_file} | shyaml get-value hosts.0.ipmiPass)
+    ipmiUser=root
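+    # the jump server's docker0 address doubles as the Jumpserver node (node0) IP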
+    jumpserver_ip=$(ifconfig | grep -A 5 docker0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
+
     cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
 nodes:
-- {ip: 10.1.0.52, name: node1, password: root, role: controller, user: root}
+- {ip: ${jumpserver_ip}, name: node0, password: root, role: Jumpserver, user: root}
+- {ip: 10.1.0.50, name: node1, password: root, role: controller, user: root,
+   ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
 - {ip: 10.1.0.51, name: node2, password: root, role: controller, user: root}
-- {ip: 10.1.0.50, name: node3, password: root, role: controller, user: root}
-- {ip: 10.1.0.54, name: node4, password: root, role: compute, user: root}
-- {ip: 10.1.0.53, name: node5, password: root, role: compute, user: root}
+- {ip: 10.1.0.52, name: node3, password: root, role: controller, user: root}
+- {ip: 10.1.0.53, name: node4, password: root, role: compute, user: root}
+- {ip: 10.1.0.54, name: node5, password: root, role: compute, user: root}
+
+EOF
+fi
+
+if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
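+    # query the Salt master on the Fuel installer for the OpenStack control address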
+    fuel_ctl_ssh_options="${ssh_options} -i ${SSH_KEY}"
+    ssh_user="ubuntu"
+    fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt --out yaml 'ctl*' pillar.get _param:openstack_control_address | \
+                awk '{print \$2; exit}'")
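+    # key_filename is the ssh key path as mounted inside the yardstick container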
+    cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
+nodes:
+- {ip: ${fuel_ctl_ip}, name: node1, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
 
 EOF
 fi
@@ -116,13 +146,16 @@ if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
         options="-u root -p r00tme"
     elif [[ ${INSTALLER_TYPE} == apex ]]; then
         options="-u stack -k /root/.ssh/id_rsa"
+    elif [[ ${INSTALLER_TYPE} == daisy ]]; then
+        options="-u root -p r00tme"
     else
         echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
         echo "HA test cases may not run properly."
     fi
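+    # -s below sets the in-container path of the ssh key written into pod.yaml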
 
     cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
-         -i ${INSTALLER_IP} ${options} -f ${DOVETAIL_CONFIG}/pod.yaml"
+         -i ${INSTALLER_IP} ${options} -f ${DOVETAIL_CONFIG}/pod.yaml \
+         -s /home/opnfv/userconfig/pre_config/id_rsa"
     echo ${cmd}
     ${cmd}
 
@@ -134,6 +167,14 @@ if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
 fi
 
 if [ -f ${DOVETAIL_CONFIG}/pod.yaml ]; then
+    sudo chmod 666 ${DOVETAIL_CONFIG}/pod.yaml
+    echo "Adapt process info for $INSTALLER_TYPE ..."
+    attack_process='rabbitmq'
+    cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
+process_info:
+- {testcase_name: dovetail.ha.tc010, attack_process: ${attack_process}}
+
+EOF
     echo "file ${DOVETAIL_CONFIG}/pod.yaml:"
     cat ${DOVETAIL_CONFIG}/pod.yaml
 else
@@ -142,8 +183,6 @@ else
     echo "HA test cases may not run properly."
 fi
 
-ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-
 if [ "$INSTALLER_TYPE" == "fuel" ]; then
     if [[ "${SUT_BRANCH}" =~ "danube" ]]; then
         echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
@@ -158,6 +197,12 @@ if [ "$INSTALLER_TYPE" == "apex" ]; then
     sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
 fi
 
+if [ "$INSTALLER_TYPE" == "daisy" ]; then
+    echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
+    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
 image_path=${HOME}/opnfv/dovetail/images
 if [[ ! -d ${image_path} ]]; then
     mkdir -p ${image_path}
@@ -168,16 +213,45 @@ if [[ ! -f ${ubuntu_image} ]]; then
     echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
     wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
 fi
-sudo cp ${ubuntu_image} ${DOVETAIL_CONFIG}
+sudo cp ${ubuntu_image} ${DOVETAIL_IMAGES}
 
-# functest needs to download this image first before running
+# yardstick and bottlenecks need to download this image first before running
 cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
 if [[ ! -f ${cirros_image} ]]; then
     echo "Download image cirros-0.3.5-x86_64-disk.img ..."
     wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
 fi
-sudo cp ${cirros_image} ${DOVETAIL_CONFIG}
+sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
 
+# functest needs to download this image first before running
+cirros_image=${image_path}/cirros-0.4.0-x86_64-disk.img
+if [[ ! -f ${cirros_image} ]]; then
+    echo "Download image cirros-0.4.0-x86_64-disk.img ..."
+    wget -q -nc http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img -P ${image_path}
+fi
+sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
+
+# snaps_smoke test case needs to download this image first before running
+ubuntu14_image=${image_path}/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+if [[ ! -f ${ubuntu14_image} ]]; then
+    echo "Download image ubuntu-14.04-server-cloudimg-amd64-disk1.img ..."
+    wget -q -nc https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${image_path}
+fi
+sudo cp ${ubuntu14_image} ${DOVETAIL_IMAGES}
+
+# cloudify_ims test case needs to download these 2 images first before running
+cloudify_image=${image_path}/cloudify-manager-premium-4.0.1.qcow2
+if [[ ! -f ${cloudify_image} ]]; then
+    echo "Download image cloudify-manager-premium-4.0.1.qcow2 ..."
+    wget -q -nc http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 -P ${image_path}
+fi
+sudo cp ${cloudify_image} ${DOVETAIL_IMAGES}
+trusty_image=${image_path}/trusty-server-cloudimg-amd64-disk1.img
+if [[ ! -f ${trusty_image} ]]; then
+    echo "Download image trusty-server-cloudimg-amd64-disk1.img ..."
+    wget -q -nc http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -P ${image_path}
+fi
+sudo cp ${trusty_image} ${DOVETAIL_IMAGES}
 
 opts="--privileged=true -id"
 
@@ -194,10 +268,8 @@ fi
 echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
 docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect
 
-env4bgpvpn="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}"
-
 cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
-     ${sshkey} ${env4bgpvpn} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
+     ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
 echo "Dovetail: running docker run command: ${cmd}"
 ${cmd} >${redirect}
 sleep 5
@@ -217,15 +289,6 @@ if [ $(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | wc -l) == 0 ]; then
     exit 1
 fi
 
-if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
-    source_cmd="source ${OPENRC}"
-    get_public_url_cmd="openstack --insecure endpoint list --service keystone --interface public | sed -n 4p | awk '{print \$14}'"
-    public_url=$(sudo docker exec "$container_id" /bin/bash -c "${source_cmd} && ${get_public_url_cmd}")
-    sed -i 's#OS_AUTH_URL=.*#OS_AUTH_URL='"${public_url}"'#g' ${OPENRC}
-    sed -i 's/internal/public/g' ${OPENRC}
-    cat ${OPENRC}
-fi
-
 # Modify tempest_conf.yaml file
 tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
 if [[ ${INSTALLER_TYPE} == 'compass' || ${INSTALLER_TYPE} == 'apex' ]]; then
@@ -249,22 +312,28 @@ cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/ho
 echo "exec command: ${cp_tempest_cmd}"
 $cp_tempest_cmd
 
-list_cmd="dovetail list ${TESTSUITE}"
-run_cmd="dovetail run --testsuite ${TESTSUITE} -d"
-echo "Container exec command: ${list_cmd}"
-docker exec $container_id ${list_cmd}
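+# with TESTSUITE 'default', omit --testsuite and let dovetail run its default suite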
+if [[ ${TESTSUITE} == 'default' ]]; then
+    testsuite=''
+else
+    testsuite="--testsuite ${TESTSUITE}"
+fi
+
+run_cmd="dovetail run ${testsuite} -d"
 echo "Container exec command: ${run_cmd}"
 docker exec $container_id ${run_cmd}
 
 sudo cp -r ${DOVETAIL_HOME}/results ./
 # Make sure the current user owns the result files copied in the line above
-# if not, there will be error when next time to wipe workspace
-# CURRENT_USER=${SUDO_USER:-$USER}
-# PRIMARY_GROUP=$(id -gn $CURRENT_USER)
-# sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ${WORKSPACE}/results
+echo "Change owner of result files ..."
+CURRENT_USER=${SUDO_USER:-$USER}
+PRIMARY_GROUP=$(id -gn $CURRENT_USER)
+echo "Current user is ${CURRENT_USER}, group is ${PRIMARY_GROUP}"
+sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ./results
 
-#remove useless workspace from yardstick to save disk space
+# remove useless files to save disk space
 sudo rm -rf ./results/workspace
+sudo rm -f ./results/yardstick.img
+sudo rm -f ./results/tmp*
 
 echo "Dovetail: done!"