X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=jjb%2Fdovetail%2Fdovetail-run.sh;h=0b3510863b6e522c1c0b584fe5ec8115c95d8f46;hb=96745713cb9a9b377254050902ca196aa41bc8a5;hp=1c53dbee440d138878f89654d89a27a88b0db25f;hpb=14b5f30468a96c5e400d2139caac7c52a1542b26;p=releng.git

diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index 1c53dbee4..0b3510863 100755
--- a/jjb/dovetail/dovetail-run.sh
+++ b/jjb/dovetail/dovetail-run.sh
@@ -60,6 +60,10 @@ releng_repo=${WORKSPACE}/releng
 [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
 git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
 
+pharos_repo=${WORKSPACE}/pharos
+[ -d ${pharos_repo} ] && sudo rm -rf ${pharos_repo}
+git clone https://git.opnfv.org/pharos ${pharos_repo} >/dev/null
+
 if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
     echo "SUT branch is $SUT_BRANCH"
     echo "dovetail branch is $BRANCH"
@@ -81,6 +85,7 @@ if [[ -f $OPENRC ]]; then
             exit 1
         fi
     fi
+    echo "export EXTERNAL_NETWORK=${EXTERNAL_NETWORK}" >> ${OPENRC}
 else
     echo "ERROR: cannot find file $OPENRC. Please check if it is existing."
     sudo ls -al ${DOVETAIL_CONFIG}
@@ -92,10 +97,14 @@ if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "fuel" ]]; then
 fi
 cat $OPENRC
 
+# These packages are used for parsing yaml files and decrypting ipmi user and password.
+sudo pip install shyaml
+sudo yum install -y rubygems || sudo apt-get install -y ruby
+sudo gem install hiera-eyaml
+
 if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == "compass" ]]; then
     compass_repo=${WORKSPACE}/compass4nfv/
     git clone https://github.com/opnfv/compass4nfv.git ${compass_repo} >/dev/null
-    pip install shyaml
     scenario_file=${compass_repo}/deploy/conf/hardware_environment/$NODE_NAME/os-nosdn-nofeature-ha.yml
     ipmiIp=$(cat ${scenario_file} | shyaml get-value hosts.0.ipmiIp)
     ipmiPass=$(cat ${scenario_file} | shyaml get-value hosts.0.ipmiPass)
@@ -119,12 +128,39 @@ if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_T
     fuel_ctl_ssh_options="${ssh_options} -i ${SSH_KEY}"
     ssh_user="ubuntu"
     fuel_ctl_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
-            "sudo salt --out yaml 'ctl*' pillar.get _param:openstack_control_address | \
-            awk '{print \$2; exit}'") &> /dev/null
+            "sudo salt 'cfg*' pillar.get _param:openstack_control_address --out text| \
+            cut -f2 -d' '")
+    fuel_cmp_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt 'cmp001*' pillar.get _param:openstack_control_address --out text| \
+            cut -f2 -d' '")
+    fuel_dbs_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt 'dbs01*' pillar.get _param:openstack_control_address --out text| \
+            cut -f2 -d' '")
+    fuel_msg_ip=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt 'msg01*' pillar.get _param:openstack_control_address --out text| \
+            cut -f2 -d' '")
+    ipmi_index=$(ssh 2>/dev/null ${fuel_ctl_ssh_options} "${ssh_user}@${INSTALLER_IP}" \
+            "sudo salt 'ctl*' network.ip_addrs cidr=${fuel_ctl_ip} --out text | grep ${fuel_ctl_ip} | cut -c 5")
+
+    organization="$(cut -d'-' -f1 <<< "${NODE_NAME}")"
+    pod_name="$(cut -d'-' -f2 <<< "${NODE_NAME}")"
+    pdf_file=${pharos_repo}/labs/${organization}/${pod_name}.yaml
+    ipmiIp=$(cat ${pdf_file} | shyaml get-value nodes.$[ipmi_index-1].remote_management.address)
+    ipmiIp="$(cut -d'/' -f1 <<< "${ipmiIp}")"
+    ipmiPass=$(cat ${pdf_file} | shyaml get-value nodes.$[ipmi_index-1].remote_management.pass)
+    ipmiUser=$(cat ${pdf_file} | shyaml get-value nodes.$[ipmi_index-1].remote_management.user)
+    [[ $ipmiUser == ENC* ]] && ipmiUser=$(eyaml decrypt -s ${ipmiUser//[[:blank:]]/})
+    [[ $ipmiPass == ENC* ]] && ipmiPass=$(eyaml decrypt -s ${ipmiPass//[[:blank:]]/})
+
     cat << EOF >${DOVETAIL_CONFIG}/pod.yaml
 nodes:
-- {ip: ${fuel_ctl_ip}, name: node1, key_filename: /root/.ssh/id_rsa, role: controller, user: ${ssh_user}}
-
+- {ip: ${INSTALLER_IP}, name: node0, key_filename: /home/opnfv/userconfig/pre_config/id_rsa,
+  role: Jumpserver, user: ${ssh_user}}
+- {ip: ${fuel_ctl_ip}, name: node1, key_filename: /home/opnfv/userconfig/pre_config/id_rsa,
+  role: controller, user: ${ssh_user}, ipmi_ip: ${ipmiIp}, ipmi_user: ${ipmiUser}, ipmi_password: ${ipmiPass}}
+- {ip: ${fuel_msg_ip}, name: msg01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
+- {ip: ${fuel_cmp_ip}, name: cmp01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
+- {ip: ${fuel_dbs_ip}, name: dbs01, key_filename: /home/opnfv/userconfig/pre_config/id_rsa, role: controller, user: ${ssh_user}}
 EOF
 fi
 
@@ -153,7 +189,8 @@ if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
     fi
 
     cmd="sudo python ${releng_repo}/utils/create_pod_file.py -t ${INSTALLER_TYPE} \
-         -i ${INSTALLER_IP} ${options} -f ${DOVETAIL_CONFIG}/pod.yaml"
+         -i ${INSTALLER_IP} ${options} -f ${DOVETAIL_CONFIG}/pod.yaml \
+         -s /home/opnfv/userconfig/pre_config/id_rsa"
     echo ${cmd}
     ${cmd}
 fi
@@ -165,14 +202,23 @@ if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
 fi
 
 if [ -f ${DOVETAIL_CONFIG}/pod.yaml ]; then
-    sudo chown jenkins:jenkins ${DOVETAIL_CONFIG}/pod.yaml
+    sudo chmod 666 ${DOVETAIL_CONFIG}/pod.yaml
     echo "Adapt process info for $INSTALLER_TYPE ..."
-    attack_process='rabbitmq'
-    cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
+    if [ "$INSTALLER_TYPE" == "apex" ]; then
+        cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
 process_info:
-- {testcase_name: dovetail.ha.tc010, attack_process: ${attack_process}}
-
+- {testcase_name: dovetail.ha.rabbitmq, attack_process: rabbitmq_server}
+EOF
+    elif [ "$INSTALLER_TYPE" == "fuel" ]; then
+        cat << EOF >> ${DOVETAIL_CONFIG}/pod.yaml
+process_info:
+- {testcase_name: dovetail.ha.cinder_api, attack_process: cinder_wsgi}
+- {testcase_name: dovetail.ha.rabbitmq, attack_process: rabbitmq-server, attack_host: msg01}
+- {testcase_name: dovetail.ha.neutron_l3_agent, attack_process: neutron-l3-agent, attack_host: cmp01}
+- {testcase_name: dovetail.ha.database, attack_process: mysqld, attack_host: dbs01}
 EOF
+    fi
+
     echo "file ${DOVETAIL_CONFIG}/pod.yaml:"
     cat ${DOVETAIL_CONFIG}/pod.yaml
 else
@@ -213,7 +259,7 @@ if [[ ! -f ${ubuntu_image} ]]; then
 fi
 sudo cp ${ubuntu_image} ${DOVETAIL_IMAGES}
 
-# functest needs to download this image first before running
+# yardstick and bottlenecks need to download this image first before running
 cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
 if [[ ! -f ${cirros_image} ]]; then
     echo "Download image cirros-0.3.5-x86_64-disk.img ..."
@@ -221,6 +267,14 @@ if [[ ! -f ${cirros_image} ]]; then
 fi
 sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
 
+# functest needs to download this image first before running
+cirros_image=${image_path}/cirros-0.4.0-x86_64-disk.img
+if [[ ! -f ${cirros_image} ]]; then
+    echo "Download image cirros-0.4.0-x86_64-disk.img ..."
+    wget -q -nc http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img -P ${image_path}
+fi
+sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
+
 # snaps_smoke test case needs to download this image first before running
 ubuntu14_image=${image_path}/ubuntu-14.04-server-cloudimg-amd64-disk1.img
 if [[ ! -f ${ubuntu14_image} ]]; then
@@ -314,14 +368,16 @@ docker exec $container_id ${run_cmd}
 
 sudo cp -r ${DOVETAIL_HOME}/results ./
 # To make sure the file owner is the current user, for the copied results files in the above line
-# if not, there will be error when next time to wipe workspace
-# CURRENT_USER=${SUDO_USER:-$USER}
-# PRIMARY_GROUP=$(id -gn $CURRENT_USER)
-# sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ${WORKSPACE}/results
+echo "Change owner of result files ..."
+CURRENT_USER=${SUDO_USER:-$USER}
+PRIMARY_GROUP=$(id -gn $CURRENT_USER)
+echo "Current user is ${CURRENT_USER}, group is ${PRIMARY_GROUP}"
+sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ./results
 
 #remove useless files to save disk space
 sudo rm -rf ./results/workspace
 sudo rm -f ./results/yardstick.img
+sudo rm -f ./results/tmp*
 
 echo "Dovetail: done!"
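
The new Fuel branch above reads the controller's out-of-band (IPMI) credentials from the Pharos Pod Descriptor File rather than from a hard-coded scenario file, and decrypts them with hiera-eyaml when they are stored encrypted. The bash sketch below walks that same lookup in isolation so it can be tried by hand. It is a minimal sketch, not part of the patch: it assumes NODE_NAME follows the usual <organization>-<pod> convention (the example value is hypothetical), that the pharos repo has already been cloned into ${WORKSPACE}, that ipmi_index was obtained some other way (in the job it comes from a salt query against the controller), and that the hiera-eyaml keys are available wherever eyaml runs.

#!/bin/bash
# Sketch only: replays the PDF lookup performed by the fuel branch of the patch.
NODE_NAME=${NODE_NAME:-lf-pod2}     # hypothetical example value, "<org>-<pod>"
ipmi_index=${ipmi_index:-1}         # 1-based index of the controller node
pharos_repo=${WORKSPACE:-$PWD}/pharos

organization="$(cut -d'-' -f1 <<< "${NODE_NAME}")"
pod_name="$(cut -d'-' -f2 <<< "${NODE_NAME}")"
pdf_file=${pharos_repo}/labs/${organization}/${pod_name}.yaml

# shyaml reads a dotted key path from YAML on stdin; nodes is 0-based, hence index-1.
ipmiIp=$(shyaml get-value nodes.$((ipmi_index-1)).remote_management.address < "${pdf_file}")
ipmiIp="${ipmiIp%%/*}"              # drop a /prefix if the PDF stores a CIDR
ipmiUser=$(shyaml get-value nodes.$((ipmi_index-1)).remote_management.user < "${pdf_file}")
ipmiPass=$(shyaml get-value nodes.$((ipmi_index-1)).remote_management.pass < "${pdf_file}")

# Values may be stored eyaml-encrypted (ENC[PKCS7,...]); decrypt them in place.
# This needs the hiera-eyaml keys configured on the host running the commands.
[[ $ipmiUser == ENC* ]] && ipmiUser=$(eyaml decrypt -s "${ipmiUser//[[:blank:]]/}")
[[ $ipmiPass == ENC* ]] && ipmiPass=$(eyaml decrypt -s "${ipmiPass//[[:blank:]]/}")

echo "IPMI endpoint: ${ipmiUser}@${ipmiIp}"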
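
Whichever path produces ${DOVETAIL_CONFIG}/pod.yaml (the Fuel heredocs, the Apex/Compass branches, or create_pod_file.py), a quick read-back with shyaml, which this patch already installs, can confirm the file parses and carries the sections Dovetail expects before the container is started. This is only a suggested debugging aid, not something the job runs.

# Optional check, not executed by the job: read the generated pod.yaml back.
shyaml keys < ${DOVETAIL_CONFIG}/pod.yaml                    # expect "nodes" (plus "process_info" on apex/fuel)
shyaml get-value nodes.0.name < ${DOVETAIL_CONFIG}/pod.yaml
shyaml get-value nodes.0.role < ${DOVETAIL_CONFIG}/pod.yaml  # e.g. Jumpserver on fuel deployments
shyaml get-value process_info.0.attack_process < ${DOVETAIL_CONFIG}/pod.yaml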