X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=ci%2Fdeploy.sh;h=b722d2ae40adb5890883866ea54dcda3e05db7de;hb=ea65265e93f162bdf3b236d0c58ac2d30db57f88;hp=63d550a6a8c089f9b9c9a968616cb1406d3b82ce;hpb=3c54cd78b23a62a3a4ff1426cd8a9065e9fb6918;p=joid.git

diff --git a/ci/deploy.sh b/ci/deploy.sh
index 63d550a6..b722d2ae 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -2,190 +2,353 @@
 set -ex

-#need to put mutiple cases here where decide this bundle to deploy by default use the odl bundle.
+source common/tools.sh
+
+#need to put multiple cases here to decide which bundle to deploy; by default use the odl bundle.
 # Below parameters are the defaults and can be overridden according to the release
 opnfvsdn=nosdn
-opnfvtype=nonha
-openstack=liberty
+opnfvtype=noha
+openstack=ocata
 opnfvlab=default
-opnfvrel=b
-opnfvfeature=odl_l2
-
-read_config() {
-    opnfvrel=`grep release: deploy.yaml | cut -d ":" -f2`
-    openstack=`grep openstack: deploy.yaml | cut -d ":" -f2`
-    opnfvtype=`grep type: deploy.yaml | cut -d ":" -f2`
-    opnfvlab=`grep lab: deploy.yaml | cut -d ":" -f2`
-    opnfvsdn=`grep sdn: deploy.yaml | cut -d ":" -f2`
+opnfvlabfile=
+opnfvrel=e
+opnfvfeature=none
+opnfvdistro=xenial
+opnfvarch=amd64
+opnfvmodel=openstack
+virtinstall=0
+maasinstall=0
+
+jujuver=`juju --version`
+
+usage() { echo "Usage: $0
+    [-s|--sdn ]
+    [-t|--type ]
+    [-o|--openstack ]
+    [-l|--lab ]
+    [-f|--feature ]
+    [-d|--distro ]
+    [-a|--arch ]
+    [-m|--model ]
+    [-i|--virtinstall <0|1>]
+    [--maasinstall <0|1>]
+    [--labfile ]
+    [-r|--release ]" 1>&2; exit 1; }
 }

-usage() { echo "Usage: $0 [-s ]
-                          [-t ]
-                          [-o ]
-                          [-l ]
-                          [-f ]
-                          [-r ]" 1>&2 exit 1; }
-
-while getopts ":s:t:o:l:h:r:f:" opt; do
-    case "${opt}" in
-        s)
-            opnfvsdn=${OPTARG}
-            ;;
-        t)
-            opnfvtype=${OPTARG}
-            ;;
-        o)
-            openstack=${OPTARG}
-            ;;
-        l)
-            opnfvlab=${OPTARG}
-            ;;
-        r)
-            opnfvrel=${OPTARG}
-            ;;
-        f)
-            opnfvfeature=${OPTARG}
-            ;;
-        h)
-            usage
-            ;;
-        *)
-            ;;
-    esac
-done
-
-deploy_dep() {
-    sudo apt-add-repository ppa:juju/stable -y
-    sudo apt-get update
-    sudo apt-get install juju git juju-deployer -y
-    juju init -f
-    cp environments.yaml ~/.juju/
-}
+# A string with all command options
+options=$@
+
+# An array with all the arguments
+arguments=($options)
+
+# Loop index
+index=0
+
+for argument in $options
+  do
+    # Incrementing index
+    index=`expr $index + 1`
+
+    # The conditions
+    case $argument in
+        -h|--help )
+            usage;
+            ;;
+        -s|--sdn )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvsdn=${arguments[index]}
+            fi;
+            ;;
+        -t|--type )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvtype=${arguments[index]}
+            fi;
+            ;;
+        -o|--openstack )
+            if [ "${arguments[index]}" != "" ]; then
+                openstack=${arguments[index]}
+            fi;
+            ;;
+        -l|--lab )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvlab=${arguments[index]}
+            fi;
+            ;;
+        -r|--release )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvrel=${arguments[index]}
+            fi;
+            ;;
+        -f|--feature )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvfeature=${arguments[index]}
+            fi;
+            ;;
+        -d|--distro )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvdistro=${arguments[index]}
+            fi;
+            ;;
+        -a|--arch )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvarch=${arguments[index]}
+            fi;
+            ;;
+        -m|--model )
+            if [ "${arguments[index]}" != "" ]; then
+                opnfvmodel=${arguments[index]}
+            fi;
+            ;;
+        -i|--virtinstall )
+            if [ "${arguments[index]}" != "" ]; then
+                virtinstall=${arguments[index]}
+            fi;
+            ;;
+        --maasinstall )
+            if [ "${arguments[index]}" != "" ]; then
+                maasinstall=${arguments[index]}
+            fi;
+            ;;
+        --labfile )
+            if [ "${arguments[index]}" != "" ]; then
+                labfile=${arguments[index]}
+            fi;
+            ;;
+    esac
+  done
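Note on the option loop above: because `options=$@` re-splits the command line on whitespace and each flag's value is read from the following array slot, values containing spaces will not survive the round trip. A minimal sketch of an equivalent, more robust parser using GNU getopt(1) from util-linux (assumed available, as it is on Ubuntu; the flag and variable names are the script's own):

    # Sketch only: the same short/long options parsed with getopt(1).
    opts=$(getopt -o hs:t:o:l:r:f:d:a:m:i: \
        --long help,sdn:,type:,openstack:,lab:,release:,feature:,distro:,arch:,model:,virtinstall:,maasinstall:,labfile: \
        -n "$0" -- "$@") || usage
    eval set -- "$opts"
    while true; do
        case "$1" in
            -h|--help)        usage ;;
            -s|--sdn)         opnfvsdn=$2;     shift 2 ;;
            -t|--type)        opnfvtype=$2;    shift 2 ;;
            -o|--openstack)   openstack=$2;    shift 2 ;;
            -l|--lab)         opnfvlab=$2;     shift 2 ;;
            -r|--release)     opnfvrel=$2;     shift 2 ;;
            -f|--feature)     opnfvfeature=$2; shift 2 ;;
            -d|--distro)      opnfvdistro=$2;  shift 2 ;;
            -a|--arch)        opnfvarch=$2;    shift 2 ;;
            -m|--model)       opnfvmodel=$2;   shift 2 ;;
            -i|--virtinstall) virtinstall=$2;  shift 2 ;;
            --maasinstall)    maasinstall=$2;  shift 2 ;;
            --labfile)        labfile=$2;      shift 2 ;;
            --)               shift; break ;;
        esac
    done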
"arguments[index]" != "" ]); then + labfile=${arguments[index]} + fi; + ;; + esac + done + #by default maas creates two VMs in case of three more VM needed. createresource() { - maas_ip=`grep " ip_address" deployment.yaml | cut -d " " -f 10` - apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2` - maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey} - - nodeexist=`maas maas nodes list hostname=node3-control` - - if [ $nodeexist != *node3* ]; then - sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control - - sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control - - sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute - - node3controlmac=`grep "mac address" node3-control | head -1 | cut -d "'" -f 2` - node4controlmac=`grep "mac address" node4-control | head -1 | cut -d "'" -f 2` - node5computemac=`grep "mac address" node5-compute | head -1 | cut -d "'" -f 2` - - sudo virsh -c qemu:///system define --file node3-control - sudo virsh -c qemu:///system define --file node4-control - sudo virsh -c qemu:///system define --file node5-compute - - controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses=$node3controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4 ` - - maas maas tag update-nodes control add=$controlnodeid - - controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses=$node4controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4 ` - - maas maas tag update-nodes control add=$controlnodeid - - computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4 ` - - maas maas tag update-nodes compute add=$computenodeid + # TODO: make sure this function run with the same parameters used in 03-maasdeploy.sh + PROFILE=${PROFILE:-ubuntu} + MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //') + API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0" + API_KEY=`sudo maas-region apikey --username=ubuntu` + maas login $PROFILE $API_SERVER $API_KEY + + # if we have a virshurl configuration we use it, else we use local + VIRSHURL=$(cat labconfig.json | jq -r '.opnfv.virshurl') + if ([ $VIRSHURL == 
"" ] || [ "$VIRSHURL" == "null" ]); then + VIRSHIP=$MAAS_IP + VIRSHURL="qemu+ssh://$USER@$VIRSHIP/system " + VIRSHHOST="" + else + VIRSHHOST=$(echo $VIRSHURL| cut -d\/ -f 3 | cut -d@ -f2) + VIRSHIP="" # TODO: parse from $VIRSHURL if needed fi + + for node in node3-control node4-control + do + node_id=$(maas $PROFILE machines read | \ + jq -r ".[] | select(.hostname == \"$node\").system_id") + if [[ -z "$node_id" ]]; then + sudo virt-install --connect qemu:///system --name $node \ + --ram 8192 --cpu host --vcpus 4 \ + --disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \ + --network bridge=virbr0,model=virtio \ + --network bridge=virbr0,model=virtio \ + --boot network,hd,menu=off \ + --noautoconsole --vnc --print-xml | tee _node.xml + node_mac=$(grep "mac address" _node.xml | head -1 | cut -d "'" -f 2) + sudo virsh -c $VIRSHURL define --file _node.xml + rm -f _node.xml + + maas $PROFILE nodes new autodetect_nodegroup='yes' name=$node \ + tags='control' hostname=$name power_type='virsh' \ + mac_addresses=$node3controlmac \ + power_parameters_power_address="qemu+ssh://$USER@192.168.122.1/system" \ + architecture='amd64/generic' power_parameters_power_id='node3-control' + sudo virsh -c $VIRSHURL autostart $node + node_id=$(maas $PROFILE machines read | \ + jq -r ".[] | select(.hostname == \"$node\").system_id") + fi + if [[ -z "$node_id" ]]; then + echo_error "Error: failed to create node $node ." + exit 1 + fi + maas $PROFILE tag update-nodes control add=$node_id || true + done } #copy the files and create extra resources needed for HA deployment # in case of default VM labs. deploy() { - #copy the script which needs to get deployed as part of ofnfv release - echo "...... deploying now ......" - echo " " >> environments.yaml - echo " enable-os-refresh-update: false" >> environments.yaml - echo " enable-os-upgrade: false" >> environments.yaml - echo " admin-secret: admin" >> environments.yaml - echo " default-series: trusty" >> environments.yaml - echo " address-allocation: true" >> environments.yaml + if [ ! -f ./labconfig.yaml ] && [ -e ~/joid_config/labconfig.yaml ]; then + cp ~/joid_config/labconfig.yaml ./labconfig.yaml + + if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then + cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml + else + python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml + fi + else + if [ -e ./labconfig.yaml ]; then + if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then + cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml + else + python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml + fi + else + echo_error "MAAS not deployed please deploy MAAS first." + fi + fi - cp environments.yaml ~/.juju/ + #create json file which is missing in case of new deployment after maas and git tree cloned freshly. + python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < labconfig.yaml > labconfig.json + python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < deployconfig.yaml > deployconfig.json + + # Install MAAS and expecting the labconfig.yaml at local directory. 
+ + if [ "$maasinstall" -eq 1 ]; then + ./clean.sh || true + PROFILE=${PROFILE:-ubuntu} + MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //') + API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0" + if which maas > /dev/null; then + API_KEY=`sudo maas-region apikey --username=ubuntu` + maas login $PROFILE $API_SERVER $API_KEY + + # make sure there is no machine entry in maas + for m in $(maas $PROFILE machines read | jq -r '.[].system_id') + do + maas $PROFILE machine delete $m || true + done + podno=$(maas $PROFILE pods read | jq -r ".[]".id) + maas $PROFILE pod delete $podno || true + fi + ./cleanvm.sh || true + + if [ "$virtinstall" -eq 1 ]; then + ./00-maasdeploy.sh virtual + else + if [ -z "$labfile" ]; then + if [ ! -e ./labconfig.yaml ]; then + echo_error "Labconfig file must be specified when using custom" + else + echo_warning "Labconfig was not specified, using ./labconfig.yaml instead" + fi + elif [ ! -e "$labfile" ]; then + echo_warning "Labconfig not found locally, trying download" + wget $labfile -t 3 -T 10 -O ./labconfig.yaml || true + count=`wc -l labconfig.yaml | cut -d " " -f 1` + if [ $count -lt 10 ]; then + echo_error "Unable to download labconfig" + exit 1 + fi + else + echo_info "Using $labfile to setup deployment" + cp $labfile ./labconfig.yaml + fi + + ./00-maasdeploy.sh custom + fi + fi if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then createresource fi - #cp ./$opnfvsdn/01-deploybundle.sh ./01-deploybundle.sh - ./00-bootstrap.sh + #bootstrap the node + ./01-bootstrap.sh - #case default: - ./01-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature + juju model-config default-series=$opnfvdistro enable-os-refresh-update=false enable-os-upgrade=false + + # case default deploy the opnfv platform: + ./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel } #check whether charms are still executing the code even juju-deployer says installed. check_status() { + waitstatus=$1 retval=0 timeoutiter=0 + + echo_info "Executing the relationships within charms..." while [ $retval -eq 0 ]; do - sleep 30 - juju status > status.txt - if [ "$(grep -c "executing" status.txt )" -ge 1 ]; then - echo " still executing the reltionship within charms ..." - if [ $timeoutiter -ge 60 ]; then + if juju status | grep -q $waitstatus; then + echo_info "Still waiting for $waitstatus units" + if [ $timeoutiter -ge 180 ]; then + echo_error 'Timed out' retval=1 + else + sleep 30 fi timeoutiter=$((timeoutiter+1)) else + echo_info 'Done executing the relationships' retval=1 fi done - echo "...... deployment finishing ......." -} - -#create config RC file to consume by various tests. -configOpenrc() -{ - echo " " > ./cloud/admin-openrc - echo "export OS_USERNAME=$1" >> ./cloud/admin-openrc - echo "export OS_PASSWORD=$2" >> ./cloud/admin-openrc - echo "export OS_TENANT_NAME=$3" >> ./cloud/admin-openrc - echo "export OS_AUTH_URL=$4" >> ./cloud/admin-openrc - echo "export OS_REGION_NAME=$5" >> ./cloud/admin-openrc - } -#to get the address of a service using juju -unitAddress() -{ - juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null -} + if [[ "$opnfvmodel" = "openstack" ]]; then + juju expose ceph-radosgw || true + #juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"' + fi -createopenrc() -{ - mkdir -m 0700 -p cloud + echo_info "Deployment finishing..." 

-    controller_address=$(unitAddress keystone 0)
-    configOpenrc admin openstack admin http://$controller_address:5000/v2.0 Canonical
-    chmod 0600 cloud/admin-openrc
-}
-
-if [ "$#" -eq 0 ]; then
-    echo "This installtion will use default options"
-    #read_config
+# In the case of a virtual deployment
+if [ "$virtinstall" -eq 1 ]; then
+    ./clean.sh || true
 fi

-echo "...... deployment started ......"
-#deploy_dep
+echo_info "Deployment started"
 deploy
-check_status
-echo "...... deployment finished ......."

-echo "...... creating OpenRc file for consuming by various user ......."
+check_status executing
+
+echo_info "Deployment finished"
 juju status --format=tabular
-createopenrc
+
+# translate bundles.yaml to json
+python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < bundles.yaml > bundles.json
+
+# Configure the deployment
+if [ "$opnfvmodel" == "openstack" ]; then
+    echo_info "Configuring OpenStack deployment"
+
+    ./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
+
+    # create the heat domain after pushing the public API into /etc/hosts
+    if [[ "$jujuver" > "2" ]]; then
+        status=`juju run-action heat/0 domain-setup`
+        echo $status
+    else
+        status=`juju action do heat/0 domain-setup`
+        echo $status
+    fi
+
+    sudo ../juju/get-cloud-images || true
+    ../juju/joid-configure-openstack || true
+
+    if grep -q 'openbaton' bundles.yaml; then
+        juju add-relation openbaton keystone
+    fi
+
+elif [ "$opnfvmodel" == "kubernetes" ]; then
+    # workaround for the master charm, as it takes about 5 minutes to run properly
+    check_status waiting
+    check_status executing
+    echo_info "Configuring Kubernetes deployment"
+
+    ./k8.sh
+fi

+# expose the Juju GUI URL so the user can log into the Juju GUI
+echo_info "Juju GUI can be accessed using the following URL and credentials:"
+juju gui --show-credentials --no-browser
+echo "Finished deployment and configuration"
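For reference, a couple of plausible invocations of the new script, built only from flags and default values it defines above (not an exhaustive list):

    # Virtual no-HA OpenStack (ocata) deployment with the default nosdn bundle:
    ./deploy.sh -o ocata -t noha -l default -i 1

    # Kubernetes model on a virtual lab, installing MAAS first:
    ./deploy.sh -m kubernetes -i 1 --maasinstall 1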