JIRA: JOID-38 added eth1 to compute VMs as well on request of onos
[joid.git] / ci / deploy.sh
#!/bin/bash

set -ex

# Multiple cases can be handled here to decide which bundle to deploy;
# by default the odl bundle is used.
# Below parameters are the defaults and can be adjusted per release.

opnfvsdn=nosdn
opnfvtype=nonha
openstack=liberty
opnfvlab=default
opnfvrel=b
# Read deployment options from deploy.yaml in the current directory,
# overriding the script defaults (release, openstack version, type, lab, sdn).
# NOTE: `cut -d: -f2` keeps the leading space after the colon, so the value
# is trimmed with `tr -d ' '` — without this, later string comparisons fail.
read_config() {
    opnfvrel=$(grep release: deploy.yaml | cut -d ":" -f2 | tr -d ' ')
    openstack=$(grep openstack: deploy.yaml | cut -d ":" -f2 | tr -d ' ')
    opnfvtype=$(grep type: deploy.yaml | cut -d ":" -f2 | tr -d ' ')
    opnfvlab=$(grep lab: deploy.yaml | cut -d ":" -f2 | tr -d ' ')
    opnfvsdn=$(grep sdn: deploy.yaml | cut -d ":" -f2 | tr -d ' ')
}
21
# Print usage to stderr and abort.
# Fix: the original read `1>&2 exit 1` (no semicolon), which passed the
# words "exit" and "1" as extra arguments to echo instead of exiting.
usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
                         [-t <nonha|ha|tip>]
                         [-o <juno|liberty>]
                         [-l <default|intelpod5>]
                         [-r <a|b>]" 1>&2; exit 1; }
27
# Parse command-line options into the opnfv* / openstack variables.
# Fix: `-h` was declared as `h:` (argument required), so a bare `-h`
# consumed the next option or errored instead of printing usage.
while getopts ":s:t:o:l:hr:" opt; do
    case "${opt}" in
        s)
            opnfvsdn=${OPTARG}    # sdn controller: nosdn|odl|opencontrail
            ;;
        t)
            opnfvtype=${OPTARG}   # deployment type: nonha|ha|tip
            ;;
        o)
            openstack=${OPTARG}   # openstack release: juno|liberty
            ;;
        l)
            opnfvlab=${OPTARG}    # lab profile: default|intelpod5
            ;;
        r)
            opnfvrel=${OPTARG}    # opnfv release: a|b
            ;;
        h)
            usage
            ;;
        *)
            ;;
    esac
done
52
# Install the host-side tooling required to drive a Juju deployment,
# then initialise the local Juju environment configuration.
deploy_dep() {
    # The juju stable PPA provides juju and juju-deployer.
    sudo apt-add-repository ppa:juju/stable -y
    sudo apt-get update
    sudo apt-get install juju git juju-deployer -y
    # -f overwrites any pre-existing ~/.juju boilerplate.
    juju init -f
    cp environments.yaml ~/.juju/
}
60
# By default MAAS creates two VMs; for HA three more VMs are needed.
# Defines three KVM guests (two control, one compute), registers them with
# MAAS over the virsh power driver, and tags them control/compute.
createresource() {
    maas_ip=$(grep " ip_address" deployment.yaml | cut -d " " -f 10)
    apikey=$(grep maas-oauth: environments.yaml | cut -d "'" -f 2)
    maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}

    nodeexist=$(maas maas nodes list hostname=node3-control)

    # [[ ]] is required here: plain [ ] does no glob matching (it would
    # compare literally against "*node3*") and word-splits the JSON
    # listing, which aborts the script under `set -e`.
    if [[ $nodeexist != *node3* ]]; then
        sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control

        sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control

        sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute

        # First MAC of each domain is the PXE-boot interface MAAS enlists on.
        node3controlmac=$(grep "mac address" node3-control | head -1 | cut -d "'" -f 2)
        node4controlmac=$(grep "mac address" node4-control | head -1 | cut -d "'" -f 2)
        node5computemac=$(grep "mac address" node5-compute | head -1 | cut -d "'" -f 2)

        sudo virsh -c qemu:///system define --file node3-control
        sudo virsh -c qemu:///system define --file node4-control
        sudo virsh -c qemu:///system define --file node5-compute

        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses="$node3controlmac" power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add=$controlnodeid

        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses="$node4controlmac" power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add=$controlnodeid

        computenodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses="$node5computemac" power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes compute add=$computenodeid
    fi
}
97
#copy the files and create extra resources needed for HA deployment
# in case of default VM labs.
deploy() {
    #copy the script which needs to get deployed as part of ofnfv release
    echo "...... deploying now ......"
    # Append the deployment overrides to the Juju environment definition
    # (heredoc writes the same lines the previous echo chain appended).
    cat >> environments.yaml <<'EOF'
   
        enable-os-refresh-update: false
        enable-os-upgrade: false
        admin-secret: admin
        default-series: trusty
EOF

    cp environments.yaml ~/.juju/

    # Extra virtual machines are only required for HA on the default lab.
    if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
        createresource
    fi

    # Select the bundle script for the chosen SDN controller.
    cp ./$opnfvsdn/01-deploybundle.sh ./01-deploybundle.sh
    ./00-bootstrap.sh

    #case default:
    ./01-deploybundle.sh $opnfvtype $openstack $opnfvlab
}
121
#check whether charms are still executing the code even juju-deployer says installed.
# Polls `juju status` every 30s; gives up after 60 iterations (~30 minutes).
check_status() {
    local retval=0
    local timeoutiter=0
    while [ $retval -eq 0 ]; do
       sleep 30
       juju status > status.txt
       # grep -q replaces the previous `grep -c ... -ge 1` count test.
       if grep -q "executing" status.txt; then
           echo " still executing the relationship within charms ..."
           if [ $timeoutiter -ge 60 ]; then
               retval=1
           fi
           timeoutiter=$((timeoutiter+1))
       else
           retval=1
       fi
    done
    echo "...... deployment finishing ......."
}
141
#create config RC file to consume by various tests.
# $1 user, $2 password, $3 tenant, $4 auth url, $5 region.
configOpenrc()
{
    # Single heredoc write; content is byte-identical to the former
    # echo-per-line sequence (including the leading padding line).
    cat > ./cloud/admin-openrc <<EOF
  
export OS_USERNAME=$1
export OS_PASSWORD=$2
export OS_TENANT_NAME=$3
export OS_AUTH_URL=$4
export OS_REGION_NAME=$5
EOF
}
152
#to get the address of a service using juju
# $1 service name, $2 unit number; prints the unit's public-address.
# Fix: the Python 2 `print x` statement is a syntax error under python3
# (silently masked by 2>/dev/null); `print(...)` works on both.
# yaml.safe_load avoids the unsafe-load warning/behaviour of yaml.load.
unitAddress()
{
    juju status | python -c "import yaml; import sys; print(yaml.safe_load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"])" 2> /dev/null
}
158
# Build cloud/admin-openrc from the deployed keystone unit's address.
createopenrc()
{
    # 0700: the directory will hold admin credentials.
    mkdir -m 0700 -p cloud

    controller_address=$(unitAddress keystone 0)
    # Quote the URL: if unitAddress returns empty, an unquoted expansion
    # would collapse the argument and shift the positional parameters.
    configOpenrc admin openstack admin "http://${controller_address}:5000/v2.0" Canonical
    chmod 0600 cloud/admin-openrc
}
167
# Entry sequence: deploy, wait for charms to settle, emit the RC file.
if [ "$#" -eq 0 ]; then
  # Typo fix: "installtion" -> "installation".
  echo "This installation will use default options"
  #read_config
fi

echo "...... deployment started ......"
#deploy_dep
deploy
check_status
echo "...... deployment finished  ......."

echo "...... creating OpenRc file for consuming by various user ......."

createopenrc

echo "...... finished  ......."
184
185