modified to create a domain for heat after the deployment. It seems
[joid.git] / ci / deploy.sh
1 #!/bin/bash
2
3 set -ex
4
5 #need to put mutiple cases here where decide this bundle to deploy by default use the odl bundle.
6 # Below parameters are the default and we can according the release
7
8 opnfvsdn=nosdn
9 opnfvtype=nonha
10 openstack=liberty
11 opnfvlab=default
12 opnfvrel=b
13 opnfvfeature=odl_l2
14
# Override the default deployment parameters with values parsed out of
# deploy.yaml.  Note: `cut -d: -f2` keeps the leading space after the colon.
read_config() {
    opnfvrel=$(grep release: deploy.yaml | cut -d ":" -f2)
    openstack=$(grep openstack: deploy.yaml | cut -d ":" -f2)
    opnfvtype=$(grep type: deploy.yaml | cut -d ":" -f2)
    opnfvlab=$(grep lab: deploy.yaml | cut -d ":" -f2)
    opnfvsdn=$(grep sdn: deploy.yaml | cut -d ":" -f2)
}
22
# Print command-line help to stderr and abort with status 1.
# Fix: the original ended with `1>&2 exit 1` (no semicolon), which made
# "exit 1" extra arguments to echo — usage printed "exit 1" and returned 0.
usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
                         [-t <nonha|ha|tip>]
                         [-o <juno|liberty>]
                         [-l <default|intelpod5>]
                         [-f <ipv6|l2|l3|dvr>]
                         [-r <a|b>]" 1>&2; exit 1; }
29
# Parse command-line flags into the opnfv* variables.
# Fix: the optstring had "h:" which made -h require an argument; a bare
# -h therefore hit the missing-argument case and was silently ignored by
# the *) arm instead of printing usage.  -h takes no argument.
while getopts ":s:t:o:l:r:f:h" opt; do
    case "${opt}" in
        s)
            opnfvsdn=${OPTARG}
            ;;
        t)
            opnfvtype=${OPTARG}
            ;;
        o)
            openstack=${OPTARG}
            ;;
        l)
            opnfvlab=${OPTARG}
            ;;
        r)
            opnfvrel=${OPTARG}
            ;;
        f)
            opnfvfeature=${OPTARG}
            ;;
        h)
            usage
            ;;
        *)
            # unknown options / missing arguments are deliberately ignored
            ;;
    esac
done
57
# Install juju and its helpers from the juju stable PPA and seed the
# local juju environment (~/.juju) from the lab's environments.yaml.
deploy_dep() {
    sudo apt-add-repository ppa:juju/stable -y
    sudo apt-get update
    sudo apt-get install juju git juju-deployer -y
    juju init -f                   # (re)generate boilerplate ~/.juju config
    cp environments.yaml ~/.juju/  # then replace it with our lab config
}
65
#by default maas creates two VMs; in case of HA three more VMs are needed.
# Define three extra libvirt VMs (two control, one compute) and enlist
# them in MAAS with virsh power control plus the matching tag.
createresource() {
    # MAAS endpoint and API credentials scraped from the generated config files.
    maas_ip=$(grep " ip_address" deployment.yaml | cut -d " " -f 10)
    apikey=$(grep maas-oauth: environments.yaml | cut -d "'" -f 2)
    maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}

    nodeexist=$(maas maas nodes list hostname=node3-control)

    # Only create the VMs once: skip if MAAS already knows node3-control.
    # Fix: glob matching requires [[ ]]; with single brackets the *node3*
    # pattern was never matched and the unquoted multi-word JSON output
    # broke the [ test altogether ("too many arguments").
    if [[ "$nodeexist" != *node3* ]]; then
        sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control

        sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control

        sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute

        # First MAC address of each domain, pulled from the dumped libvirt XML.
        node3controlmac=$(grep "mac address" node3-control | head -1 | cut -d "'" -f 2)
        node4controlmac=$(grep "mac address" node4-control | head -1 | cut -d "'" -f 2)
        node5computemac=$(grep "mac address" node5-compute | head -1 | cut -d "'" -f 2)

        sudo virsh -c qemu:///system define --file node3-control
        sudo virsh -c qemu:///system define --file node4-control
        sudo virsh -c qemu:///system define --file node5-compute

        # Enlist each VM in MAAS and tag it so the bundle can target it.
        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses=$node3controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add=$controlnodeid

        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses=$node4controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add=$controlnodeid

        computenodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes compute add=$computenodeid
    fi
}
102
#copy the files and create extra resources needed for HA deployment
# in case of default VM labs.
deploy() {
    echo "...... deploying now ......"

    # Append the juju environment tuning knobs to environments.yaml
    # before it is installed into ~/.juju.
    printf '%s\n' \
        "   " \
        "        enable-os-refresh-update: false" \
        "        enable-os-upgrade: false" \
        "        admin-secret: admin" \
        "        default-series: trusty" >> environments.yaml

    cp environments.yaml ~/.juju/

    # The default (virtual) lab only ships two machines; HA needs more.
    if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
        createresource
    fi

    ./00-bootstrap.sh

    # Hand off to the bundle deployer with the selected options.
    ./01-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature
}
126
#check whether charms are still executing the code even juju-deployer says installed.
# Polls `juju status` every 30s until no unit reports "executing", or
# until ~30 minutes (60 iterations) have elapsed; then triggers the heat
# domain-setup action.
check_status() {
    retval=0
    timeoutiter=0
    while [ $retval -eq 0 ]; do
        sleep 30
        juju status > status.txt
        if grep -q "executing" status.txt; then
            # typo fix: "reltionship" -> "relationship"
            echo " still executing the relationship within charms ..."
            if [ $timeoutiter -ge 60 ]; then
                retval=1
            fi
            timeoutiter=$((timeoutiter+1))
        else
            retval=1
        fi
    done

    # Kick off the heat domain setup once the charms have settled.
    status=$(juju action do heat/0 domain-setup)
    echo $status
    echo "...... deployment finishing ......."
}
148
#create config RC file to consume by various tests.
# Args: $1 user  $2 password  $3 tenant  $4 auth-url  $5 region
configOpenrc()
{
    printf '%s\n' \
        "  " \
        "export OS_USERNAME=$1" \
        "export OS_PASSWORD=$2" \
        "export OS_TENANT_NAME=$3" \
        "export OS_AUTH_URL=$4" \
        "export OS_REGION_NAME=$5" > ./cloud/admin-openrc
}
159
#to get the address of a service using juju
# Prints the public-address of unit $1/$2 parsed out of `juju status` YAML.
# Errors (missing service/unit, parse failure) are silenced via
# 2>/dev/null, leaving empty output for the caller to cope with.
# NOTE(review): the inline snippet uses python2 `print` statement syntax —
# this requires a python2 interpreter on PATH; confirm on target hosts.
unitAddress()
{
    juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
}
165
# Produce cloud/admin-openrc for the admin user of the fresh deployment.
createopenrc()
{
    # ONOS needs its own juju preparation pass before credentials exist.
    if [[ "$opnfvsdn" = "onos" ]]; then
        sh onos/juju_test_prepare.sh "$opnfvlab"
        check_status
    fi

    mkdir -m 0700 -p cloud

    # Locate the keystone endpoint and write the RC file for admin.
    keystone_ip=$(unitAddress keystone 0)
    configOpenrc admin openstack admin "http://$keystone_ip:5000/v2.0" Canonical
    chmod 0600 cloud/admin-openrc
}
179
# ---- main ----
# With no arguments, fall back to the defaults declared at the top.
# (typo fix: "installtion" -> "installation")
if [ "$#" -eq 0 ]; then
  echo "This installation will use default options"
  #read_config
fi

echo "...... deployment started ......"
#deploy_dep
deploy
check_status
echo "...... deployment finished  ......."

echo "...... creating OpenRc file for consuming by various user ......."

createopenrc

echo "...... finished  ......."