added a command line option -f in deploy.sh to enable ipv6 during deployment
[joid.git] / ci / deploy.sh
#!/bin/bash
# JOID deploy driver: pick the SDN bundle, HA mode, OpenStack release, lab
# and feature set, then bootstrap juju and deploy the selected bundle.

set -ex

# Deployment knobs with their defaults; each can be overridden by a
# command-line flag (see usage). By default the nosdn bundle is deployed.
opnfvsdn=nosdn          # -s: SDN controller (nosdn|odl|opencontrail)
opnfvtype=nonha         # -t: deployment type (nonha|ha|tip)
openstack=liberty       # -o: OpenStack release (juno|liberty)
opnfvlab=default        # -l: target lab (default|intelpod5)
opnfvrel=b              # -r: OPNFV release (a|b)
opnfvfeature=odl_l2     # -f: feature set (ipv6|l2|l3|dvr)
14
# Populate the deployment knobs from deploy.yaml in the current directory.
# Parsing is a plain grep/cut on "key: value" lines, so any whitespace after
# the ':' is kept in the value — callers tolerate this.
read_config() {
    opnfvrel=$(grep release: deploy.yaml | cut -d ":" -f2)
    openstack=$(grep openstack: deploy.yaml | cut -d ":" -f2)
    opnfvtype=$(grep type: deploy.yaml | cut -d ":" -f2)
    opnfvlab=$(grep lab: deploy.yaml | cut -d ":" -f2)
    opnfvsdn=$(grep sdn: deploy.yaml | cut -d ":" -f2)
}
22
# Print usage to stderr and abort with status 1.
# BUG FIX: the original chained "1>&2 exit 1" onto the echo command, which
# made "exit" and "1" extra echo arguments — usage printed the words
# "exit 1" and the script carried on instead of terminating.
usage() {
    echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
                         [-t <nonha|ha|tip>]
                         [-o <juno|liberty>]
                         [-l <default|intelpod5>]
                         [-f <ipv6|l2|l3|dvr>]
                         [-r <a|b>]" 1>&2
    exit 1
}
29
# Parse command-line overrides for the deployment defaults.
# BUG FIX: the optstring used "h:", so a bare -h required an argument and
# fell into the silent default branch instead of printing usage; 'h' takes
# no argument. The leading ':' keeps getopts quiet about unknown flags.
while getopts ":s:t:o:l:hr:f:" opt; do
    case "${opt}" in
        s)
            opnfvsdn=${OPTARG}      # SDN controller bundle
            ;;
        t)
            opnfvtype=${OPTARG}     # nonha|ha|tip
            ;;
        o)
            openstack=${OPTARG}     # OpenStack release
            ;;
        l)
            opnfvlab=${OPTARG}      # target lab
            ;;
        r)
            opnfvrel=${OPTARG}      # OPNFV release
            ;;
        f)
            opnfvfeature=${OPTARG}  # feature set, e.g. ipv6
            ;;
        h)
            usage
            ;;
        *)
            # Unknown or malformed flags are deliberately ignored.
            ;;
    esac
done
57
# Install juju and its dependencies and initialize the juju environment.
# Requires passwordless sudo and network access to the juju stable PPA.
deploy_dep() {
    sudo apt-add-repository ppa:juju/stable -y
    sudo apt-get update
    sudo apt-get install juju git juju-deployer -y
    # -f forces (re)generation of the boilerplate ~/.juju/environments.yaml.
    juju init -f
    # Overwrite it with this repo's pre-baked environment definition.
    cp environments.yaml ~/.juju/
}
65
# By default MAAS creates two VMs; for HA deployments three more are needed.
# Defines node3/node4 (control) and node5 (compute) as libvirt guests and
# enlists them with MAAS under the matching tags. Idempotent: skipped when
# node3-control is already known to MAAS.
createresource() {
    # MAAS address and API key are scraped from the lab config files; both
    # depend on the exact formatting of deployment.yaml/environments.yaml.
    maas_ip=$(grep " ip_address" deployment.yaml | cut -d " "  -f 10)
    apikey=$(grep maas-oauth: environments.yaml | cut -d "'" -f 2)
    maas login maas "http://${maas_ip}/MAAS/api/1.0" "${apikey}"

    nodeexist=$(maas maas nodes list hostname=node3-control)

    # BUG FIX: '[' does not glob-match — the original compared against the
    # literal string '*node3*' (and word-split $nodeexist), so the check never
    # behaved as intended. '[[' performs real pattern matching.
    if [[ $nodeexist != *node3* ]]; then
        sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control

        sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control

        sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute

        # First MAC of each domain XML is the PXE interface MAAS enlists with.
        node3controlmac=$(grep  "mac address" node3-control | head -1 | cut -d "'" -f 2)
        node4controlmac=$(grep  "mac address" node4-control | head -1 | cut -d "'" -f 2)
        node5computemac=$(grep  "mac address" node5-compute | head -1 | cut -d "'" -f 2)

        sudo virsh -c qemu:///system define --file node3-control
        sudo virsh -c qemu:///system define --file node4-control
        sudo virsh -c qemu:///system define --file node5-compute

        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses="$node3controlmac" power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add="$controlnodeid"

        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses="$node4controlmac" power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add="$controlnodeid"

        computenodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses="$node5computemac" power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes compute add="$computenodeid"
    fi
}
102
# Copy the files and create extra resources needed for HA deployment
# in case of the default VM lab, then bootstrap juju and run the bundle.
deploy() {
    echo "...... deploying now ......"

    # Append the opnfv-specific juju environment settings (indentation is
    # significant — these lines extend the YAML written by `juju init`).
    printf '%s\n' \
        "   " \
        "        enable-os-refresh-update: false" \
        "        enable-os-upgrade: false" \
        "        admin-secret: admin" \
        "        default-series: trusty" >> environments.yaml

    cp environments.yaml ~/.juju/

    # HA on the default (virtual) lab needs additional MAAS nodes.
    if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
        createresource
    fi

    #cp ./$opnfvsdn/01-deploybundle.sh ./01-deploybundle.sh
    ./00-bootstrap.sh

    #case default:
    ./01-deploybundle.sh "$opnfvtype" "$openstack" "$opnfvlab" "$opnfvsdn" "$opnfvfeature"
}
126
# Check whether charms are still executing code even after juju-deployer
# reports installed: poll `juju status` every 30s until no unit reports
# "executing", or ~30 minutes (60 polls) have elapsed.
check_status() {
    retval=0
    timeoutiter=0
    while [ $retval -eq 0 ]; do
       sleep 30
       juju status > status.txt
       # grep -q: we only need to know whether any unit is still executing.
       if grep -q "executing" status.txt; then
           # (fixed typo: was "reltionship")
           echo " still executing the relationship within charms ..."
           if [ $timeoutiter -ge 60 ]; then
               retval=1
           fi
           timeoutiter=$((timeoutiter+1))
       else
           retval=1
       fi
    done
    echo "...... deployment finishing ......."
}
146
# Create the OpenRC config file consumed by the various test suites.
# $1=user $2=password $3=tenant $4=auth URL $5=region
configOpenrc()
{
    # Single redirect over a grouped block truncates and rewrites the file.
    {
        printf '%s\n' "  "
        printf '%s\n' "export OS_USERNAME=$1"
        printf '%s\n' "export OS_PASSWORD=$2"
        printf '%s\n' "export OS_TENANT_NAME=$3"
        printf '%s\n' "export OS_AUTH_URL=$4"
        printf '%s\n' "export OS_REGION_NAME=$5"
    } > ./cloud/admin-openrc
}
157
# To get the public address of a service unit using juju.
# $1 = service name, $2 = unit number (e.g. "keystone" "0").
# NOTE(review): the inline script is Python 2 (print statement) and calls
# yaml.load without an explicit Loader — acceptable for trusted juju output,
# but it will not run on a Python-3-only host; confirm the target image.
unitAddress()
{
    juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
}
163
# Assemble ./cloud/admin-openrc pointing at the deployed keystone unit.
createopenrc()
{
    mkdir -m 0700 -p cloud

    keystone_addr=$(unitAddress keystone 0)
    configOpenrc admin openstack admin "http://${keystone_addr}:5000/v2.0" Canonical
    chmod 0600 cloud/admin-openrc
}
172
# Top-level driver: deploy, wait for charms to settle, then emit the OpenRC
# file. (fixed user-facing typo: "installtion" -> "installation")
if [ "$#" -eq 0 ]; then
  echo "This installation will use default options"
  #read_config
fi

echo "...... deployment started ......"
#deploy_dep
deploy
check_status
echo "...... deployment finished  ......."

echo "...... creating OpenRc file for consuming by various user ......."

createopenrc

echo "...... finished  ......."