Modified the environment file so that all containers will get the updated configuration.
[joid.git] / ci / deploy.sh
#!/bin/bash

set -ex

# Default deployment knobs.  Each one can be overridden either on the
# command line (see the getopts loop below) or, when the script is run
# with no arguments, from deploy.yaml via read_config.
# TODO: support multiple bundle choices; the nosdn/odl defaults are used
# until then.
openstack=liberty
opnfvrel=b
opnfvlab=default
opnfvtype=nonha
opnfvsdn=nosdn
opnfvfeature=odl_l2
# Load the deployment options from deploy.yaml.  Each value keeps the
# leading space that follows the colon (cut -f2 does not trim it), which
# matches the historical behaviour of this script.
read_config() {
    opnfvrel=$(grep release: deploy.yaml | cut -d ":" -f2)
    openstack=$(grep openstack: deploy.yaml | cut -d ":" -f2)
    opnfvtype=$(grep type: deploy.yaml | cut -d ":" -f2)
    opnfvlab=$(grep lab: deploy.yaml | cut -d ":" -f2)
    opnfvsdn=$(grep sdn: deploy.yaml | cut -d ":" -f2)
}
22
# Print usage on stderr and abort with status 1.
# Fix: the original read `... 1>&2 exit 1;` — with no semicolon after the
# redirection, "exit" and "1" were passed as extra arguments to echo, so
# the message ended in " exit 1" and the function never terminated the
# script.
usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
                         [-t <nonha|ha|tip>] 
                         [-o <juno|liberty>]
                         [-l <default|intelpod5>]
                         [-f <ipv6|l2|l3|dvr>]
                         [-r <a|b>]" 1>&2; exit 1; }
29
# Parse command-line flags; any flag given here overrides its default.
# Fix: "h" must not take an argument — the old optstring declared "h:",
# so a bare "-h" was reported as a missing-argument error (swallowed by
# the "*)" arm) and the usage text was never shown.
while getopts ":s:t:o:l:hr:f:" opt; do
    case "${opt}" in
        s)
            opnfvsdn=${OPTARG}       # SDN controller bundle to deploy
            ;;
        t)
            opnfvtype=${OPTARG}      # deployment type: nonha|ha|tip
            ;;
        o)
            openstack=${OPTARG}      # openstack release: juno|liberty
            ;;
        l)
            opnfvlab=${OPTARG}       # target lab profile
            ;;
        r)
            opnfvrel=${OPTARG}       # opnfv release: a|b
            ;;
        f)
            opnfvfeature=${OPTARG}   # feature set: ipv6|l2|l3|dvr
            ;;
        h)
            usage
            ;;
        *)
            ;;                       # unknown flags are silently ignored
    esac
done
57
# Install the juju tool-chain from the stable PPA and seed ~/.juju with
# the environments.yaml checked into this repo.  Requires sudo and
# network access; currently invoked manually (the call at the bottom of
# the file is commented out).
deploy_dep() {
    sudo apt-add-repository ppa:juju/stable -y
    sudo apt-get update
    sudo apt-get install juju git juju-deployer -y
    # -f forces regeneration of the boilerplate environments file
    juju init -f
    cp environments.yaml ~/.juju/
}
65
#by default maas creates two VMs; an HA deployment needs three more
#(two control, one compute), which are defined in libvirt and then
#enlisted with MAAS, power-managed over virsh.
createresource() {
    # MAAS endpoint address and API key come from the generated YAML files.
    maas_ip=$(grep " ip_address" deployment.yaml | cut -d " " -f 10)
    apikey=$(grep maas-oauth: environments.yaml | cut -d "'" -f 2)
    maas login maas "http://${maas_ip}/MAAS/api/1.0" "${apikey}"

    nodeexist=$(maas maas nodes list hostname=node3-control)

    # Fix: [[ ]] performs the intended glob match.  The old
    # "[ $nodeexist != *node3* ]" compared against the literal string
    # "*node3*" (always true), and errored out entirely when the listing
    # was empty, so the guard never worked.
    if [[ $nodeexist != *node3* ]]; then
        # Generate the guest XML to a file first so the MAC addresses can
        # be harvested before "virsh define".
        sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control

        sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control

        sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute

        node3controlmac=$(grep "mac address" node3-control | head -1 | cut -d "'" -f 2)
        node4controlmac=$(grep "mac address" node4-control | head -1 | cut -d "'" -f 2)
        node5computemac=$(grep "mac address" node5-compute | head -1 | cut -d "'" -f 2)

        sudo virsh -c qemu:///system define --file node3-control
        sudo virsh -c qemu:///system define --file node4-control
        sudo virsh -c qemu:///system define --file node5-compute

        # Enlist each guest with MAAS and tag it control/compute so the
        # juju bundle can target the right machines.
        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses="$node3controlmac" power_parameters_power_address="qemu+ssh://$USER@192.168.122.1/system" architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add="$controlnodeid"

        controlnodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses="$node4controlmac" power_parameters_power_address="qemu+ssh://$USER@192.168.122.1/system" architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes control add="$controlnodeid"

        computenodeid=$(maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses="$node5computemac" power_parameters_power_address="qemu+ssh://$USER@192.168.122.1/system" architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4)

        maas maas tag update-nodes compute add="$computenodeid"
    fi
}
102
#copy the files and create extra resources needed for HA deployment
# in case of default VM labs.
deploy() {
    echo "...... deploying now ......"

    # Juju environment tweaks, appended verbatim to environments.yaml
    # (first line is the blank spacer the original echo chain produced).
    cat >> environments.yaml <<EOF
   
        enable-os-refresh-update: false
        enable-os-upgrade: false
        admin-secret: admin
        default-series: trusty
        address-allocation: true
EOF

    cp environments.yaml ~/.juju/

    # The default virtual lab only pre-creates two machines; HA needs more.
    if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
        createresource
    fi

    #cp ./$opnfvsdn/01-deploybundle.sh ./01-deploybundle.sh
    ./00-bootstrap.sh

    #case default:
    ./01-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature
}
127
#check whether charms are still executing the code even juju-deployer says installed.
# Polls "juju status" every 30s until no unit reports "executing", or
# until ~30 minutes (60 polls) have elapsed, whichever comes first.
check_status() {
    retval=0
    timeoutiter=0
    while [ $retval -eq 0 ]; do
       sleep 30
       juju status > status.txt
       if [ "$(grep -c "executing" status.txt)" -ge 1 ]; then
           # Fix: message used to read "reltionship".
           echo " still executing the relationship within charms ..."
           if [ $timeoutiter -ge 60 ]; then
               retval=1      # give up after the timeout window
           fi
           timeoutiter=$((timeoutiter+1))
       else
           retval=1          # nothing executing any more: done
       fi
    done
    echo "...... deployment finishing ......."
}
147
#create config RC file to consume by various tests.
# Arguments: $1 user, $2 password, $3 tenant, $4 auth URL, $5 region.
# Writes ./cloud/admin-openrc (the leading two-space line matches the
# file layout the original echo chain produced).
configOpenrc()
{
    {
        printf '%s\n' "  "
        printf 'export OS_USERNAME=%s\n' "$1"
        printf 'export OS_PASSWORD=%s\n' "$2"
        printf 'export OS_TENANT_NAME=%s\n' "$3"
        printf 'export OS_AUTH_URL=%s\n' "$4"
        printf 'export OS_REGION_NAME=%s\n' "$5"
    } > ./cloud/admin-openrc
}
158
#to get the address of a service using juju
# Prints the public-address of unit $1/$2 (e.g. "keystone" "0") by
# parsing the YAML emitted by "juju status".  Errors (unit not found,
# juju unreachable) are silenced, so callers simply see empty output.
# NOTE(review): the one-liner uses the Python 2 print statement and
# needs PyYAML installed — confirm the target host still ships python2.
unitAddress()
{
    juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
}
164
# Build cloud/admin-openrc for the freshly deployed cloud: look up the
# keystone unit's public address and hand the fixed admin credentials to
# configOpenrc, then lock the file down to the owner.
createopenrc()
{
    mkdir -m 0700 -p cloud

    keystone_address=$(unitAddress keystone 0)
    configOpenrc admin openstack admin "http://${keystone_address}:5000/v2.0" Canonical
    chmod 0600 cloud/admin-openrc
}
173
# ---------------------------------------------------------------------
# Main flow: deploy the bundle, wait for the charms to settle, then emit
# the OpenRC file for the test suites.
# Fix: user-facing message used to read "installtion".
# ---------------------------------------------------------------------
if [ "$#" -eq 0 ]; then
  echo "This installation will use default options"
  #read_config
fi

echo "...... deployment started ......"
#deploy_dep
deploy
check_status
echo "...... deployment finished  ......."

echo "...... creating OpenRc file for consuming by various user ......."

createopenrc

echo "...... finished  ......."