#!/bin/bash

set -ex

# Need to handle multiple cases here to decide which bundle to deploy; by default the nosdn bundle is used.
# The parameters below are the defaults and can be changed according to the release.

opnfvsdn=nosdn
opnfvtype=nonha
openstack=mitaka
opnfvlab=default
opnfvrel=c
opnfvfeature=none
opnfvdistro=xenial
opnfvarch=amd64

jujuver=`juju --version`

read_config() {
    opnfvrel=`grep release: deploy.yaml | cut -d ":" -f2`
    openstack=`grep openstack: deploy.yaml | cut -d ":" -f2`
    opnfvtype=`grep type: deploy.yaml | cut -d ":" -f2`
    opnfvlab=`grep lab: deploy.yaml | cut -d ":" -f2`
    opnfvsdn=`grep sdn: deploy.yaml | cut -d ":" -f2`
}
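
# read_config pulls values out of deploy.yaml with grep/cut. As a rough sketch
# (the exact file is lab-specific; the values shown are just the script
# defaults), it assumes a flat "key: value" layout along these lines:
#   release: c
#   openstack: mitaka
#   type: nonha
#   lab: default
#   sdn: nosdn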

usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
                         [-t <nonha|ha|tip>]
                         [-o <juno|liberty>]
                         [-l <default|intelpod5>]
                         [-f <ipv6,dpdk,lxd,dvr>]
                         [-d <trusty|xenial>]
                         [-a <amd64>]
                         [-r <a|b>]" 1>&2; exit 1; }
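
# Example invocation, using the default values defined above purely for
# illustration:
#   ./deploy.sh -s nosdn -t nonha -o mitaka -l default -f none -d xenial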

while getopts ":s:t:o:l:hr:f:d:a:" opt; do
    case "${opt}" in
        s)
            opnfvsdn=${OPTARG}
            ;;
        t)
            opnfvtype=${OPTARG}
            ;;
        o)
            openstack=${OPTARG}
            ;;
        l)
            opnfvlab=${OPTARG}
            ;;
        r)
            opnfvrel=${OPTARG}
            ;;
        f)
            opnfvfeature=${OPTARG}
            ;;
        d)
            opnfvdistro=${OPTARG}
            ;;
        a)
            opnfvarch=${OPTARG}
            ;;
        h)
            usage
            ;;
        *)
            ;;
    esac
done

# By default MAAS creates two VMs; in case more VMs are needed (e.g. for HA),
# create two extra virtual control nodes here.
createresource() {
    maas_ip=`grep " ip_address" deployment.yaml | cut -d " "  -f 10`
    apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2`
    maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}

    nodeexist=`maas maas nodes list hostname=node3-control`

    if [[ $nodeexist != *node3* ]]; then
        sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control

        sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control

        node3controlmac=`grep  "mac address" node3-control | head -1 | cut -d "'" -f 2`
        node4controlmac=`grep  "mac address" node4-control | head -1 | cut -d "'" -f 2`

        sudo virsh -c qemu:///system define --file node3-control
        sudo virsh -c qemu:///system define --file node4-control

        controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses=$node3controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4 `

        maas maas tag update-nodes control add=$controlnodeid

        controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses=$node4controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4 `

        maas maas tag update-nodes control add=$controlnodeid

    fi
}
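
# Optional sanity checks for the extra control VMs, assuming the same libvirt
# URI and MAAS profile used above (not run by this script):
#   sudo virsh -c qemu:///system list --all
#   maas maas nodes list hostname=node3-control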

#copy the files and create extra resources needed for HA deployment
# in case of default VM labs.
deploy() {

    if [ ! -f ./environments.yaml ] && [ -e ~/.juju/environments.yaml ]; then
        cp ~/.juju/environments.yaml ./environments.yaml
    elif [ ! -f ./environments.yaml ] && [ -e ~/joid_config/environments.yaml ]; then
        cp ~/joid_config/environments.yaml ./environments.yaml
    fi
    if [ ! -f ./deployment.yaml ] && [ -e ~/.juju/deployment.yaml ]; then
        cp ~/.juju/deployment.yaml ./deployment.yaml
    elif [ ! -f ./deployment.yaml ] && [ -e ~/joid_config/deployment.yaml ]; then
        cp ~/joid_config/deployment.yaml ./deployment.yaml
    fi
    if [ ! -f ./labconfig.yaml ] && [ -e ~/.juju/labconfig.yaml ]; then
        cp ~/.juju/labconfig.yaml ./labconfig.yaml
    elif [ ! -f ./labconfig.yaml ] && [ -e ~/joid_config/labconfig.yaml ]; then
        cp ~/joid_config/labconfig.yaml ./labconfig.yaml
    fi
    if [ ! -f ./deployconfig.yaml ] && [ -e ~/.juju/deployconfig.yaml ]; then
        cp ~/.juju/deployconfig.yaml ./deployconfig.yaml
    elif [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
        cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
    fi

    #append the deployment options which need to be set as part of the opnfv release
    echo "...... deploying now ......"
    echo "   " >> environments.yaml
    echo "        enable-os-refresh-update: false" >> environments.yaml
    echo "        enable-os-upgrade: false" >> environments.yaml
    echo "        admin-secret: admin" >> environments.yaml
    echo "        default-series: $opnfvdistro" >> environments.yaml

    cp environments.yaml ~/.juju/
    cp environments.yaml ~/joid_config/

    if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
        createresource
    fi

    #bootstrap the node
    ./01-bootstrap.sh

    #case default deploy the opnfv platform:
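    # 02-deploybundle.sh takes its arguments positionally: type, openstack
    # release, lab, sdn, feature and distro. With the defaults defined at the
    # top of this script, the call below would be equivalent to (illustration
    # only):
    #   ./02-deploybundle.sh nonha mitaka default nosdn none xenial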
    ./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro
}

#check whether charms are still executing hooks even though juju-deployer says the deployment is installed.
check_status() {
    retval=0
    timeoutiter=0
    while [ $retval -eq 0 ]; do
       sleep 30
       juju status > status.txt
       if [ "$(grep -c "executing" status.txt )" -ge 1 ]; then
           echo " still executing the relationship within charms ..."
           if [ $timeoutiter -ge 120 ]; then
               retval=1
           fi
           timeoutiter=$((timeoutiter+1))
       else
           retval=1
       fi
    done

    juju expose ceph-radosgw
    #juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'

    echo "...... deployment finishing ......."
}

echo "...... deployment started ......"
deploy

check_status

echo "...... deployment finished  ......."

./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true

# creating heat domain after pushing the public API endpoint into /etc/hosts

if [[ "$jujuver" > "2" ]]; then
    status=`juju run-action heat/0 domain-setup`
    echo $status
else
    status=`juju action do heat/0 domain-setup`
    echo $status
fi
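
# Hint (not executed by this script): the queued action id echoed above can be
# inspected by hand with `juju show-action-output <id>` on juju 2.x or
# `juju action fetch <id>` on juju 1.x if the heat domain setup needs checking.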


sudo ../juju/get-cloud-images || true
../juju/joid-configure-openstack || true

echo "...... finished  ......."