#!/bin/bash

set -ex

source tools.sh

# Need to add multiple cases here to decide which bundle to deploy; by default
# the odl bundle is used.
# The parameters below are the defaults and can be adjusted per release.

opnfvsdn=nosdn
opnfvtype=noha
openstack=ocata
opnfvlab=default
opnfvrel=e
opnfvfeature=none
opnfvdistro=xenial
opnfvarch=amd64
opnfvmodel=openstack
virtinstall=0

# Juju 1.x and 2.x expose different CLIs; the version string is compared
# lexicographically against "2" below to pick the right one.
jujuver=`juju --version`

read_config() {
    opnfvrel=`grep release: deploy.yaml | cut -d ":" -f2`
    openstack=`grep openstack: deploy.yaml | cut -d ":" -f2`
    opnfvtype=`grep type: deploy.yaml | cut -d ":" -f2`
    opnfvlab=`grep lab: deploy.yaml | cut -d ":" -f2`
    opnfvsdn=`grep sdn: deploy.yaml | cut -d ":" -f2`
}
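
# read_config expects a flat deploy.yaml with one "key: value" pair per line;
# a minimal sketch of the assumed layout (hypothetical values):
#   release: e
#   openstack: ocata
#   type: noha
#   lab: default
#   sdn: nosdn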

usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
                         [-t <noha|ha|tip>]
                         [-o <juno|liberty>]
                         [-l <default|intelpod5>]
                         [-f <ipv6,dpdk,lxd,dvr>]
                         [-d <trusty|xenial>]
                         [-a <amd64>]
                         [-m <openstack|kubernetes>]
                         [-i <0|1>]
                         [-r <a|b>]" 1>&2; exit 1; }
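
# Example invocation (hypothetical lab): deploy the default nosdn/noha bundle
# on Ocata against the virtual default lab:
#   ./deploy.sh -s nosdn -t noha -o ocata -l default -d xenial -m openstack -i 1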

# Note: -h takes no argument, so it is listed without a trailing colon.
while getopts ":s:t:o:l:hr:f:d:a:m:i:" opt; do
    case "${opt}" in
        s)
            opnfvsdn=${OPTARG}
            ;;
        t)
            opnfvtype=${OPTARG}
            ;;
        o)
            openstack=${OPTARG}
            ;;
        l)
            opnfvlab=${OPTARG}
            ;;
        r)
            opnfvrel=${OPTARG}
            ;;
        f)
            opnfvfeature=${OPTARG}
            ;;
        d)
            opnfvdistro=${OPTARG}
            ;;
        a)
            opnfvarch=${OPTARG}
            ;;
        m)
            opnfvmodel=${OPTARG}
            ;;
        i)
            virtinstall=${OPTARG}
            ;;
        h)
            usage
            ;;
        *)
            ;;
    esac
done

# By default MAAS creates two VMs; create the additional control-node VMs
# needed (e.g. for HA) here.
createresource() {
    # TODO: make sure this function runs with the same parameters used in 03-maasdeploy.sh
    PROFILE=${PROFILE:-ubuntu}
    MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //')
    API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0"
    API_KEY=`sudo maas-region apikey --username=ubuntu`
    maas login $PROFILE $API_SERVER $API_KEY

    for node in node3-control node4-control
    do
        node_id=$(maas $PROFILE machines read | \
                  jq -r ".[] | select(.hostname == \"$node\").system_id")
        if [[ -z "$node_id" ]]; then
            # --print-xml only renders the domain XML instead of creating the
            # guest; the domain is defined from that XML right after.
            sudo virt-install --connect qemu:///system --name $node \
                --ram 8192 --cpu host --vcpus 4 \
                --disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
                --network bridge=virbr0,model=virtio \
                --network bridge=virbr0,model=virtio \
                --boot network,hd,menu=off \
                --noautoconsole --vnc --print-xml | tee _node.xml
            node_mac=$(grep "mac address" _node.xml | head -1 | cut -d "'" -f 2)
            sudo virsh -c qemu:///system define --file _node.xml
            rm -f _node.xml

            # Enlist the VM in MAAS; hostname, MAC and power id must refer to
            # the node being created, not a hard-coded first node.
            maas $PROFILE nodes new autodetect_nodegroup='yes' name=$node \
                tags='control' hostname=$node power_type='virsh' \
                mac_addresses=$node_mac \
                power_parameters_power_address="qemu+ssh://$USER@192.168.122.1/system" \
                architecture='amd64/generic' power_parameters_power_id=$node
            node_id=$(maas $PROFILE machines read | \
                  jq -r ".[] | select(.hostname == \"$node\").system_id")
        fi
        if [[ -z "$node_id" ]]; then
            echo_error "Error: failed to create node $node."
            exit 1
        fi
        maas $PROFILE tag update-nodes control add=$node_id || true
    done
}
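
# A quick sanity check after createresource, reusing the same MAAS CLI call as
# above (hypothetical output):
#   maas $PROFILE machines read | jq -r '.[].hostname'
#   # -> node3-control
#   # -> node4-control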

# Copy the config files and create the extra resources needed for an HA
# deployment on the default VM labs.
deploy() {
    if [[ "$jujuver" > "2" ]]; then
        if [ ! -f ./labconfig.yaml ] && [ -e ~/joid_config/labconfig.yaml ]; then
            cp ~/joid_config/labconfig.yaml ./labconfig.yaml

            if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
                cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
            else
                python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
            fi
        else
            if [ -e ./labconfig.yaml ]; then
                if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
                    cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
                else
                    python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
                fi
            else
                echo_error "MAAS is not deployed; please deploy MAAS first."
                exit 1
            fi
        fi

        # Create the json files, which are missing when MAAS was just deployed
        # and the git tree was freshly cloned.
        python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < labconfig.yaml > labconfig.json
        python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < deployconfig.yaml > deployconfig.json
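
        # Note: with newer PyYAML, yaml.safe_load is the preferred, safer
        # equivalent of yaml.load here; a minimal sketch:
        #   python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout, indent=4)' < labconfig.yaml > labconfig.json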

    else
        if [ ! -f ./environments.yaml ] && [ -e ~/.juju/environments.yaml ]; then
            cp ~/.juju/environments.yaml ./environments.yaml
        elif [ ! -f ./environments.yaml ] && [ -e ~/joid_config/environments.yaml ]; then
            cp ~/joid_config/environments.yaml ./environments.yaml
        fi
        # Copy the script which needs to get deployed as part of the opnfv release.
        echo_info "Deploying now..."
        echo "   " >> environments.yaml
        echo "        enable-os-refresh-update: false" >> environments.yaml
        echo "        enable-os-upgrade: false" >> environments.yaml
        echo "        admin-secret: admin" >> environments.yaml
        echo "        default-series: $opnfvdistro" >> environments.yaml
        cp environments.yaml ~/.juju/
        cp environments.yaml ~/joid_config/
    fi

    if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
        createresource
    fi

    # Bootstrap the Juju controller node.
    ./01-bootstrap.sh

    if [[ "$jujuver" > "2" ]]; then
        juju model-config default-series=$opnfvdistro enable-os-refresh-update=false enable-os-upgrade=false
    fi
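
    # On juju 2.x the model-config call above mirrors the environments.yaml
    # settings written for juju 1.x earlier in this function.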

    # Default case: deploy the opnfv platform bundle.
    ./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel
}

# Check whether charms are still executing code even though juju-deployer
# reports them as installed.
check_status() {
    waitstatus=$1
    retval=0
    timeoutiter=0

    echo_info "Executing the relationships within charms..."
    while [ $retval -eq 0 ]; do
        if juju status | grep -q "$waitstatus"; then
            echo_info "Still waiting for $waitstatus units"
            if [ $timeoutiter -ge 180 ]; then
                echo_error 'Timed out'
                retval=1
            else
                sleep 30
            fi
            timeoutiter=$((timeoutiter+1))
        else
            echo_info 'Done executing the relationships'
            retval=1
        fi
    done

    if [[ "$opnfvmodel" = "openstack" ]]; then
        juju expose ceph-radosgw || true
        #juju ssh ceph/0 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
    fi

    echo_info "Deployment finishing..."
}
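
# check_status blocks until `juju status` no longer reports the given unit
# state, polling every 30s with a 180-iteration cap (~90 minutes); it is
# invoked below as `check_status executing`.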

# In the case of a virtual deployment, clean up any previous environment first.
if [ "$virtinstall" -eq 1 ]; then
    ./clean.sh || true
fi

echo_info "Deployment started"
deploy

check_status executing

echo_info "Deployment finished"

echo_info "Configuring public access"

# Translate bundles.yaml to json.
python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < bundles.yaml > bundles.json
# Get the list of services that have a public interface.
srv_list=$(cat bundles.json | jq -r ".services | to_entries[] | {\"key\": .key, \"value\": .value[\"bindings\"]} | select (.value!=null) | select(.value[] | contains(\"public-api\"))".key)
# Get the container list from the service list.
cnt_list=$(for cnt in $srv_list; do juju status $cnt --format=json | jq -r ".machines[].containers | to_entries[]".key; done)
# Get the public network gateway (assuming it is the first IP of the network).
public_api_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"public\")".gateway)
admin_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"admin\")".gateway)
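
# The jq filters above assume labconfig.json carries a structure like this
# (hypothetical values):
#   {"opnfv": {"spaces": [{"type": "public", "gateway": "10.2.1.1"},
#                         {"type": "admin",  "gateway": "10.0.0.1"}]}}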

if [ -n "$admin_gw" ] && [ "$admin_gw" != "null" ]; then
    # Set the default gateway to the public api gateway.
    for cnt in $cnt_list; do
        echo_info "Changing default gateway on $cnt"
        if [ -n "$public_api_gw" ] && [ "$public_api_gw" != "null" ]; then
            juju ssh $cnt "sudo ip r d default && sudo ip r a default via $public_api_gw";
            juju ssh $cnt "gw_dev=\$(ip r l | grep 'via $public_api_gw' | cut -d \  -f5) &&\
                   sudo cp /etc/network/interfaces /etc/network/interfaces.bak &&\
                   echo 'removing old default gateway' &&\
                   sudo perl -i -pe 's/^\ *gateway $admin_gw\n$//' /etc/network/interfaces &&\
                   sudo perl -i -pe \"s/iface \$gw_dev inet static/iface \$gw_dev inet static\\n  gateway $public_api_gw/\" /etc/network/interfaces \
                   ";
        fi
    done
fi

# Configure the deployment.
if [ "$opnfvmodel" == "openstack" ]; then
    echo_info "Configuring OpenStack deployment"

    ./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true

    # Create the heat domain after pushing the public API into /etc/hosts.
    if [[ "$jujuver" > "2" ]]; then
        status=`juju run-action heat/0 domain-setup`
        echo $status
    else
        status=`juju action do heat/0 domain-setup`
        echo $status
    fi
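
    # "juju run-action" (2.x) supersedes "juju action do" (1.x); both print an
    # action id rather than the action's output. A minimal sketch to fetch the
    # result on 2.x, assuming the queued id is captured:
    #   juju show-action-output <action-id>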

    sudo ../juju/get-cloud-images || true
    ../juju/joid-configure-openstack || true

    if grep -q 'openbaton' bundles.yaml; then
        juju add-relation openbaton keystone
    fi

elif [ "$opnfvmodel" == "kubernetes" ]; then
    echo_info "Configuring Kubernetes deployment"

    ./k8.sh
fi

# Expose the juju GUI URL and credentials for logging into the juju GUI.
echo_info "Juju GUI can be accessed using the following URL and credentials:"
juju gui --show-credentials --no-browser

echo_info "Finished deployment and configuration"