#need to put multiple cases here to decide which bundle to deploy; by default use the odl bundle.
# The parameters below are the defaults and can be adjusted according to the release.
# Default deployment options, scraped from deploy.yaml.
# NOTE(review): grep/cut YAML parsing keeps the leading space after ":";
# downstream comparisons appear to tolerate it — confirm before trimming.
opnfvrel=$(grep release: deploy.yaml | cut -d ":" -f2)
openstack=$(grep openstack: deploy.yaml | cut -d ":" -f2)
opnfvtype=$(grep type: deploy.yaml | cut -d ":" -f2)
opnfvlab=$(grep lab: deploy.yaml | cut -d ":" -f2)
opnfvsdn=$(grep sdn: deploy.yaml | cut -d ":" -f2)
# Print usage to stderr and abort with status 1.
# Bug fixed: the original was `echo "..." 1>&2 exit 1;` — with no ";"
# before it, "exit 1" became two extra arguments to echo, so the script
# printed usage but never exited.
usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
[-l <default|intelpod5>]
[-r <a|b>]" 1>&2; exit 1; }
28 while getopts ":s:t:o:l:h:r:" opt; do
# Install Juju and the deployer from the stable PPA.
sudo apt-add-repository ppa:juju/stable -y
# Refresh package lists so packages from the freshly added PPA are visible;
# without this the following install can pick stale/missing versions.
sudo apt-get update -y
sudo apt-get install juju git juju-deployer -y

# Seed the Juju client configuration; create ~/.juju first so the copy
# cannot fail (or silently create a *file* named .juju) on a clean host.
mkdir -p ~/.juju
cp environments.yaml ~/.juju/
#by default maas creates two VMs; three more VMs are created here when needed.
# Log the MAAS CLI into the local MAAS server, using the server IP from
# deployment.yaml and the OAuth key from environments.yaml.
maas_ip=$(grep " ip_address" deployment.yaml | cut -d " " -f 10)
apikey=$(grep maas-oauth: environments.yaml | cut -d "'" -f 2)
maas login maas "http://${maas_ip}/MAAS/api/1.0" "${apikey}"
67 nodeexist=`maas maas nodes list hostname=node3-control`
69 if [ $nodeexist != *node3* ]; then
# Generate (but do not start) a libvirt definition for one extra node;
# --print-xml emits the XML, which tee saves to a file named after the
# node for the "virsh define" step below.
spawn_vm() {
    sudo virt-install --connect qemu:///system --name "$1" \
        --ram 8192 --vcpus 4 \
        --disk size=120,format=qcow2,bus=virtio,io=native,pool=default \
        --network bridge=virbr0,model=virtio \
        --network bridge=virbr0,model=virtio \
        --boot network,hd,menu=off --noautoconsole --vnc \
        --print-xml | tee "$1"
}
spawn_vm node3-control
spawn_vm node4-control
spawn_vm node5-compute
# Pull the first MAC address out of each generated libvirt definition;
# MAAS needs it to PXE-match the node on enlistment.
node3controlmac=$(grep "mac address" node3-control | head -1 | cut -d "'" -f 2)
node4controlmac=$(grep "mac address" node4-control | head -1 | cut -d "'" -f 2)
node5computemac=$(grep "mac address" node5-compute | head -1 | cut -d "'" -f 2)
# Register each generated XML definition with libvirt.
for vm in node3-control node4-control node5-compute; do
    sudo virsh -c qemu:///system define --file "$vm"
done
#######################################
# Enlist one VM in MAAS and print its system_id.
# Arguments: $1 node name, $2 tag, $3 MAC address
#######################################
enlistnode() {
    maas maas nodes new autodetect_nodegroup='yes' name="$1" tags="$2" \
        hostname="$1" power_type='virsh' mac_addresses="$3" \
        power_parameters_power_address="qemu+ssh://${USER}@192.168.122.1/system" \
        architecture='amd64/generic' power_parameters_power_id="$1" \
        | grep system_id | cut -d '"' -f 4
}

# Register the two extra control nodes and the compute node, then tag
# them so the bundle can target control/compute placement.
# Fixes: the near-identical 300-char command was triplicated, and the
# MAC/system-id expansions were unquoted (word-splitting risk).
controlnodeid=$(enlistnode node3-control control "$node3controlmac")
maas maas tag update-nodes control add="$controlnodeid"

controlnodeid=$(enlistnode node4-control control "$node4controlmac")
maas maas tag update-nodes control add="$controlnodeid"

computenodeid=$(enlistnode node5-compute compute "$node5computemac")
maas maas tag update-nodes compute add="$computenodeid"
#copy the files and create the extra resources needed for HA deployment
# in case of default VM labs.

#copy the script which needs to be deployed as part of the opnfv release
echo "...... deploying now ......"
# Append deployment-wide Juju environment settings in a single write.
# The quoted delimiter keeps every line literal (no expansion).
cat >> environments.yaml <<'SETTINGS'
 
 enable-os-refresh-update: false
 enable-os-upgrade: false
 admin-secret: admin
 default-series: trusty
SETTINGS
cp environments.yaml ~/.juju/
# HA on the default (virtual) lab relies on the extra MAAS VMs
# provisioned above.
if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
#cp ./$opnfvsdn/01-deploybundle.sh ./01-deploybundle.sh
# Hand off to the per-SDN bundle deployer with the selected options.
# NOTE(review): the arguments are unquoted — safe only while the values
# never contain whitespace; confirm before hardening.
./01-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn
#check whether charms are still executing hooks even after juju-deployer reports them as installed.
# Poll "juju status" until no charm is still executing relation hooks,
# so later steps do not race half-configured services.
# NOTE(review): $retval and $timeoutiter are initialised in elided lines
# above; the loop-exit and timeout handling are also elided — confirm.
while [ $retval -eq 0 ]; do
# Snapshot current status for the grep below.
juju status > status.txt
if [ "$(grep -c "executing" status.txt )" -ge 1 ]; then
# (typo "reltionship" is in a runtime message; left unchanged here)
echo " still executing the reltionship within charms ..."
# Give up after ~60 iterations — TODO confirm the timeout action in the
# elided branch body.
if [ $timeoutiter -ge 60 ]; then
timeoutiter=$((timeoutiter+1))
echo "...... deployment finishing ......."
#create the config RC file consumed by the various tests.
# Write the OpenStack credentials file consumed by later test steps.
# Positional parameters: $1 username, $2 password, $3 tenant,
# $4 auth URL, $5 region. The grouped redirect truncates and writes the
# file in one open instead of six append operations.
{
    echo " "
    echo "export OS_USERNAME=$1"
    echo "export OS_PASSWORD=$2"
    echo "export OS_TENANT_NAME=$3"
    echo "export OS_AUTH_URL=$4"
    echo "export OS_REGION_NAME=$5"
} > ./cloud/admin-openrc
153 #to get the address of a service using juju
156 juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
# Gather access details for the freshly deployed cloud and write a
# permission-restricted admin openrc under ./cloud.
mkdir -m 0700 -p cloud
controller_address=$(unitAddress keystone 0)
configOpenrc admin openstack admin "http://${controller_address}:5000/v2.0" Canonical
chmod 0600 cloud/admin-openrc
168 if [ "$#" -eq 0 ]; then
169 echo "This installtion will use default options"
173 echo "...... deployment started ......"
177 echo "...... deployment finished ......."
179 echo "...... creating OpenRc file for consuming by various user ......."
183 echo "...... finished ......."