# Need to handle multiple cases here to decide which bundle to deploy; by default use the ODL bundle.
# The parameters below are the defaults and can be overridden according to the release.
# Read the default deployment parameters from deploy.yaml.
# NOTE(review): `cut -d ":" -f2` keeps everything after the first colon,
# including any leading space — verify deploy.yaml uses "key:value" with no
# space, otherwise the [[ "$opnfvtype" = "ha" ]]-style comparisons below
# will never match. Backticks replaced with the modern $( ) form.
opnfvrel=$(grep release: deploy.yaml | cut -d ":" -f2)
openstack=$(grep openstack: deploy.yaml | cut -d ":" -f2)
opnfvtype=$(grep type: deploy.yaml | cut -d ":" -f2)
opnfvlab=$(grep lab: deploy.yaml | cut -d ":" -f2)
opnfvsdn=$(grep sdn: deploy.yaml | cut -d ":" -f2)
# Print the usage message to stderr and abort the script.
# Fix: the original ended with `1>&2 exit 1; }` — the words "exit" and "1"
# were passed as extra arguments to echo (and printed!), so usage() never
# actually exited. A semicolon is required before `exit 1`.
usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
[-l <default|intelpod5>]
[-r <a|b>]" 1>&2; exit 1; }
# Parse command-line overrides for the defaults read from deploy.yaml above.
# -s sdn, -l lab and -r release correspond to usage(); the handling of the
# remaining flags (-t, -o, -h) is in case arms not visible in this view.
while getopts ":s:t:o:l:h:r:f:" opt; do
# Presumably the f) arm: optional feature string, later handed to
# 01-deploybundle.sh as its fifth argument — TODO confirm the case label.
opnfvfeature=${OPTARG}
59 sudo apt-add-repository ppa:juju/stable -y
61 sudo apt-get install juju git juju-deployer -y
63 cp environments.yaml ~/.juju/
# By default MAAS creates two VMs; three more VMs are created below when needed.
# Discover the MAAS server address and API key from the generated YAML
# files, then log in so the maas CLI can register the extra nodes below.
# Backticks replaced with $( ); expansions quoted so an empty or odd value
# cannot word-split the login command line.
maas_ip=$(grep " ip_address" deployment.yaml | cut -d " " -f 10)
apikey=$(grep maas-oauth: environments.yaml | cut -d "'" -f 2)
maas login maas "http://${maas_ip}/MAAS/api/1.0" "${apikey}"
72 nodeexist=`maas maas nodes list hostname=node3-control`
74 if [ $nodeexist != *node3* ]; then
75 sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control
77 sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control
79 sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute
81 node3controlmac=`grep "mac address" node3-control | head -1 | cut -d "'" -f 2`
82 node4controlmac=`grep "mac address" node4-control | head -1 | cut -d "'" -f 2`
83 node5computemac=`grep "mac address" node5-compute | head -1 | cut -d "'" -f 2`
85 sudo virsh -c qemu:///system define --file node3-control
86 sudo virsh -c qemu:///system define --file node4-control
87 sudo virsh -c qemu:///system define --file node5-compute
89 controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses=$node3controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4 `
91 maas maas tag update-nodes control add=$controlnodeid
93 controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses=$node4controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4 `
95 maas maas tag update-nodes control add=$controlnodeid
97 computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4 `
99 maas maas tag update-nodes compute add=$computenodeid
# Copy the files and create the extra resources needed for an HA deployment
# in the case of the default VM labs.
# Copy the script which needs to be deployed as part of the OPNFV release.
# Append the juju bootstrap tuning options to environments.yaml and refresh
# the copy juju actually reads from ~/.juju/. The block redirection writes
# exactly the same lines the individual appends used to.
echo "...... deploying now ......"
{
    echo " "
    echo " enable-os-refresh-update: false"
    echo " enable-os-upgrade: false"
    echo " admin-secret: admin"
    echo " default-series: trusty"
} >> environments.yaml

cp environments.yaml ~/.juju/
# HA deployments on the default (virtual) lab get extra preparation here;
# the body of this branch is not fully visible in this view.
if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
# Old mechanism: the per-SDN bundle script was copied into place first.
#cp ./$opnfvsdn/01-deploybundle.sh ./01-deploybundle.sh
# NOTE(review): arguments are unquoted — an empty value (e.g. no -f flag)
# would shift the positional parameters seen by 01-deploybundle.sh.
# TODO confirm and quote them.
./01-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature
#check whether charms are still executing the code even juju-deployer says installed.
# Poll `juju status` until no unit reports "executing" any more.
# NOTE(review): $retval is initialised (and presumably cleared on success)
# outside this view — confirm the loop's exit condition there.
while [ $retval -eq 0 ]; do
# Snapshot the current model state; parsed with grep just below.
juju status > status.txt
# At least one unit is still running relation hooks?
if [ "$(grep -c "executing" status.txt )" -ge 1 ]; then
# Progress message while charm relation hooks are still running.
# Fix: corrected the typo "reltionship" in the user-facing message.
echo " still executing the relationship within charms ..."
# Give up waiting after 60 polling iterations; the timeout action itself
# is in lines not visible in this view.
if [ $timeoutiter -ge 60 ]; then
# Count one more completed poll cycle.
timeoutiter=$((timeoutiter+1))
echo "...... deployment finishing ......."
147 #create config RC file to consume by various tests.
150 echo " " > ./cloud/admin-openrc
151 echo "export OS_USERNAME=$1" >> ./cloud/admin-openrc
152 echo "export OS_PASSWORD=$2" >> ./cloud/admin-openrc
153 echo "export OS_TENANT_NAME=$3" >> ./cloud/admin-openrc
154 echo "export OS_AUTH_URL=$4" >> ./cloud/admin-openrc
155 echo "export OS_REGION_NAME=$5" >> ./cloud/admin-openrc
158 #to get the address of a service using juju
161 juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
166 mkdir -m 0700 -p cloud
168 controller_address=$(unitAddress keystone 0)
169 configOpenrc admin openstack admin http://$controller_address:5000/v2.0 Canonical
170 chmod 0600 cloud/admin-openrc
# No command-line arguments: every default from deploy.yaml/getopts stands.
if [ "$#" -eq 0 ]; then
# Fix: corrected the typo "installtion" in the user-facing message.
echo "This installation will use default options"
178 echo "...... deployment started ......"
182 echo "...... deployment finished ......."
184 echo "...... creating OpenRc file for consuming by various user ......."
188 echo "...... finished ......."