# Need to handle multiple cases here to decide which bundle to deploy; by default use the ODL bundle.
# The parameters below are the defaults and can be adjusted according to the release.
opnfvrel=`grep release: deploy.yaml | cut -d ":" -f2`
openstack=`grep openstack: deploy.yaml | cut -d ":" -f2`
opnfvtype=`grep type: deploy.yaml | cut -d ":" -f2`
opnfvlab=`grep lab: deploy.yaml | cut -d ":" -f2`
opnfvsdn=`grep sdn: deploy.yaml | cut -d ":" -f2`
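# Illustrative deploy.yaml that the grep/cut extraction above expects; the values
# shown are examples only, not the shipped defaults:
#   release: b
#   openstack: liberty
#   type: ha
#   lab: default
#   sdn: odl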
usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
                          [-l <default|intelpod5>]
                          [-f <ipv6|dpdk|lxd|dvr>]
                          [-r <a|b>]" 1>&2; exit 1; }
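# Example invocation (script name and option values are illustrative):
#   ./deploy.sh -s odl -l default -f ipv6 -r b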
while getopts ":s:t:o:l:h:r:f:d:a:" opt; do
        opnfvfeature=${OPTARG}
sudo apt-add-repository ppa:juju/stable -y
sudo apt-get install juju git juju-deployer -y
cp environments.yaml ~/.juju/
# By default MAAS creates two VMs; create the additional control VMs here in case more are needed.
maas_ip=`grep " ip_address" deployment.yaml | cut -d " " -f 10`
apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2`
maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}
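# The two greps above assume lines of roughly this shape (illustrative; the exact
# formatting depends on the lab configuration):
#   deployment.yaml:    "      ip_address: 192.168.122.2"
#   environments.yaml:  "maas-oauth: '<consumer-key>:<token-key>:<token-secret>'"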
nodeexist=`maas maas nodes list hostname=node3-control`

if [[ "$nodeexist" != *node3* ]]; then
    sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control

    sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control

    node3controlmac=`grep "mac address" node3-control | head -1 | cut -d "'" -f 2`
    node4controlmac=`grep "mac address" node4-control | head -1 | cut -d "'" -f 2`
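    # The MAC extraction relies on the libvirt domain XML produced by --print-xml,
    # which contains an interface MAC line of the form (illustrative):
    #   <mac address='52:54:00:xx:xx:xx'/>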
    sudo virsh -c qemu:///system define --file node3-control
    sudo virsh -c qemu:///system define --file node4-control
    controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses=$node3controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4 `

    maas maas tag update-nodes control add=$controlnodeid

    controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses=$node4controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4 `

    maas maas tag update-nodes control add=$controlnodeid
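    # The grep/cut pipeline above picks the system_id out of the JSON returned by
    # "maas maas nodes new", which contains a field such as (illustrative):
    #   "system_id": "node-1a2b3c4d-0000-0000-0000-000000000000"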
# Copy the files and create the extra resources needed for an HA deployment
# in the case of the default VM labs.

# Copy the script which needs to be deployed as part of the OPNFV release.
echo "...... deploying now ......"
echo " " >> environments.yaml
echo " enable-os-refresh-update: false" >> environments.yaml
echo " enable-os-upgrade: false" >> environments.yaml
echo " admin-secret: admin" >> environments.yaml
echo " default-series: $opnfvdistro" >> environments.yaml
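# After these appends the tail of environments.yaml should look roughly like this
# (indentation and series value are illustrative):
#   enable-os-refresh-update: false
#   enable-os-upgrade: false
#   admin-secret: admin
#   default-series: trusty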
cp environments.yaml ~/.juju/

if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
# Default case: deploy the OPNFV platform.
./01-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro
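# Positional arguments passed to 01-deploybundle.sh, in the order given above:
#   $1 deployment type, $2 openstack release, $3 lab, $4 sdn controller,
#   $5 extra feature(s), $6 ubuntu distro/series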
# Check whether the charms are still executing code even after juju-deployer reports them as installed.
while [ $retval -eq 0 ]; do
    juju status > status.txt
    if [ "$(grep -c "executing" status.txt)" -ge 1 ]; then
        echo " still executing the relationship within charms ..."
        if [ $timeoutiter -ge 60 ]; then
        timeoutiter=$((timeoutiter+1))

status=`juju action do heat/0 domain-setup`

echo "...... deployment finishing ......."
# Create the config RC file to be consumed by the various tests.
echo " " > ./cloud/admin-openrc
echo "export OS_USERNAME=$1" >> ./cloud/admin-openrc
echo "export OS_PASSWORD=$2" >> ./cloud/admin-openrc
echo "export OS_TENANT_NAME=$3" >> ./cloud/admin-openrc
echo "export OS_AUTH_URL=$4" >> ./cloud/admin-openrc
echo "export OS_REGION_NAME=$5" >> ./cloud/admin-openrc
# Get the public address of a service unit using juju.
juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
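# Illustrative shape of the juju status YAML that the one-liner above walks:
#   services:
#     keystone:
#       units:
#         keystone/0:
#           public-address: 10.0.4.21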
if [ "$opnfvsdn" == "onos" ]; then
    sh onos/juju_test_prepare.sh "$opnfvlab"

mkdir -m 0700 -p cloud
controller_address=$(unitAddress keystone 0)
configOpenrc admin openstack admin http://$controller_address:5000/v2.0 Canonical
chmod 0600 cloud/admin-openrc
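# Tests can then consume the credentials with, for example (assuming the nova
# client is installed):
#   source ./cloud/admin-openrc && nova list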
if [ "$#" -eq 0 ]; then
    echo "This installation will use default options"
echo "...... deployment started ......"
echo "...... deployment finished ......."
echo "...... creating the OpenRC file for consumption by various users ......."
echo "...... finished ......."