echo " Cleanup Started ..."
./clean.sh
-virsh destroy opnfv-maas || true
-virsh destroy bootstrap || true
-virsh destroy node1-control || true
-virsh destroy node2-compute || true
-virsh undefine opnfv-maas || true
-virsh undefine bootstrap || true
-virsh undefine node1-control || true
-virsh undefine node2-compute || true
-sudo rm -rf /var/lib/libvirt/images/bootstrap.img /var/lib/libvirt/images/node1-control.img /var/lib/libvirt/images/node2-compute.img || true
-
+ virsh destroy opnfv-maas || true
+ virsh destroy bootstrap || true
+ virsh destroy node1-control || true
+ virsh destroy node3-control || true
+ virsh destroy node4-control || true
+ virsh destroy node2-compute || true
+ virsh destroy node5-compute || true
+ virsh undefine opnfv-maas || true
+ virsh undefine bootstrap || true
+ virsh undefine node1-control || true
+ virsh undefine node3-control || true
+ virsh undefine node4-control || true
+ virsh undefine node2-compute || true
+ virsh undefine node5-compute || true
+ sudo rm -rf /var/lib/libvirt/images/opnfv-maas.img /var/lib/libvirt/images/bootstrap.img /var/lib/libvirt/images/node1-control.img /var/lib/libvirt/images/node3-control.img /var/lib/libvirt/images/node4-control.img /var/lib/libvirt/images/node2-compute.img /var/lib/libvirt/images/node5-compute.img || true
+
echo " Cleanup Finished ..."
cp environments.yaml ~/.juju/
}
+# By default MAAS creates only two VMs; for an HA deployment three more VMs are needed.
+createresource() {
+ maas_ip=`grep " ip_address" deployment.yaml | cut -d " " -f 10`
+ apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2`
+ maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}
+
+ nodeexist=`maas maas nodes list hostname=node3-control`
+
+ if [[ "$nodeexist" != *node3* ]]; then
+ sudo virt-install --connect qemu:///system --name node3-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node3-control
+
+ sudo virt-install --connect qemu:///system --name node4-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node4-control
+
+ sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute
+
+ node3controlmac=`grep "mac address" node3-control | head -1 | cut -d "'" -f 2`
+ node4controlmac=`grep "mac address" node4-control | head -1 | cut -d "'" -f 2`
+ node5computemac=`grep "mac address" node5-compute | head -1 | cut -d "'" -f 2`
+
+ sudo virsh -c qemu:///system define --file node3-control
+ sudo virsh -c qemu:///system define --file node4-control
+ sudo virsh -c qemu:///system define --file node5-compute
+
+ controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node3-control' tags='control' hostname='node3-control' power_type='virsh' mac_addresses=$node3controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node3-control' | grep system_id | cut -d '"' -f 4 `
+
+ maas maas tag update-nodes control add=$controlnodeid
+
+ controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node4-control' tags='control' hostname='node4-control' power_type='virsh' mac_addresses=$node4controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node4-control' | grep system_id | cut -d '"' -f 4 `
+
+ maas maas tag update-nodes control add=$controlnodeid
+
+ computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4 `
+
+ maas maas tag update-nodes compute add=$computenodeid
+ fi
+}
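# A small verification sketch (not part of the change itself): the freshly
# registered nodes can be checked with the same MAAS 1.0 CLI session used
# above, for example:
#
#   for node in node3-control node4-control node5-compute; do
#       maas maas nodes list hostname=$node | grep -q "$node" \
#           && echo "$node registered in MAAS" || echo "$node missing"
#   done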
+
+# Copy the files and create the extra resources needed for an HA deployment
+# when using the default VM lab.
deploy() {
#copy the script which needs to be deployed as part of the OPNFV release
echo "...... deploying now ......"
cp environments.yaml ~/.juju/
+ if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
+ createresource
+ fi
+
cp ./$opnfvsdn/01-deploybundle.sh ./01-deploybundle.sh
./00-bootstrap.sh
./01-deploybundle.sh $opnfvtype $openstack $opnfvlab
}
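# For illustration only: deploy() hands three positional arguments to
# 01-deploybundle.sh -- the deployment type, the OpenStack release and the
# lab name. A hypothetical HA run on the default virtual lab would therefore
# end with something like:
#
#   ./01-deploybundle.sh ha liberty default
#
# ("liberty" is only an assumed example value for $openstack here).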
+# Check whether charms are still executing code even though juju-deployer reports them as installed.
check_status() {
retval=0
timeoutiter=0
echo "...... deployment finishing ......."
}
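# The check_status() body above is only a stub in this excerpt; one possible
# polling loop (an assumption, not the project's actual implementation) using
# the retval and timeoutiter variables could look like:
#
#   while [ $timeoutiter -lt 60 ]; do
#       if ! juju status | grep -q "executing"; then
#           retval=0
#           break
#       fi
#       timeoutiter=$((timeoutiter + 1))
#       sleep 30
#   done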
+# Create the openrc config file consumed by the various tests.
configOpenrc()
{
echo " " > ./cloud/admin-openrc
echo "export OS_REGION_NAME=$5" >> ./cloud/admin-openrc
}
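# Only the OS_REGION_NAME export is shown above. Judging from the call
# "configOpenrc admin openstack admin http://$controller_address:5000/v2.0 Canonical"
# further down, the remaining positional parameters presumably map to the
# usual openrc variables, roughly:
#
#   echo "export OS_USERNAME=$1"    >> ./cloud/admin-openrc
#   echo "export OS_PASSWORD=$2"    >> ./cloud/admin-openrc
#   echo "export OS_TENANT_NAME=$3" >> ./cloud/admin-openrc
#   echo "export OS_AUTH_URL=$4"    >> ./cloud/admin-openrc
#
# (that mapping is an assumption based on the call site, not confirmed here).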
+# Get the public address of a service unit via juju status.
unitAddress()
{
juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
# Package sources. These will be used on the MAAS controller.
apt_sources:
- - ppa:maas/stable
+ - ppa:maas/next
- ppa:juju/stable
# Virsh power settings
broadcast_ip: 192.168.122.255
router_ip: 192.168.122.1
static_range:
- low: 192.168.122.51
- high: 192.168.122.60
+ low: 192.168.122.101
+ high: 192.168.122.200
dynamic_range:
low: 192.168.122.5
- high: 192.168.122.50
+ high: 192.168.122.100
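# (The dynamic and static ranges above are widened, presumably to leave enough
#  addresses for the extra HA nodes and their containers; they remain
#  non-overlapping: dynamic .5-.100, static .101-.200.)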
# Physical nodes to be added to the MAAS cluster. Nodes will be
# configured, commissioned and put into the Ready state so
case "$1" in
'nonha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'ha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'tip' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
* )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
esac
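# For illustration: with opnfvtype=nonha and an assumed OpenStack release name
# of "liberty" for $2, the two calls above expand to
#
#   juju-deployer -vW -d -c bundles.yaml trusty-liberty-nodes
#   juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-liberty
#
# where -t 3600 raises juju-deployer's deployment timeout to one hour.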
- "lxc:nodes-api=2"
mongodb:
charm: cs:trusty/mongodb
- num_units: 3
+ num_units: 1
to:
- "lxc:nodes-api=0"
- - "lxc:nodes-api=1"
- - "lxc:nodes-api=2"
mysql:
charm: cs:trusty/percona-cluster
num_units: 3
case "$1" in
'nonha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'ha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'tip' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
* )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
esac
juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"machine\"]" 2> /dev/null
}
-<<<<<<< HEAD
-juju run --service ceph 'sudo ceph osd pool set cinder-ceph size 1'
-juju run --service ceph 'sudo ceph osd pool set cinder-ceph min_size 1'
-=======
-#juju run --service ceph 'sudo ceph osd pool set cinder-ceph size 1'
-#juju run --service ceph 'sudo ceph osd pool set cinder-ceph min_size 1'
->>>>>>> 69227d1... modified the bundle to include the ceph and cinder changes.
-
mkdir -m 0700 -p cloud
controller_address=$(unitAddress keystone 0)
configOpenrc admin openstack admin http://$controller_address:5000/v2.0 Canonical > cloud/admin-openrc
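# Once the credentials file exists it can be consumed in the usual way, e.g.
# (assuming the OpenStack clients are installed on this host):
#
#   source ./cloud/admin-openrc
#   nova list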
case "$1" in
'nonha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'ha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'tip' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
* )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
esac
juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"machine\"]" 2> /dev/null
}
-<<<<<<< HEAD
-juju run --service ceph 'sudo ceph osd pool set cinder-ceph size 1'
-juju run --service ceph 'sudo ceph osd pool set cinder-ceph min_size 1'
-=======
-#juju run --service ceph 'sudo ceph osd pool set cinder-ceph size 1'
-#juju run --service ceph 'sudo ceph osd pool set cinder-ceph min_size 1'
->>>>>>> 69227d1... modified the bundle to include the ceph and cinder changes.
-
mkdir -m 0700 -p cloud
controller_address=$(unitAddress keystone 0)
configOpenrc admin openstack admin http://$controller_address:5000/v2.0 Canonical > cloud/admin-openrc
case "$1" in
'nonha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'ha' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
'tip' )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
* )
juju-deployer -vW -d -c bundles.yaml trusty-"$2"-nodes
- juju-deployer -vW -d -c bundles.yaml trusty-"$2"
+ juju-deployer -vW -d -t 3600 -c bundles.yaml trusty-"$2"
;;
esac