2 #placeholder for deployment script.
8 #install the packages needed
9 sudo apt-add-repository ppa:maas-deployers/stable -y
10 sudo apt-add-repository ppa:juju/stable -y
11 sudo apt-add-repository ppa:maas/stable -y
12 sudo apt-add-repository cloud-archive:liberty -y
13 sudo apt-get update -y
14 sudo apt-get dist-upgrade -y
15 sudo apt-get install openssh-server git maas-deployer juju juju-deployer maas-cli python-pip python-openstackclient gsutil -y
17 cp maas/deployment.yaml ./deployment.yaml
# Select the labconfig.yaml to use.
# The first parameter should be "custom" and the second should be either the
# absolute location of the labconfig file (including the file name) or a URL
# it can be downloaded from.
if [ "$1" == "custom" ]; then
# Try a local copy first; failure is tolerated so the URL fallback can run.
# NOTE(review): $2 is unquoted — paths with spaces will break. Confirm callers.
cp $2 ./labconfig.yaml || true
# Fall back to treating $2 as a URL: 3 retries, 10 second timeout per try.
wget $2 -t 3 -T 10 -O ./labconfig.yaml || true
# Sanity check: a usable labconfig should have at least 10 lines.
count=`wc -l labconfig.yaml | cut -d " " -f 1`
if [ $count -lt 10 ]; then
if [ ! -e ./labconfig.yaml ]; then
# No labconfig could be obtained: fall back to the in-tree defaults.
cp ../labconfig/default/deployment.yaml ./
cp ../labconfig/default/labconfig.yaml ./
# Derive the lab name from the lab_location field of labconfig.yaml.
labname=`grep "lab_location" labconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
# NOTE(review): the cp lines below appear to be branches of a per-lab
# selection keyed on the lab/pod name — confirm against the full file.
cp ../labconfig/intel/pod5/labconfig.yaml ./
#to be removed later once converted for all labs.
cp ../labconfig/intel/pod6/labconfig.yaml ./
#to be removed later once converted for all labs.
# NOTE(review): duplicate of the pod6 copy above — possibly a copy/paste
# slip intended for a different pod. Verify.
cp ../labconfig/intel/pod6/labconfig.yaml ./
#to be removed later once converted for all labs.
62 cp maas/orange/pod1/deployment.yaml ./deployment.yaml
65 cp maas/orange/pod2/deployment.yaml ./deployment.yaml
68 cp maas/att/virpod1/deployment.yaml ./deployment.yaml
71 cp maas/juniper/pod1/deployment.yaml ./deployment.yaml
74 cp maas/cengn_lynx/pod1/deployment.yaml ./deployment.yaml
80 cp ../labconfig/default/deployment.yaml ./
#make sure no password is asked for sudo during the deployment.
echo "$USER ALL=(ALL) NOPASSWD:ALL" > 90-joid-init
if [ -e /etc/sudoers.d/90-joid-init ]; then
# A fragment already exists: copy it out and make it user-editable so the
# new NOPASSWD rule can be appended to it.
sudo cp /etc/sudoers.d/90-joid-init 91-joid-init
sudo chown $USER:$USER 91-joid-init
# Temporarily user-writable so the unprivileged append redirection works.
sudo chmod 660 91-joid-init
sudo cat 90-joid-init >> 91-joid-init
sudo chown root:root 91-joid-init
# sudoers.d fragments must be mode 0440; sudo rejects group-writable files,
# so without this chmod the installed rule would be ignored.
sudo chmod 440 91-joid-init
sudo mv 91-joid-init /etc/sudoers.d/
sudo chown root:root 90-joid-init
# The file was created by echo with umask-derived permissions (e.g. 0664);
# sudoers.d fragments must be mode 0440 or sudo will reject them.
sudo chmod 440 90-joid-init
sudo mv 90-joid-init /etc/sudoers.d/
echo "... Deployment of maas Started ...."
# Generate a passphrase-less ssh key for the current user if none exists;
# it is registered with MAAS later in the script.
if [ ! -e $HOME/.ssh/id_rsa ]; then
ssh-keygen -N '' -f $HOME/.ssh/id_rsa
#define the default libvirt storage pool and try to start it even though
# it may already exist. On a fresh install it may or may not be there,
# hence the "|| true" guards below.
sudo apt-get install libvirt-bin -y
sudo adduser $USER libvirtd
sudo virsh pool-define-as default --type dir --target /var/lib/libvirt/images/ || true
sudo virsh pool-start default || true
sudo virsh pool-autostart default || true
# To avoid a module clash between apiclient/maas_client and the apiclient
# package from google, we remove google-api-python-client (pulled in by the
# yardstick installer) when it is present.
if [ $(pip list |grep google-api-python-client |wc -l) == 1 ]; then
# -y answers pip's "Proceed (y/n)?" prompt; without it the uninstall blocks
# forever in a non-interactive deployment run.
sudo pip uninstall -y google-api-python-client
122 sudo pip install shyaml
125 cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
# Virtual-install only: strip the <dhcp>/<range> elements from libvirt's
# default network, presumably so MAAS (not libvirt's dnsmasq) serves
# DHCP/PXE to the node VMs — verify against the full deployment flow.
if [ "$virtinstall" -eq 1 ]; then
sudo virsh net-dumpxml default > default-net-org.xml
sudo sed -i '/dhcp/d' default-net-org.xml
sudo sed -i '/range/d' default-net-org.xml
sudo virsh net-define default-net-org.xml
# Restart the network so the new definition takes effect.
sudo virsh net-destroy default
sudo virsh net-start default
136 #Below function will mark the interfaces in Auto mode to enbled by MAAS
138 listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
139 for nodes in $listofnodes
141 maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
#Below function will mark the interfaces in Auto mode to be enabled by MAAS,
# looking nodes up by the hostname they were added to MAAS with.
# $1=interface name, $2=mode (e.g. AUTO/DHCP), $3=subnet CIDR,
# $4=node role suffix (compute/control)
enableautomodebyname() {
if [ ! -z "$4" ]; then
# NOTE(review): $i looks like a counter from an enclosing loop over node
# indices (hostnames of the form node<i>-<role>) — confirm in the full file.
nodes=`maas maas nodes list hostname=node$i-$4 | grep system_id | cut -d '"' -f 4`
if [ ! -z "$nodes" ]; then
maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
160 #Below function will create vlan and update interface with the new vlan
161 # will return the vlan id created
163 newvlanid=`maas maas vlans create $2 name=$3 vid=$4 | grep resource | cut -d '/' -f 6 `
164 maas maas subnet update $5 vlan=$newvlanid
165 eval "$1"="'$newvlanid'"
168 #Below function will create interface with new vlan and bind to physical interface
170 listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
172 for nodes in $listofnodes
174 parentid=`maas maas interface read $nodes $2 | grep interfaces | cut -d '/' -f 8`
175 maas maas interfaces create-vlan $nodes vlan=$1 parent=$parentid
179 #convert labconfig file to deployment.yaml to consume by MAAS.
182 #just make sure the ssh keys added into maas for the current user
# Use the standard '-i' (in-place) flag: '--i' only works where GNU sed's
# long-option abbreviation happens to resolve it to --in-place, and fails
# outright on other sed implementations.
sed -i "s@/home/ubuntu@$HOME@g" ./deployment.yaml
sed -i "s@qemu+ssh://ubuntu@qemu+ssh://$USER@g" ./deployment.yaml
186 sudo maas-deployer -c deployment.yaml -d --force
188 sudo chown $USER:$USER environments.yaml
190 echo "... Deployment of maas finish ...."
192 maas_ip=`grep " ip_address" deployment.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
193 apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2`
194 maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}
195 maas maas sshkeys new key="`cat $HOME/.ssh/id_rsa.pub`"
197 #Added the Qtip public to run the Qtip test after install on bare metal nodes.
198 maas maas sshkeys new key="`cat ./maas/sshkeys/QtipKey.pub`"
#adding compute and control node VMs to MAAS for deployment purposes.
if [ "$virtinstall" -eq 1 ]; then
# Create the node VMs used for the deployment (one control, two computes).
# --print-xml only emits the domain XML (captured via tee into a file named
# after the node); the guests are actually defined by 'virsh define' below.
sudo virt-install --connect qemu:///system --name node1-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node1-control
sudo virt-install --connect qemu:///system --name node2-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node2-compute
sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute
# The first MAC address in each domain XML identifies the VM to MAAS.
node1controlmac=`grep "mac address" node1-control | head -1 | cut -d "'" -f 2`
node2computemac=`grep "mac address" node2-compute | head -1 | cut -d "'" -f 2`
node5computemac=`grep "mac address" node5-compute | head -1 | cut -d "'" -f 2`
# Define the guests from the saved XML files.
sudo virsh -c qemu:///system define --file node1-control
sudo virsh -c qemu:///system define --file node2-compute
sudo virsh -c qemu:///system define --file node5-compute
# Create the role tags used to target control vs compute machines.
maas maas tags new name='control'
maas maas tags new name='compute'
# Enlist each VM in MAAS with virsh power control and attach its role tag.
controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node1-control' tags='control' hostname='node1-control' power_type='virsh' mac_addresses=$node1controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node1-control' | grep system_id | cut -d '"' -f 4 `
maas maas tag update-nodes control add=$controlnodeid
computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node2-compute' tags='compute' hostname='node2-compute' power_type='virsh' mac_addresses=$node2computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node2-compute' | grep system_id | cut -d '"' -f 4 `
maas maas tag update-nodes compute add=$computenodeid
computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4 `
maas maas tag update-nodes compute add=$computenodeid
233 # Enable vlan interfaces with maas
237 enableautomodebyname eth4 AUTO "10.5.12.0/24" compute || true
238 enableautomodebyname eth4 AUTO "10.5.12.0/24" control || true
242 enableautomodebyname eth4 AUTO "10.6.12.0/24" compute || true
243 enableautomodebyname eth4 AUTO "10.6.12.0/24" control || true
247 crvlanupdsubnet vlan902 1 "DataNetwork" 902 2 || true
248 crvlanupdsubnet vlan905 2 "PublicNetwork" 905 3 || true
249 crnodevlanint $vlan902 eth0 || true
250 crnodevlanint $vlan905 eth1 || true
251 enableautomodebyname eth0.902 AUTO "10.9.12.0/24" compute || true
252 enableautomodebyname eth1.905 AUTO "10.9.15.0/24" compute || true
253 enableautomodebyname eth0.902 AUTO "10.9.12.0/24" control || true
254 enableautomodebyname eth1.905 AUTO "10.9.15.0/24" control || true
258 enableautomodebyname eth2 DHCP "192.168.21.0/24" compute || true
259 enableautomodebyname eth3 AUTO "192.168.11.0/24" compute || true
260 enableautomodebyname eth2 DHCP "192.168.21.0/24" control || true
261 enableautomodebyname eth3 AUTO "192.168.11.0/24" control || true
265 enableautomodebyname eth4 DHCP "192.168.22.0/24" compute || true
266 enableautomodebyname eth5 AUTO "192.168.12.0/24" compute || true
267 enableautomodebyname eth2 DHCP "192.168.22.0/24" control || true
268 enableautomodebyname eth3 AUTO "192.168.12.0/24" control || true
271 enableautomodebyname eth1 AUTO "192.168.10.0/24" control || true
277 crvlanupdsubnet vlan1201 1 "DataNetwork" 1201 2 || true
278 crvlanupdsubnet vlan1202 2 "PublicNetwork" 1202 3 || true
279 crnodevlanint $vlan1201 eth1 || true
280 crnodevlanint $vlan1202 eth1 || true
281 enableautomode eth1.1201 AUTO "172.16.121.3/24" compute || true
282 enableautomode eth1.1201 AUTO "172.16.121.3/24" control || true
286 echo " .... MAAS deployment finished successfully ...."
288 #echo "... Deployment of opnfv release Started ...."
289 #python deploy.py $maas_ip