2 #placeholder for deployment script.
# --- Package installation -------------------------------------------------
# Add the PPAs / cloud archive needed for MAAS, Juju and the deployer tools,
# then upgrade the host and install the tool chain.
8 #install the packages needed
9 sudo apt-add-repository ppa:opnfv-team/proposed -y
10 sudo apt-add-repository ppa:maas-deployers/stable -y
11 sudo apt-add-repository ppa:juju/stable -y
12 sudo apt-add-repository ppa:maas/stable -y
13 sudo apt-add-repository cloud-archive:mitaka -y
14 sudo apt-get update -y
15 sudo apt-get dist-upgrade -y
16 sudo pip install --upgrade pip
17 sudo apt-get install openssh-server bzr git maas-deployer juju juju-deployer \
18 maas-cli python-pip python-psutil python-openstackclient \
19 python-congressclient gsutil charm-tools pastebinit -y
# Pin Juju to 1.25.5: drop the archive version and install a known-good deb
# fetched directly from Launchpad.
21 sudo apt-get purge juju -y
22 wget https://launchpad.net/~juju/+archive/ubuntu/stable/+files/juju-core_1.25.5-0ubuntu1~14.04.2~juju1_amd64.deb
23 sudo dpkg -i juju-core_1.25.5-0ubuntu1~14.04.2~juju1_amd64.deb
25 #first parameter should be custom and second should be either
26 # absolute location of file (including file name) or url of the
# NOTE(review): the sentence above is cut off here; its continuation lies
# outside this excerpt.
36 # Get labconfig and generate deployment.yaml for MAAS and deployconfig.yaml
# Known hardware pods: derive the labconfig path from the lab name and
# generate both YAML files from it.
38 intelpod[569]|orangepod[12]|cengnpod[12] )
# Split e.g. "intelpod5" on "pod" into ("intel" "5") to build the path.
39 array=(${labname//pod/ })
40 cp ../labconfig/${array[0]}/pod${array[1]}/labconfig.yaml .
41 python genMAASConfig.py -l labconfig.yaml > deployment.yaml
42 python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
# AT&T virtual pod: fixed labconfig location.
45 cp ../labconfig/att/virpod1/labconfig.yaml .
46 python genMAASConfig.py -l labconfig.yaml > deployment.yaml
47 python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
# Juniper pod: a pre-generated deployment.yaml is used as-is.
50 cp maas/juniper/pod1/deployment.yaml ./deployment.yaml
# Custom lab: take labconfig from a local file if it exists…
53 if [ -e $labfile ]; then
54 cp $labfile ./labconfig.yaml || true
# …otherwise fetch it over HTTP (3 tries, 10 s timeout), best effort.
56 wget $labconfigfile -t 3 -T 10 -O ./labconfig.yaml || true
57 count=`wc -l labconfig.yaml | cut -d " " -f 1`
# Fewer than 10 lines means the download is bogus (error page / empty file).
58 if [ $count -lt 10 ]; then
62 if [ ! -e ./labconfig.yaml ]; then
65 python genMAASConfig.py -l labconfig.yaml > deployment.yaml
66 python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
# Recover the lab name from the generated deployment.yaml (maas_name field).
67 labname=`grep "maas_name" deployment.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
75 # In the case of a virtual deployment get deployment.yaml and deployconfig.yaml
76 if [ "$virtinstall" -eq 1 ]; then
# Virtual installs skip generation and use the shipped default configs.
79 cp ../labconfig/default/deployment.yaml ./
80 cp ../labconfig/default/labconfig.yaml ./
81 cp ../labconfig/default/deployconfig.yaml ./
85 # Prepare local environment to avoid password asking
88 # make sure no password is asked during the deployment.
89 echo "$USER ALL=(ALL) NOPASSWD:ALL" > 90-joid-init
# If a previous 90-joid-init is already installed, merge its entries with the
# new one into 91-joid-init and install that, preserving existing rules.
91 if [ -e /etc/sudoers.d/90-joid-init ]; then
92 sudo cp /etc/sudoers.d/90-joid-init 91-joid-init
93 sudo chown $USER:$USER 91-joid-init
94 sudo chmod 660 91-joid-init
95 sudo cat 90-joid-init >> 91-joid-init
96 sudo chown root:root 91-joid-init
97 sudo mv 91-joid-init /etc/sudoers.d/
# (else branch — the else/fi lines fall outside this excerpt)
# No prior file: install the freshly written 90-joid-init directly.
99 sudo chown root:root 90-joid-init
100 sudo mv 90-joid-init /etc/sudoers.d/
# Generate a passphrase-less SSH key if the user does not have one yet; it is
# registered with MAAS later in the script.
103 if [ ! -e $HOME/.ssh/id_rsa ]; then
104 ssh-keygen -N '' -f $HOME/.ssh/id_rsa
107 echo "... Deployment of maas Started ...."
113 # define the pool and try to start it even though it may already exist.
114 # For a fresh install this may or may not be there.
115 sudo apt-get install libvirt-bin -y
116 sudo adduser $USER libvirtd
# Best-effort: pool-define/start/autostart fail harmlessly if already done.
117 sudo virsh pool-define-as default --type dir --target /var/lib/libvirt/images/ || true
118 sudo virsh pool-start default || true
119 sudo virsh pool-autostart default || true
121 # In case of virtual install set network
122 if [ "$virtinstall" -eq 1 ]; then
# Strip the dhcp/range stanzas from libvirt's default network so that MAAS
# (not libvirt dnsmasq) serves DHCP to the PXE-booting node VMs, then
# re-define and restart the network with the edited XML.
123 sudo virsh net-dumpxml default > default-net-org.xml
124 sudo sed -i '/dhcp/d' default-net-org.xml
125 sudo sed -i '/range/d' default-net-org.xml
126 sudo virsh net-define default-net-org.xml
127 sudo virsh net-destroy default
128 sudo virsh net-start default
131 # Ensure virsh can connect without ssh auth
132 cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
136 # Cleanup, juju init and config backup
139 # To avoid problem between apiclient/maas_client and apiclient from google
140 # we remove the package google-api-python-client from yardstick installer
141 if [ $(pip list |grep google-api-python-client |wc -l) == 1 ]; then
142 sudo pip uninstall google-api-python-client
145 #create backup directory
# mkdir fails if the directory already exists; ignore that case.
146 mkdir ~/joid_config/ || true
147 mkdir ~/.juju/ || true
# Run the actual MAAS deployment from the generated deployment.yaml.
156 sudo maas-deployer -c deployment.yaml -d --force
# maas-deployer writes environments.yaml as root; hand it back to $USER.
158 sudo chown $USER:$USER environments.yaml
160 echo "... Deployment of maas finish ...."
162 # Backup deployment.yaml and deployconfig.yaml in .juju folder
164 cp ./environments.yaml ~/.juju/
165 cp ./environments.yaml ~/joid_config/
167 if [ -e ./deployconfig.yaml ]; then
168 cp ./deployconfig.yaml ~/.juju/
169 cp ./labconfig.yaml ~/.juju/
170 cp ./deployconfig.yaml ~/joid_config/
171 cp ./labconfig.yaml ~/joid_config/
174 if [ -e ./deployment.yaml ]; then
175 cp ./deployment.yaml ~/.juju/
176 cp ./deployment.yaml ~/joid_config/
# Extract the MAAS server IP (from deployment.yaml) and the OAuth API key
# (from environments.yaml), log in to the MAAS 1.0 API, and register the
# local SSH public key so deployed nodes accept our logins.
183 maas_ip=`grep " ip_address" deployment.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
184 apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2`
185 maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}
186 maas maas sshkeys new key="`cat $HOME/.ssh/id_rsa.pub`"
188 #Added the Qtip public key to run the Qtip test after install on bare metal nodes.
189 #maas maas sshkeys new key="`cat ./maas/sshkeys/QtipKey.pub`"
190 #maas maas sshkeys new key="`cat ./maas/sshkeys/DominoKey.pub`"
192 #adding compute and control nodes VM to MAAS for virtual deployment purpose.
193 if [ "$virtinstall" -eq 1 ]; then
194 # create two more VMs to do the deployment.
# NOTE(review): three VMs are actually created below
# (node1-control, node2-compute, node5-compute) — comment is stale.
# --print-xml only emits the domain XML (captured via tee); the VMs are
# defined from that XML further down.
195 sudo virt-install --connect qemu:///system --name node1-control --ram 8192 --cpu host --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node1-control
197 sudo virt-install --connect qemu:///system --name node2-compute --ram 8192 --cpu host --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node2-compute
199 sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --cpu host --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute
# Scrape the first "mac address" from each saved XML (the first NIC).
201 node1controlmac=`grep "mac address" node1-control | head -1 | cut -d "'" -f 2`
202 node2computemac=`grep "mac address" node2-compute | head -1 | cut -d "'" -f 2`
203 node5computemac=`grep "mac address" node5-compute | head -1 | cut -d "'" -f 2`
# Define the VMs in libvirt from the captured XML files.
205 sudo virsh -c qemu:///system define --file node1-control
206 sudo virsh -c qemu:///system define --file node2-compute
207 sudo virsh -c qemu:///system define --file node5-compute
# Create the control/compute tags, enlist each VM in MAAS with virsh power
# management (power address goes through qemu+ssh to the host), and tag it.
209 maas maas tags new name='control'
210 maas maas tags new name='compute'
212 controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node1-control' tags='control' hostname='node1-control' power_type='virsh' mac_addresses=$node1controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node1-control' | grep system_id | cut -d '"' -f 4 `
214 maas maas tag update-nodes control add=$controlnodeid
216 computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node2-compute' tags='compute' hostname='node2-compute' power_type='virsh' mac_addresses=$node2computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node2-compute' | grep system_id | cut -d '"' -f 4 `
218 maas maas tag update-nodes compute add=$computenodeid
220 computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4 `
222 maas maas tag update-nodes compute add=$computenodeid
226 # Functions for MAAS network customization
229 #Below function will mark the interfaces in Auto mode to be enabled by MAAS
# (function header is outside this excerpt; call sites pass
#  $1 = interface name, $2 = mode, $3 = subnet CIDR)
# Links $1 to subnet $3 in mode $2 on EVERY node currently known to MAAS.
231 listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
232 for nodes in $listofnodes
234 maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
238 #Below function will mark the interfaces in Auto mode to be enabled by MAAS
239 # using hostname of the node added into MAAS
240 enableautomodebyname() {
# $1 = interface, $2 = mode, $3 = subnet; the $4 check below guards an
# optional argument — NOTE(review): visible call sites pass only three
# arguments, so confirm what $4 carries (likely the hostname per the
# comment above).
241 if [ ! -z "$4" ]; then
244 nodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
245 if [ ! -z "$nodes" ]; then
246 maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
252 #Below function will create vlan and update interface with the new vlan
253 # will return the vlan id created
# Arguments (per call sites): $1 = name of the shell variable that receives
# the created vlan id, $2 = fabric, $3 = vlan name, $4 = vid, $5 = subnet id.
255 newvlanid=`maas maas vlans create $2 name=$3 vid=$4 | grep resource | cut -d '/' -f 6 `
256 maas maas subnet update $5 vlan=$newvlanid
# Out-parameter: assign the new vlan id to the variable named in $1.
257 eval "$1"="'$newvlanid'"
260 #Below function will create interface with new vlan and bind to physical interface
# $1 = vlan id, $2 = physical parent interface (per call sites).
262 listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
264 for nodes in $listofnodes
# Look up the parent interface id of $2 on this node, then create the
# vlan sub-interface on top of it.
266 parentid=`maas maas interface read $nodes $2 | grep interfaces | cut -d '/' -f 8`
267 maas maas interfaces create-vlan $nodes vlan=$1 parent=$parentid
271 #function for JUJU environment
# Build credential.yaml from environments.yaml (first line's second field is
# used for both controller and cloud name) and register it with Juju 2.x.
274 controllername=`awk 'NR==1{print $2}' environments.yaml`
275 cloudname=`awk 'NR==1{print $2}' environments.yaml`
277 echo "credentials:" > credential.yaml
278 echo " $controllername:" >> credential.yaml
279 echo " opnfv-credentials:" >> credential.yaml
280 echo " auth-type: oauth1" >> credential.yaml
281 echo " maas-oauth: $apikey" >> credential.yaml
283 juju add-credential $controllername -f credential.yaml --replace
# Build maas-cloud.yaml pointing at the MAAS endpoint and register the cloud.
287 controllername=`awk 'NR==1{print $2}' environments.yaml`
288 cloudname=`awk 'NR==1{print $2}' environments.yaml`
290 echo "clouds:" > maas-cloud.yaml
291 echo " $cloudname:" >> maas-cloud.yaml
292 echo " type: maas" >> maas-cloud.yaml
293 echo " auth-types: [oauth1]" >> maas-cloud.yaml
294 echo " endpoint: http://$maas_ip/MAAS" >> maas-cloud.yaml
296 juju add-cloud $cloudname maas-cloud.yaml --replace
# Lab-specific network customization: create vlans 904 (management) and 905
# (public) on their fabrics, attach vlan 905 to eth1/eth3 on every node, and
# link the resulting interfaces to their subnets in AUTO mode. All steps are
# best effort (|| true) so reruns don't abort the deployment.
# crvlanupdsubnet's first argument is an out-variable name: $vlan905 below is
# set by the second call.
307 crvlanupdsubnet vlan904 fabric-1 "MgmtNetwork" 904 2 || true
308 crvlanupdsubnet vlan905 fabric-2 "PublicNetwork" 905 3 || true
309 crnodevlanint $vlan905 eth1 || true
310 crnodevlanint $vlan905 eth3 || true
311 enableautomodebyname eth1.905 AUTO "10.9.15.0/24" || true
312 enableautomodebyname eth3.905 AUTO "10.9.15.0/24" || true
313 enableautomodebyname eth0 AUTO "10.9.12.0/24" || true
314 enableautomodebyname eth2 AUTO "10.9.12.0/24" || true
319 # Enable MAAS nodes interfaces
322 #read interface needed in Auto mode and enable it. Will be removed once auto enablement is implemented in the maas-deployer.
# Restore deployconfig.yaml from the backups made earlier (joid_config
# preferred, ~/.juju as fallback).
323 if [ -e ~/joid_config/deployconfig.yaml ]; then
324 cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
325 elif [ -e ~/.juju/deployconfig.yaml ]; then
326 cp ~/.juju/deployconfig.yaml ./deployconfig.yaml
329 if [ -e ./deployconfig.yaml ]; then
# Pull the interface list and the data/storage/public subnets from the config.
330 enableiflist=`grep "interface-enable" deployconfig.yaml | cut -d ' ' -f 4 `
331 datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
332 stornet=`grep "storageNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
333 pubnet=`grep "publicNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
335 # split EXTERNAL_NETWORK=first ip;last ip; gateway;network
# For each configured network ("''" means unset in the YAML), split the
# comma-separated interface list and link every interface to that subnet in
# AUTO mode, best effort.
337 if [ "$datanet" != "''" ]; then
338 EXTNET=(${enableiflist//,/ })
340 while [ ! -z "${EXTNET[i]}" ];
342 enableautomode ${EXTNET[i]} AUTO $datanet || true
346 if [ "$stornet" != "''" ]; then
347 EXTNET=(${enableiflist//,/ })
349 while [ ! -z "${EXTNET[i]}" ];
351 enableautomode ${EXTNET[i]} AUTO $stornet || true
355 if [ "$pubnet" != "''" ]; then
356 EXTNET=(${enableiflist//,/ })
358 while [ ! -z "${EXTNET[i]}" ];
360 enableautomode ${EXTNET[i]} AUTO $pubnet || true
367 # Add the cloud and controller credentials for MAAS for that lab.
368 jujuver=`juju --version`
# Lexicographic string comparison: any "2.x" version string sorts above "2",
# selecting the Juju 2.x cloud/credential path.
370 if [[ "$jujuver" > "2" ]]; then
378 echo " .... MAAS deployment finished successfully ...."