[apex.git] / build / instack.sh
#!/bin/bash
##############################################################################
# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
set -e
declare -i CNT

#rdo_images_uri=https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty-opnfv
rdo_images_uri=file:///stable-images
onos_artifacts_uri=file:///stable-images/onos

vm_index=4
RDO_RELEASE=liberty
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
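# NOTE: these ssh options disable host key checking/recording because the
# undercloud and baremetal VMs are recreated on every build, so their host
# keys change from run to run.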
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"

# check for dependency packages
for i in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
    if ! rpm -q $i > /dev/null; then
        sudo yum install -y $i
    fi
done

# RDO Manager expects a stack user to exist, this checks for one
# and creates it if it is missing
if ! id stack > /dev/null 2>&1; then
    sudo useradd stack
    echo 'stack ALL=(root) NOPASSWD:ALL' | sudo tee -a /etc/sudoers.d/stack
    echo 'Defaults:stack !requiretty' | sudo tee -a /etc/sudoers.d/stack
    sudo chmod 0440 /etc/sudoers.d/stack
    echo 'Added user stack'
fi
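# The sudoers.d drop-in gives stack passwordless sudo and disables requiretty,
# so the non-interactive ssh/sudo calls made by the rest of this script (and by
# instack-virt-setup) do not fail for lack of a tty.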

# ensure that we can ssh as the stack user
if ! sudo grep "$(cat ~/.ssh/id_rsa.pub)" /home/stack/.ssh/authorized_keys > /dev/null 2>&1; then
    if ! sudo ls -d /home/stack/.ssh/ > /dev/null 2>&1; then
        sudo mkdir /home/stack/.ssh
        sudo chown stack:stack /home/stack/.ssh
        sudo chmod 700 /home/stack/.ssh
    fi
    USER=$(whoami) sudo sh -c "cat ~$USER/.ssh/id_rsa.pub >> /home/stack/.ssh/authorized_keys"
    sudo chown stack:stack /home/stack/.ssh/authorized_keys
fi

# clean up any instack disk images previously built by the stack user
ssh -T ${SSH_OPTIONS[@]} stack@localhost "rm -f instack*.qcow2"

# Yum repo setup for building the undercloud
if ! rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then
    #pulling from current-passed-ci instead of release repos
    #sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
    sudo yum -y install yum-plugin-priorities
    sudo yum-config-manager --disable openstack-${RDO_RELEASE}
    sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
    sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
    sudo rm -f /etc/yum.repos.d/delorean-current.repo
elif [ "$1" == "-master" ]; then
    sudo yum -y install yum-plugin-priorities
    sudo yum-config-manager --disable openstack-${RDO_RELEASE}
    sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo
    sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
    sudo rm -f /etc/yum.repos.d/delorean-current.repo
fi
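
# For reference, delorean.repo is an ordinary yum repo file served by
# trunk.rdoproject.org; its real contents track the current-passed-ci symlink,
# but it is shaped roughly like this (illustrative only, not authoritative):
#   [delorean]
#   name=delorean-openstack
#   baseurl=http://trunk.rdoproject.org/centos7-liberty/<build hash>/
#   enabled=1
#   gpgcheck=0
#   priority=1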

# ensure the undercloud package is installed so we can build the undercloud
if ! rpm -q instack-undercloud > /dev/null; then
    sudo yum install -y python-tripleoclient
fi

# ensure openvswitch is installed
if ! rpm -q openvswitch > /dev/null; then
    sudo yum install -y openvswitch
fi

# ensure libvirt is installed
if ! rpm -q libvirt-daemon-kvm > /dev/null; then
    sudo yum install -y libvirt-daemon-kvm
fi

# clean this up in case it's there
sudo rm -f /tmp/instack.answers

# ensure that no previous undercloud VMs are running
sudo ../ci/clean.sh
# and rebuild the bare undercloud VMs
ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
set -e
NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
EOI
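
# instack-virt-setup (from instack-undercloud) defines the "instack" undercloud
# VM plus NODE_COUNT "baremetal" VMs attached to the brbm* bridges named in
# TESTENV_ARGS; the NODE_COUNT/NODE_CPU/NODE_MEM values above are this script's
# sizing choices, not instack-virt-setup defaults.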

# let dhcp happen so we can get the ip
# just wait instead of checking until we see an address
# because there may be a previous lease that needs
# to be cleaned up
sleep 5

# get the undercloud ip address
UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
if [ -z "$UNDERCLOUD" ]; then
  #if not found then dnsmasq may be using leasefile-ro
  instack_mac=$(ssh -T ${SSH_OPTIONS[@]} stack@localhost "virsh domiflist instack" | grep default | \
                grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
  UNDERCLOUD=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')

  if [ -z "$UNDERCLOUD" ]; then
    echo -e "\n\nNever got an IP for the instack VM. Cannot continue."
    exit 1
  else
    echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
  fi
else
    echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
fi

# ensure that we can ssh to the undercloud
CNT=10
while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" > /dev/null && [ $CNT -gt 0 ]; do
    echo -n "."
    sleep 3
    CNT=$((CNT-1))
done
# fail if we ran out of retries before ssh succeeded
if [ $CNT -eq 0 ]; then
    echo "Failed to ssh to the undercloud VM at $UNDERCLOUD. Cannot continue."
    exit 1
fi

# yum repo, TripleO package and ssh key setup for the undercloud
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
set -e

if ! rpm -q epel-release > /dev/null; then
    yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
fi

yum -y install yum-plugin-priorities
curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo

cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
chown stack:stack /home/stack/.ssh/authorized_keys
EOI

# copy instackenv file for future virt deployments
if [ ! -d stack ]; then mkdir stack; fi
scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json
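
# instackenv.json is the node inventory the undercloud (Ironic) uses for
# introspection and deployment. For this virtual setup it is generated on the
# undercloud; a single node entry looks roughly like this (illustrative only):
#   { "pm_type": "pxe_ssh", "pm_addr": "192.168.122.1", "pm_user": "stack",
#     "pm_password": "<stack user private ssh key>",
#     "mac": ["52:54:00:xx:xx:xx"],
#     "cpu": "2", "memory": "8192", "disk": "40", "arch": "x86_64" }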

# make a copy of the instack VM's definitions and disk image
# it must be stopped to make a copy of its disk image
ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
set -e
echo "Shutting down instack to gather configs"
virsh shutdown instack

echo "Waiting for instack VM to shutdown"
CNT=20
while virsh list | grep instack > /dev/null && [ \$CNT -gt 0 ]; do
    echo -n "."
    sleep 5
    CNT=\$((CNT-1))
done
if virsh list | grep instack > /dev/null; then
    echo "instack failed to shutdown for copy"
    exit 1
fi

echo $'\nGenerating libvirt configuration'
for i in \$(seq 0 $vm_index); do
  virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
done

virsh dumpxml instack > instack.xml
virsh net-dumpxml brbm > brbm-net.xml
virsh net-dumpxml brbm1 > brbm1-net.xml
virsh net-dumpxml brbm2 > brbm2-net.xml
virsh net-dumpxml brbm3 > brbm3-net.xml
virsh pool-dumpxml default > default-pool.xml
EOI

# copy off the instack artifacts
echo "Copying instack files to build directory"
for i in $(seq 0 $vm_index); do
  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml .
done

scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm2-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm3-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
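
# The libvirt XML dumped above (VM domains, brbm* networks, default storage
# pool) is kept as a build artifact; the assumption is that the deploy scripts
# replay these definitions to recreate the same virtual environment without
# rerunning instack-virt-setup.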

# pull down the built images
echo "Copying overcloud resources"
IMAGES="overcloud-full.tar"
IMAGES+=" undercloud.qcow2"

for i in $IMAGES; do
  # download prebuilt images from RDO Project
  if [ "$(curl -L $rdo_images_uri/${i}.md5 | awk '{print $1}')" != "$(md5sum stack/$i | awk '{print $1}')" ] ; then
    #if [ $i == "undercloud.qcow2" ]; then
    ### there's a problem with the Content-Length reported by the centos artifacts
    ### server so using wget for it until a resolution is figured out.
    #wget -nv -O stack/$i $rdo_images_uri/$i
    #else
    curl $rdo_images_uri/$i -o stack/$i
    #fi
  fi
  # only untar the tar files
  if [ "${i##*.}" == "tar" ]; then tar -xf stack/$i -C stack/; fi
done

# Adding OpenStack packages to undercloud
pushd stack
cp undercloud.qcow2 instack.qcow2
LIBGUESTFS_BACKEND=direct virt-customize --install yum-priorities -a instack.qcow2
PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api"
PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector"
PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils"
PACKAGES+=",ceph-common"

# install the packages above and enable ceph to live on the controller
LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES \
    --run-command "sed -i '/ControllerEnableCephStorage/c\\  ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
    --run-command "sed -i '/  \$enable_ceph = /c\\  \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \
    --run-command "sed -i '/  \$enable_ceph = /c\\  \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \
    -a instack.qcow2
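# The sed edits above flip flags that already exist in the image's
# tripleo-heat-templates, so that after the edit, for example,
# storage-environment.yaml contains:
#   ControllerEnableCephStorage: true
# which lets the Ceph storage service be colocated on the controller role.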
popd


pushd stack

##########################################################
#####  Prep initial overcloud image with common deps #####
##########################################################

# make a copy of the cached overcloud-full image
cp overcloud-full.qcow2 overcloud-full-opendaylight.qcow2
# Update puppet-aodh; the version shipped in the image is old
rm -rf aodh
git clone https://github.com/openstack/puppet-aodh aodh
pushd aodh
git checkout stable/liberty
popd
tar -czf puppet-aodh.tar.gz aodh

# Add epel, aodh and ceph, remove openstack-neutron-openvswitch
AODH_PKG="openstack-aodh-api,openstack-aodh-common,openstack-aodh-compat,openstack-aodh-evaluator,openstack-aodh-expirer"
AODH_PKG+=",openstack-aodh-listener,openstack-aodh-notifier"
LIBGUESTFS_BACKEND=direct virt-customize \
    --upload puppet-aodh.tar.gz:/etc/puppet/modules/ \
    --run-command "cd /etc/puppet/modules/ && rm -rf aodh && tar xzf puppet-aodh.tar.gz" \
    --run-command "yum remove -y openstack-neutron-openvswitch" \
    --run-command "echo 'nf_conntrack_proto_sctp' > /etc/modules-load.d/nf_conntrack_proto_sctp.conf" \
    --install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
    --install "$AODH_PKG,ceph" \
    -a overcloud-full-opendaylight.qcow2

###############################################
#####    Adding OpenDaylight to overcloud #####
###############################################

cat > /tmp/opendaylight.repo << EOF
[opendaylight]
name=OpenDaylight \$releasever - \$basearch
baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-4-testing/\$basearch/os/
enabled=1
gpgcheck=0
EOF
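
# The repo file above points the image at the CentOS NFV SIG testing repo for
# OpenDaylight 4.x (Beryllium). The escaped \$releasever and \$basearch are
# written into the generated file as literal yum variables rather than being
# expanded by this script.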

odlrpm=opendaylight-4.0.0-1.rc2.el7.noarch.rpm
# rdo_images_uri is a file:// URI, so strip the scheme for local file operations
if [ -f ${rdo_images_uri#file://}/$odlrpm ]; then
    LIBGUESTFS_BACKEND=direct virt-customize --upload ${rdo_images_uri#file://}/$odlrpm:/tmp/ -a overcloud-full-opendaylight.qcow2
    opendaylight=/tmp/$odlrpm
else
    opendaylight=opendaylight
fi

# install ODL packages
LIBGUESTFS_BACKEND=direct virt-customize \
    --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
    --install ${opendaylight},python-networking-odl \
    -a overcloud-full-opendaylight.qcow2

## WORK AROUND
## when OpenDaylight lands in upstream RDO manager this can be removed

# upload the opendaylight puppet module
rm -rf puppet-opendaylight
if [ -f ${rdo_images_uri#file://}/puppet-opendaylight-3.2.2.tar.gz ]; then
    cp ${rdo_images_uri#file://}/puppet-opendaylight-3.2.2.tar.gz puppet-opendaylight.tar.gz
else
    git clone -b opnfv_integration https://github.com/dfarrell07/puppet-opendaylight
    pushd puppet-opendaylight
    git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
    popd
fi
LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
                                         --upload ../opendaylight-puppet-neutron.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \
                                         -a overcloud-full-opendaylight.qcow2
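
# Either branch above yields a tarball that unpacks to
# /etc/puppet/modules/opendaylight: the git archive uses --prefix=opendaylight/,
# and the cached puppet-opendaylight-3.2.2.tar.gz is assumed to use the same
# layout.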

# Patch in OpenDaylight installation and configuration
LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-tripleo-heat-templates.patch:/tmp \
                                         --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opnfv-tripleo-heat-templates.patch" \
                                         -a instack.qcow2

# REMOVE ME AFTER Brahmaputra
LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-neutron-force-metadata.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/puppet-neutron-force-metadata.patch" \
                                         -a overcloud-full-opendaylight.qcow2

LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-cinder-quota-fix.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/cinder && patch -Np1 < /tmp/puppet-cinder-quota-fix.patch" \
                                         -a overcloud-full-opendaylight.qcow2

LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-puppet-tripleo.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/aodh-puppet-tripleo.patch" \
                                         -a overcloud-full-opendaylight.qcow2

# adds the tripleoclient aodh workaround for keystone
LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-tripleoclient.patch:/tmp \
                                         --run-command "cd /usr/lib/python2.7/site-packages/tripleoclient && patch -Np1 < /tmp/aodh-tripleoclient.patch" \
                                         --upload ../aodh-os-cloud-config.patch:/tmp \
                                         --run-command "cd /usr/lib/python2.7/site-packages/os_cloud_config && patch -Np1 < /tmp/aodh-os-cloud-config.patch" \
                                         -a instack.qcow2
# END REMOVE ME AFTER Brahmaputra

################################################
#####    Adding SFC+OpenDaylight overcloud #####
################################################


# copy the opendaylight overcloud-full image to keep the odl-sfc changes isolated
cp overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc.qcow2


# the kernel is patched with the fix described in this post:
# http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F
LIBGUESTFS_BACKEND=direct virt-customize \
    --install 'https://radez.fedorapeople.org/kernel-ml-3.13.7-1.el7.centos_xfs_grow.x86_64.rpm' \
    --run-command 'grub2-set-default "\$(grep -P \"submenu|^menuentry\" /boot/grub2/grub.cfg | cut -d \"\\x27\" | head -n 1)"' \
    --install 'https://radez.fedorapeople.org/openvswitch-kmod-2.3.90-1.el7.centos.x86_64.rpm' \
    --run-command 'yum downgrade -y https://radez.fedorapeople.org/openvswitch-2.3.90-1.x86_64.rpm' \
    --run-command 'rm -f /lib/modules/3.13.7-1.el7.centos_xfs_grow.x86_64/kernel/net/openvswitch/openvswitch.ko' \
    --run-command 'ln -s /lib/modules/3.13.7-1.el7.centos_xfs_grow.x86_64/kernel/extra/openvswitch/openvswitch.ko /lib/modules/3.13.7-1.el7.centos_xfs_grow.x86_64/kernel/net/openvswitch/openvswitch.ko' \
    -a overcloud-full-opendaylight-sfc.qcow2
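
# Reading the block above: the custom kernel-ml build carries the xfs_growfs
# fix from the FAQ link, grub is pointed at that kernel, a matching
# openvswitch kernel module is installed and the OVS userspace is downgraded
# to the corresponding 2.3.90 build, and the final symlink puts the
# replacement module at the path where the stock module lived.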



###############################################
#####    Adding ONOS to overcloud #####
###############################################

## WORK AROUND
## when ONOS lands in upstream OPNFV artifacts this can be removed

# upload the onos puppet module

rm -rf puppet-onos
git clone https://github.com/bobzhouHW/puppet-onos.git
pushd puppet-onos
# download jdk, onos and maven dependency packages.
pushd files
curl ${onos_artifacts_uri}/jdk-8u51-linux-x64.tar.gz -o ./jdk-8u51-linux-x64.tar.gz
curl ${onos_artifacts_uri}/onos-1.3.0.tar.gz -o ./onos-1.3.0.tar.gz
curl ${onos_artifacts_uri}/repository.tar -o ./repository.tar
popd
popd
mv puppet-onos onos
tar -czf puppet-onos.tar.gz onos
LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-onos.tar.gz:/etc/puppet/modules/ \
                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" -a overcloud-full-opendaylight.qcow2

## END WORK AROUND

popd

# move and sanitize private keys from the instackenv.json file
mv stack/instackenv.json instackenv-virt.json
sed -i '/pm_password/c\      "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
sed -i '/ssh-key/c\  "ssh-key": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
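# After these edits the published instackenv-virt.json contains only
# placeholders, e.g.:
#   "pm_password": "INSERT_STACK_USER_PRIV_KEY",
#   "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
# the expectation being that the deploy tooling substitutes the real stack
# user key back in.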

# clean up the VMs
ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
set -e
virsh destroy instack 2> /dev/null || echo -n ''
virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
for i in \$(seq 0 $vm_index); do
  virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n ''
  virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n ''
done
EOI
