[apex.git] / build / instack.sh
#!/usr/bin/env bash
set -e
declare -i CNT

#rdo_images_uri=https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty-opnfv
rdo_images_uri=file:///stable-images
onos_artifacts_uri=file:///stable-images/onos

vm_index=4
RDO_RELEASE=liberty
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
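# NOTE: these options silence SSH host-key checking; the undercloud and
# baremetal VMs are recreated on every build, so their host keys change each run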
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"

# check for dependency packages
for i in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
    if ! rpm -q $i > /dev/null; then
        sudo yum install -y $i
    fi
done

# RDO Manager expects a stack user to exist; this checks for one
# and creates it if you are root
if ! id stack > /dev/null; then
    sudo useradd stack
    echo 'stack ALL=(root) NOPASSWD:ALL' | sudo tee -a /etc/sudoers.d/stack
    echo 'Defaults:stack !requiretty' | sudo tee -a /etc/sudoers.d/stack
    sudo chmod 0440 /etc/sudoers.d/stack
    echo 'Added user stack'
fi

# ensure that I can ssh as the stack user
if ! sudo grep "$(cat ~/.ssh/id_rsa.pub)" /home/stack/.ssh/authorized_keys; then
    if ! sudo ls -d /home/stack/.ssh/ ; then
        sudo mkdir /home/stack/.ssh
        sudo chown stack:stack /home/stack/.ssh
        sudo chmod 700 /home/stack/.ssh
    fi
    USER=$(whoami) sudo sh -c "cat ~$USER/.ssh/id_rsa.pub >> /home/stack/.ssh/authorized_keys"
    sudo chown stack:stack /home/stack/.ssh/authorized_keys
fi

# clean up instack disk images previously built by the stack user
ssh -T ${SSH_OPTIONS[@]} stack@localhost "rm -f instack*.qcow2"

# Yum repo setup for building the undercloud
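# NOTE: passing "-master" as the first argument points the build at the
# centos7 (master) Delorean current-passed-ci repo; otherwise (when
# rdo-release is not already installed) the centos7-liberty repo is used.
# Both paths disable the openstack-liberty release repo and remove any
# delorean-current repo definition.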
if ! rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then
    #pulling from current-passed-ci instead of release repos
    #sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
    sudo yum -y install yum-plugin-priorities
    sudo yum-config-manager --disable openstack-${RDO_RELEASE}
    sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
    sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
    sudo rm -f /etc/yum.repos.d/delorean-current.repo
elif [ "$1" == "-master" ]; then
    sudo yum -y install yum-plugin-priorities
    sudo yum-config-manager --disable openstack-${RDO_RELEASE}
    sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo
    sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
    sudo rm -f /etc/yum.repos.d/delorean-current.repo
fi

# install the opendaylight yum repo definition
cat << 'EOF' | sudo tee /etc/yum.repos.d/opendaylight.repo
[opendaylight]
name=OpenDaylight $releasever - $basearch
baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-33-release/$basearch/os/
enabled=1
gpgcheck=0
EOF

# ensure the undercloud package is installed so we can build the undercloud
if ! rpm -q instack-undercloud > /dev/null; then
    sudo yum install -y python-tripleoclient
fi

# ensure openvswitch is installed
if ! rpm -q openvswitch > /dev/null; then
    sudo yum install -y openvswitch
fi

# ensure libvirt is installed
if ! rpm -q libvirt-daemon-kvm > /dev/null; then
    sudo yum install -y libvirt-daemon-kvm
fi

# clean this up in case it's there
sudo rm -f /tmp/instack.answers

# ensure that no previous undercloud VMs are running
sudo ../ci/clean.sh
# and rebuild the bare undercloud VMs
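# instack-virt-setup (run below as the stack user) creates the instack
# undercloud VM plus the baremetal_* VMs; NODE_COUNT/NODE_CPU/NODE_MEM size
# the virtual baremetal nodes, and TESTENV_ARGS wires them to the four brbm*
# bridges, which appear to map to the OPNFV network types defined above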
ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
set -e
NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
EOI

# wait for DHCP so we can get the undercloud's IP address; just sleep rather
# than polling for an address, because a stale lease from a previous run may
# still need to be cleaned up
sleep 5

# get the undercloud ip address
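# first try the dnsmasq lease file for the default libvirt network; if that
# yields nothing, fall back to looking up the instack VM's MAC address and
# resolving it through the ARP cache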
UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
if [ -z "$UNDERCLOUD" ]; then
  #if not found then dnsmasq may be using leasefile-ro
  instack_mac=$(ssh -T ${SSH_OPTIONS[@]} stack@localhost "virsh domiflist instack" | grep default | \
                grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
  UNDERCLOUD=$(/usr/sbin/arp -e | grep ${instack_mac} | awk '{print $1}')

  if [ -z "$UNDERCLOUD" ]; then
    echo -e "\n\nNever got IP for Instack. Cannot continue."
    exit 1
  else
    echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
  fi
else
  echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
fi

# ensure that we can ssh to the undercloud
CNT=10
while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" > /dev/null && [ $CNT -gt 0 ]; do
    echo -n "."
    sleep 3
    CNT=$((CNT-1))
done
# TODO fail if CNT=0
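# NOTE: if the undercloud never answers, CNT reaches 0 and the loop simply
# exits; the ssh heredoc below will then fail and, with set -e, abort the
# script, so presumably the TODO above is about failing fast here with a
# clearer error message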

# yum repo, TripleO package, and ssh key setup for the undercloud
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
set -e

if ! rpm -q epel-release > /dev/null; then
    yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
fi

yum -y install yum-plugin-priorities
curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo

cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
chown stack:stack /home/stack/.ssh/authorized_keys
EOI

# copy instackenv file for future virt deployments
if [ ! -d stack ]; then mkdir stack; fi
scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json

# make a copy of the instack VM's definition and disk image;
# the VM must be stopped before its disk image can be copied
ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
set -e
echo "Shutting down instack to gather configs"
virsh shutdown instack

echo "Waiting for instack VM to shutdown"
CNT=20
while virsh list | grep instack > /dev/null && [ \$CNT -gt 0 ]; do
    echo -n "."
    sleep 5
    CNT=\$((CNT-1))
done
if virsh list | grep instack > /dev/null; then
    echo "instack failed to shutdown for copy"
    exit 1
fi

echo $'\nGenerating libvirt configuration'
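# the awk below rewrites every second "virtio" interface model to "rtl8139"
# in each baremetal VM definition, presumably because the provisioning NICs
# need an emulated adapter rather than virtio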
for i in \$(seq 0 $vm_index); do
  virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
done

virsh dumpxml instack > instack.xml
virsh net-dumpxml brbm > brbm-net.xml
virsh net-dumpxml brbm1 > brbm1-net.xml
virsh net-dumpxml brbm2 > brbm2-net.xml
virsh net-dumpxml brbm3 > brbm3-net.xml
virsh pool-dumpxml default > default-pool.xml
EOI

# copy off the instack artifacts
echo "Copying instack files to build directory"
for i in $(seq 0 $vm_index); do
  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml .
done

scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm2-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:brbm3-net.xml .
scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .

# pull down the built images
echo "Copying overcloud resources"
IMAGES="overcloud-full.tar"
IMAGES+=" undercloud.qcow2"

for i in $IMAGES; do
  # download prebuilt images from RDO Project
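  # only re-download when the upstream md5 differs from the local copy's
  # checksum (or when no local copy exists, in which case md5sum prints
  # nothing and the strings differ)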
  if [ "$(curl -L $rdo_images_uri/${i}.md5 | awk '{print $1}')" != "$(md5sum stack/$i | awk '{print $1}')" ] ; then
    #if [ $i == "undercloud.qcow2" ]; then
    ### there's a problem with the Content-Length reported by the centos artifacts
    ### server so using wget for it until a resolution is figured out.
    #wget -nv -O stack/$i $rdo_images_uri/$i
    #else
    curl $rdo_images_uri/$i -o stack/$i
    #fi
  fi
  # only untar the tar files
  if [ "${i##*.}" == "tar" ]; then tar -xf stack/$i -C stack/; fi
done

# Adding OpenStack packages to undercloud
pushd stack
cp undercloud.qcow2 instack.qcow2
LIBGUESTFS_BACKEND=direct virt-customize --install yum-plugin-priorities -a instack.qcow2
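# PACKAGES below is the comma-separated list that virt-customize bakes into
# instack.qcow2, presumably so the undercloud services (nova, neutron, glance,
# heat, ceilometer, swift, ironic, etc.) do not have to be pulled over the
# network when the undercloud is installed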
PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api"
PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector"
PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils"
PACKAGES+=",ceph-common"

# install the packages above and enable ceph to live on the controller
LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES \
    --run-command "sed -i '/ControllerEnableCephStorage/c\\  ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
    --run-command "sed -i '/  \$enable_ceph = /c\\  \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \
    --run-command "sed -i '/  \$enable_ceph = /c\\  \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \
    -a instack.qcow2
popd


# Adding OpenDaylight to overcloud
pushd stack
# make a copy of the cached overcloud-full image
cp overcloud-full.qcow2 overcloud-full-odl.qcow2

# install aodh on the overcloud
AODH_PKG="openstack-aodh-api,openstack-aodh-common,openstack-aodh-compat,openstack-aodh-evaluator,openstack-aodh-expirer"
AODH_PKG+=",openstack-aodh-listener,openstack-aodh-notifier"

# remove unnecessary packages and install necessary packages
LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum remove -y openstack-neutron-openvswitch" \
    --install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
    --upload /etc/yum.repos.d/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
    --install opendaylight,python-networking-odl,ceph \
    --install $AODH_PKG \
    -a overcloud-full-odl.qcow2

## WORK AROUND
## when OpenDaylight lands in upstream RDO manager this can be removed

# upload the opendaylight puppet module
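# the module is cloned locally, packed with "git archive" under an
# opendaylight/ prefix, uploaded into the image, and unpacked into
# /etc/puppet/modules so the overcloud puppet run can find it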
rm -rf puppet-opendaylight
git clone -b 2.2.0 https://github.com/dfarrell07/puppet-opendaylight
pushd puppet-opendaylight
git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
popd
LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" -a overcloud-full-odl.qcow2

# Patch in OpenDaylight installation and configuration
LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-tripleo-heat-templates.patch:/tmp \
                                         --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opnfv-tripleo-heat-templates.patch" \
                                         -a instack.qcow2

LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-puppet-neutron.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \
                                         -a overcloud-full-odl.qcow2
# REMOVE ME AFTER Brahmaputra
LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-neutron-force-metadata.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/puppet-neutron-force-metadata.patch" \
                                         -a overcloud-full-odl.qcow2
LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-cinder-quota-fix.patch:/tmp \
                                         --run-command "cd /etc/puppet/modules/cinder && patch -Np1 < /tmp/puppet-cinder-quota-fix.patch" \
                                         -a overcloud-full-odl.qcow2
# END REMOVE ME AFTER Brahmaputra

## END WORK AROUND
popd

## WORK AROUND
## Current package of puppet-aodh is old

pushd stack
rm -rf aodh
git clone https://github.com/openstack/puppet-aodh aodh
pushd aodh
git checkout stable/liberty
popd

tar -czf puppet-aodh.tar.gz aodh
LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-aodh.tar.gz:/etc/puppet/modules/ \
                                         --run-command "cd /etc/puppet/modules/ && rm -rf aodh && tar xzf puppet-aodh.tar.gz" \
                                         -a overcloud-full-odl.qcow2
## END WORK AROUND
popd

## WORK AROUND
## when ONOS lands in upstream OPNFV artifacts this can be removed

# upload the onos puppet module
pushd stack

rm -rf puppet-onos
git clone https://github.com/bobzhouHW/puppet-onos.git
pushd puppet-onos
# download jdk, onos and maven dependency packages.
pushd files
curl ${onos_artifacts_uri}/jdk-8u51-linux-x64.tar.gz -o ./jdk-8u51-linux-x64.tar.gz
curl ${onos_artifacts_uri}/onos-1.3.0.tar.gz -o ./onos-1.3.0.tar.gz
curl ${onos_artifacts_uri}/repository.tar -o ./repository.tar
popd
popd
mv puppet-onos onos
tar -czf puppet-onos.tar.gz onos
LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-onos.tar.gz:/etc/puppet/modules/ \
                                         --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" -a overcloud-full-odl.qcow2

## END WORK AROUND
popd

# move and sanitize private keys from the instackenv.json file
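# sed's 'c\' replaces the entire matching line, so the real pm_password and
# ssh-key values are swapped for the INSERT_STACK_USER_PRIV_KEY placeholder
# before the sanitized copy is saved as instackenv-virt.json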
mv stack/instackenv.json instackenv-virt.json
sed -i '/pm_password/c\      "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
sed -i '/ssh-key/c\  "ssh-key": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json

# clean up the VMs
ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
set -e
virsh destroy instack 2> /dev/null || echo -n ''
virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
for i in \$(seq 0 $vm_index); do
  virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n ''
  virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n ''
done
EOI