b4657e2a6a98d7cd865a33623c03b04f48a939a1
[apex.git] / build / instack.sh
#!/usr/bin/env bash
# instack.sh — build the Apex undercloud (instack) VM and the overcloud
# disk images. This script uses bash-only features (arrays, declare -i,
# +=), so the shebang must be bash rather than /bin/sh.
set -e
declare -i CNT

# Base URI for the prebuilt RDO overcloud/undercloud images
rdo_images_uri=https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty-opnfv

# highest index of the baremetal test VMs (VMs are numbered 0..vm_index)
vm_index=4
RDO_RELEASE=liberty
# non-interactive ssh: never prompt about (or record) host keys
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
# install any missing build-dependency packages
for pkg in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
    rpm -q "$pkg" > /dev/null || sudo yum install -y "$pkg"
done
18
# RDO Manager expects a stack user to exist, this checks for one
# and creates it if you are root
if ! id stack > /dev/null; then
    sudo useradd stack;
    # echo needs no privilege — only tee writes the root-owned sudoers file,
    # so 'sudo echo | sudo tee' was redundant on the echo side
    echo 'stack ALL=(root) NOPASSWD:ALL' | sudo tee -a /etc/sudoers.d/stack
    echo 'Defaults:stack !requiretty' | sudo tee -a /etc/sudoers.d/stack
    # sudoers fragments must be 0440 or sudo refuses to load them
    sudo chmod 0440 /etc/sudoers.d/stack
    echo 'Added user stack'
fi
28
29 # ensure that I can ssh as the stack user
30 if ! sudo grep "$(cat ~/.ssh/id_rsa.pub)" /home/stack/.ssh/authorized_keys; then
31     if ! sudo ls -d /home/stack/.ssh/ ; then
32         sudo mkdir /home/stack/.ssh
33         sudo chown stack:stack /home/stack/.ssh
34         sudo chmod 700 /home/stack/.ssh
35     fi
36     USER=$(whoami) sudo sh -c "cat ~$USER/.ssh/id_rsa.pub >> /home/stack/.ssh/authorized_keys"
37     sudo chown stack:stack /home/stack/.ssh/authorized_keys
38 fi
39
# clean up stack user's previously built instack disk images
# (quote the option array so empty/odd elements can never word-split)
ssh -T "${SSH_OPTIONS[@]}" stack@localhost "rm -f instack*.qcow2"
42
# Yum repo setup for building the undercloud.
# Both branches ran identical commands and differed only in which delorean
# "current-passed-ci" repo (liberty vs master) was fetched, so the shared
# steps are factored out below; behavior is unchanged.
delorean_repo=""
if ! rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then
    #pulling from current-passed-ci instead of release repos
    #sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm
    delorean_repo=http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
elif [ "$1" == "-master" ]; then
    delorean_repo=http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo
fi

if [ -n "$delorean_repo" ]; then
    sudo yum -y install yum-plugin-priorities
    sudo yum-config-manager --disable openstack-${RDO_RELEASE}
    sudo curl -o /etc/yum.repos.d/delorean.repo "$delorean_repo"
    # the deps repo stays on the liberty branch even for -master, exactly
    # as in the original per-branch bodies
    sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
    sudo rm -f /etc/yum.repos.d/delorean-current.repo
fi
59
# install the opendaylight yum repo definition; the quoted 'EOF' delimiter
# keeps $releasever / $basearch literal so yum expands them, not the shell
sudo tee /etc/yum.repos.d/opendaylight.repo << 'EOF'
[opendaylight]
name=OpenDaylight $releasever - $basearch
baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-3-candidate/$basearch/os/
enabled=1
gpgcheck=0
EOF
68
# install a package via sudo yum only when the named rpm is not yet present;
# $1 = rpm to query, $2 = package to install (defaults to $1)
ensure_package() {
    local rpm_name=$1
    local pkg_name=${2:-$1}
    if ! rpm -q "$rpm_name" > /dev/null; then
        sudo yum install -y "$pkg_name"
    fi
}

# ensure the undercloud package is installed so we can build the undercloud
# (installing python-tripleoclient is what provides instack-undercloud here)
ensure_package instack-undercloud python-tripleoclient

# ensure openvswitch is installed
ensure_package openvswitch

# ensure libvirt is installed
ensure_package libvirt-daemon-kvm
83
# clean this up in case it's there from a previous run
sudo rm -f /tmp/instack.answers

# ensure that no previous undercloud VMs are running
# (path is relative to build/, where this script is expected to be run from)
sudo ../ci/clean.sh
# and rebuild the bare undercloud VMs
ssh -T "${SSH_OPTIONS[@]}" stack@localhost <<EOI
set -e
NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
EOI
94
# let dhcp happen so we can get the ip
# just wait instead of checking until we see an address
# because there may be a previous lease that needs
# to be cleaned up
sleep 5

# get the undercloud ip address from the default libvirt dnsmasq leases
UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1)
if [ -z "$UNDERCLOUD" ]; then
  # if not found then dnsmasq may be using leasefile-ro: look up the VM's
  # MAC on the default network and resolve it through the ARP table.
  # (regex fixed: the old class [0-9a-f\] wrongly included a literal '\')
  instack_mac=$(ssh -T "${SSH_OPTIONS[@]}" stack@localhost "virsh domiflist instack" | grep default | \
                grep -Eo "([0-9a-f]+:){5}[0-9a-f]+")
  UNDERCLOUD=$(/usr/sbin/arp -e | grep "${instack_mac}" | awk '{print $1}')
fi

if [ -z "$UNDERCLOUD" ]; then
  # -e so the \n escapes actually print (plain echo emitted them literally)
  echo -e "\n\nNever got IP for Instack. Can Not Continue."
  exit 1
else
  echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}"
fi
118
# ensure that we can ssh to the undercloud, retrying up to 10 times with a
# 3 second pause; fail hard when the retries are exhausted (was a TODO)
CNT=10
UNDERCLOUD_UP=0
while [ $CNT -gt 0 ]; do
    if ssh -T "${SSH_OPTIONS[@]}" "root@$UNDERCLOUD" "echo ''" > /dev/null; then
        UNDERCLOUD_UP=1
        break
    fi
    echo -n "."
    sleep 3
    CNT=$((CNT-1))
done
if [ $UNDERCLOUD_UP -eq 0 ]; then
    echo "Failed to reach undercloud at root@$UNDERCLOUD over ssh" >&2
    exit 1
fi
127
# yum repo, triple-o package and ssh key setup for the undercloud
ssh -T "${SSH_OPTIONS[@]}" "root@$UNDERCLOUD" <<EOI
set -e

if ! rpm -q epel-release > /dev/null; then
    # -y keeps yum non-interactive; without it the install prompt aborts
    # and, with set -e, kills this whole remote script
    yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
fi

yum -y install yum-plugin-priorities
curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo

# let the stack user on the undercloud accept the same keys root does
cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys
chown stack:stack /home/stack/.ssh/authorized_keys
EOI
143
# copy instackenv file for future virt deployments
mkdir -p stack
scp "${SSH_OPTIONS[@]}" "stack@$UNDERCLOUD:instackenv.json" stack/instackenv.json
147
# make a copy of instack VM's definitions, and disk image
# it must be stopped to make a copy of its disk image
# NB: this heredoc is unquoted so \$-escapes below are deliberate — they
# defer expansion to the remote shell. The old version expanded \$CNT
# locally (a constant on the remote side) and assigned the literal string
# "CNT-1", so the countdown never actually counted; both are fixed here.
ssh -T "${SSH_OPTIONS[@]}" stack@localhost <<EOI
set -e
echo "Shutting down instack to gather configs"
virsh shutdown instack

echo "Waiting for instack VM to shutdown"
CNT=20
while virsh list | grep instack > /dev/null && [ \$CNT -gt 0 ]; do
    echo -n "."
    sleep 5
    CNT=\$((CNT-1))
done
if virsh list | grep instack > /dev/null; then
    echo "instack failed to shutdown for copy"
    exit 1
fi

echo $'\nGenerating libvirt configuration'
# rewrite the second virtio NIC model of each baremetal VM to rtl8139
for i in \$(seq 0 $vm_index); do
  virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
done

virsh dumpxml instack > instack.xml
virsh net-dumpxml brbm > brbm-net.xml
virsh net-dumpxml brbm1 > brbm1-net.xml
virsh net-dumpxml brbm2 > brbm2-net.xml
virsh net-dumpxml brbm3 > brbm3-net.xml
virsh pool-dumpxml default > default-pool.xml
EOI
179
# copy off the instack artifacts
echo "Copying instack files to build directory"
for idx in $(seq 0 $vm_index); do
  scp "${SSH_OPTIONS[@]}" "stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${idx}.xml" .
done

# fetch the domain/network/pool definitions dumped on the host
for artifact in instack.xml brbm-net.xml brbm1-net.xml brbm2-net.xml brbm3-net.xml default-pool.xml; do
  scp "${SSH_OPTIONS[@]}" "stack@localhost:${artifact}" .
done
192
# pull down the built images
echo "Copying overcloud resources"
IMAGES="overcloud-full.tar"
IMAGES+=" undercloud.qcow2"

for i in $IMAGES; do
  # download prebuilt images from RDO Project when the local copy is
  # missing or its md5 no longer matches the published checksum
  remote_md5=$(curl -L "$rdo_images_uri/${i}.md5" | awk '{print $1}')
  local_md5=""
  if [ -f "stack/$i" ]; then
    local_md5=$(md5sum "stack/$i" | awk '{print $1}')
  fi
  if [ "$remote_md5" != "$local_md5" ]; then
    ### there's a problem with the Content-Length reported by the centos artifacts
    ### server so using wget for it until a resolution is figured out.
    wget -nv -O "stack/$i" "$rdo_images_uri/$i"
  fi
  # only untar the tar files
  if [ "${i##*.}" == "tar" ]; then tar -xf "stack/$i" -C stack/; fi
done
212
#Adding OpenStack packages to undercloud
pushd stack
cp undercloud.qcow2 instack.qcow2
LIBGUESTFS_BACKEND=direct virt-customize --install yum-priorities -a instack.qcow2

# Comma-separated list handed to virt-customize --install. Two lines used
# to end with a trailing comma right before a "+=\",...\"" continuation,
# producing empty ",," entries (empty package names); those are removed.
PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute"
PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm"
PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch"
PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules"
PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account"
PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy"
PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api"
PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector"
PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils"

LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES -a instack.qcow2
popd
230
231
232 #Adding OpenDaylight to overcloud
233 pushd stack
234 # make a copy of the cached overcloud-full image
235 cp overcloud-full.qcow2 overcloud-full-odl.qcow2
236
237 # remove unnessesary packages and install nessesary packages
238 LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum remove -y openstack-neutron-openvswitch" \
239     --upload /etc/yum.repos.d/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
240     --install opendaylight,python-networking-odl -a overcloud-full-odl.qcow2
241
242 ## WORK AROUND
243 ## when OpenDaylight lands in upstream RDO manager this can be removed
244
245 # upload the opendaylight puppet module
246 rm -rf puppet-opendaylight
247 git clone https://github.com/dfarrell07/puppet-opendaylight
248 pushd puppet-opendaylight
249 git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
250 popd
251 LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
252                                          --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" -a overcloud-full-odl.qcow2
253
254 # Patch in OpenDaylight installation and configuration
255 LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-tripleo-heat-templates.patch:/tmp \
256                                          --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opendaylight-tripleo-heat-templates.patch" \
257                                          -a instack.qcow2
258 LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-puppet-neutron.patch:/tmp \
259                                          --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \
260                                          -a overcloud-full-odl.qcow2
261 ## END WORK AROUND
262 popd
263
# move the generated instackenv into the build dir and scrub the stack
# user's private key material, replacing it with a placeholder
mv stack/instackenv.json instackenv-virt.json
placeholder='INSERT_STACK_USER_PRIV_KEY'
sed -i '/pm_password/c\      "pm_password": "'"$placeholder"'",' instackenv-virt.json
sed -i '/ssh-key/c\  "ssh-key": "'"$placeholder"'",' instackenv-virt.json
268
# clean up the VMs; each virsh call may legitimately fail (VM already gone),
# so errors are silenced and neutralized to keep remote set -e happy
ssh -T "${SSH_OPTIONS[@]}" stack@localhost <<EOI
set -e
virsh destroy instack 2> /dev/null || echo -n ''
virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
for vm in \$(seq 0 $vm_index); do
  virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$vm 2> /dev/null || echo -n ''
  virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$vm --remove-all-storage 2> /dev/null || echo -n ''
done
EOI
279