Merge "Cherry-pick of L2GW environment file"
Author:     Tim Rozet <trozet@redhat.com>
AuthorDate: Mon, 12 Mar 2018 21:33:42 +0000
Commit:     Gerrit Code Review <gerrit@opnfv.org>
CommitDate: Mon, 12 Mar 2018 21:33:42 +0000
25 files changed:
apex/build.py
apex/common/utils.py
apex/deploy.py
apex/overcloud/deploy.py
apex/tests/test_apex_overcloud_deploy.py
apex/tests/test_apex_undercloud.py
apex/tests/test_apex_virtual_utils.py
apex/undercloud/undercloud.py
apex/virtual/exceptions.py [new file with mode: 0644]
apex/virtual/utils.py
build/Makefile
build/barometer-install.sh
build/opnfv-environment.yaml
build/overcloud-full.sh
build/patches/puppet-ceph.patch [new file with mode: 0644]
build/rpm_specs/opnfv-apex-common.spec
config/network/network_settings.yaml
config/network/network_settings_v6.yaml
config/network/network_settings_vlans.yaml
config/network/network_settings_vpp.yaml
docs/release/installation/baremetal.rst
docs/release/installation/virtual.rst
lib/ansible/playbooks/configure_undercloud.yml
lib/ansible/playbooks/deploy_dependencies.yml
requirements.txt

diff --git a/apex/build.py b/apex/build.py
index 08f91ab..dff25ac 100644
@@ -225,6 +225,7 @@ def main():
     console.setLevel(log_level)
     console.setFormatter(logging.Formatter(formatter))
     logging.getLogger('').addHandler(console)
+    utils.install_ansible()
     # Since we only support building inside of git repo this should be fine
     try:
         apex_root = subprocess.check_output(
diff --git a/apex/common/utils.py b/apex/common/utils.py
index 13250a4..b727b11 100644
@@ -8,10 +8,12 @@
 ##############################################################################
 
 import datetime
+import distro
 import json
 import logging
 import os
 import pprint
+import socket
 import subprocess
 import tarfile
 import time
@@ -192,3 +194,29 @@ def fetch_upstream_and_unpack(dest, url, targets):
             tar = tarfile.open(target_dest)
             tar.extractall(path=dest)
             tar.close()
+
+
+def install_ansible():
+    # we only install for CentOS/Fedora for now
+    dist = distro.id()
+    if 'centos' in dist:
+        pkg_mgr = 'yum'
+    elif 'fedora' in dist:
+        pkg_mgr = 'dnf'
+    else:
+        return
+
+    # yum python module only exists for 2.x, so use subprocess
+    try:
+        subprocess.check_call([pkg_mgr, '-y', 'install', 'ansible'])
+    except subprocess.CalledProcessError:
+        logging.warning('Unable to install Ansible')
+
+
+def internet_connectivity():
+    try:
+        urllib.request.urlopen('http://opnfv.org', timeout=3)
+        return True
+    except (urllib.request.URLError, socket.timeout):
+        logging.debug('No internet connectivity detected')
+        return False
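For orientation, the two helpers above are consumed at the top of main() in apex/build.py and apex/deploy.py (install_ansible) and by the undercloud delorean-repo refresh further down (internet_connectivity). A minimal sketch of that call pattern, with refresh_external_repos() as a purely hypothetical stand-in for the undercloud-side work:

    # Sketch only: mirrors the call sites added elsewhere in this change.
    from apex.common import utils

    def bootstrap_host():
        # Best-effort: install_ansible() returns silently on non-CentOS/Fedora
        # hosts and only logs a warning if the yum/dnf install fails.
        utils.install_ansible()
        if utils.internet_connectivity():
            # Online: safe to refresh external resources (e.g. the delorean
            # repo inside the undercloud image, see undercloud.py below).
            refresh_external_repos()  # hypothetical helper, not part of Apex
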
diff --git a/apex/deploy.py b/apex/deploy.py
index d2f7d50..5703e08 100644
@@ -234,6 +234,7 @@ def main():
     console.setLevel(log_level)
     console.setFormatter(logging.Formatter(formatter))
     logging.getLogger('').addHandler(console)
+    utils.install_ansible()
     validate_deploy_args(args)
     # Parse all settings
     deploy_settings = DeploySettings(args.deploy_settings_file)
@@ -381,7 +382,8 @@ def main():
                                        args.deploy_dir,
                                        root_pw=root_pw,
                                        external_network=uc_external,
-                                       image_name=os.path.basename(uc_image))
+                                       image_name=os.path.basename(uc_image),
+                                       os_version=os_version)
         undercloud.start()
 
         # Generate nic templates
@@ -410,8 +412,8 @@ def main():
         if not upstream:
             oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                                opnfv_env, net_env_target, APEX_TEMP_DIR)
-            oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
-                                 root_pw=root_pw)
+            oc_deploy.prep_image(deploy_settings, net_settings, sdn_image,
+                                 APEX_TEMP_DIR, root_pw=root_pw)
         else:
             shutil.copyfile(sdn_image, os.path.join(APEX_TEMP_DIR,
                                                     'overcloud-full.qcow2'))
@@ -448,6 +450,8 @@ def main():
         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
         deploy_vars['upstream'] = upstream
         deploy_vars['os_version'] = os_version
+        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
+        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
         for dns_server in net_settings['dns_servers']:
             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                 dns_server)
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 19d46e1..5bbcaed 100644
@@ -186,10 +186,11 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     return cmd
 
 
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None):
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
+    :param ns: network settings
     :param img: sdn image
     :param tmp_dir: dir to store modified sdn image
     :param root_pw: password to configure for overcloud image
@@ -219,6 +220,18 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
                 ".service"
         }])
 
+    if ns.get('http_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'http_proxy={}' >> /etc/environment".format(
+                    ns['http_proxy'])})
+
+    if ns.get('https_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+                "echo 'https_proxy={}' >> /etc/environment".format(
+                    ns['https_proxy'])})
+
     if ds_opts['vpn']:
         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
         virt_cmds.append({
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index 8ff98a8..420a70d 100644
@@ -154,7 +154,8 @@ class TestOvercloudDeploy(unittest.TestCase):
               'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()
 
     @patch('apex.overcloud.deploy.virt_utils')
@@ -169,7 +170,8 @@ class TestOvercloudDeploy(unittest.TestCase):
               'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()
 
     @patch('apex.overcloud.deploy.virt_utils')
@@ -188,7 +190,8 @@ class TestOvercloudDeploy(unittest.TestCase):
             lambda i: ds_opts.get(i, MagicMock())
         ds['deploy_options'].__contains__.side_effect = \
             lambda i: True if i in ds_opts else MagicMock()
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()
 
     @patch('apex.overcloud.deploy.virt_utils')
@@ -204,7 +207,8 @@ class TestOvercloudDeploy(unittest.TestCase):
               'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()
 
     @patch('apex.overcloud.deploy.virt_utils')
@@ -219,14 +223,15 @@ class TestOvercloudDeploy(unittest.TestCase):
               'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()
 
     @patch('apex.overcloud.deploy.os.path.isfile')
     def test_prep_image_no_image(self, mock_isfile):
         mock_isfile.return_value = False
         assert_raises(ApexDeployException, prep_image,
-                      {}, 'undercloud.qcow2', '/tmp')
+                      {}, {}, 'undercloud.qcow2', '/tmp')
 
     def test_make_ssh_key(self):
         priv, pub = make_ssh_key()
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
index c821ade..0df785f 100644
@@ -197,3 +197,18 @@ class TestUndercloud(unittest.TestCase):
         ds = {'global_params': {}}
 
         Undercloud('img_path', 'tplt_path').generate_config(ns, ds)
+
+    @patch.object(Undercloud, '_get_vm', return_value=None)
+    @patch.object(Undercloud, 'create')
+    @patch('apex.undercloud.undercloud.virt_utils')
+    def test_update_delorean(self, mock_vutils, mock_uc_create, mock_get_vm):
+        uc = Undercloud('img_path', 'tmplt_path', external_network=True)
+        uc._update_delorean_repo()
+        download_cmd = (
+            "curl -L -f -o "
+            "/etc/yum.repos.d/deloran.repo "
+            "https://trunk.rdoproject.org/centos7-{}"
+            "/current-tripleo/delorean.repo".format(
+                constants.DEFAULT_OS_VERSION))
+        test_ops = {'--run-command': download_cmd}
+        mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
diff --git a/apex/tests/test_apex_virtual_utils.py b/apex/tests/test_apex_virtual_utils.py
index 643069f..a9eb78d 100644
@@ -12,6 +12,7 @@ import unittest
 
 from mock import patch
 
+from apex.virtual.exceptions import ApexVirtualException
 from apex.virtual.utils import DEFAULT_VIRT_IP
 from apex.virtual.utils import get_virt_ip
 from apex.virtual.utils import generate_inventory
@@ -66,13 +67,30 @@ class TestVirtualUtils(unittest.TestCase):
         assert_is_instance(generate_inventory('target_file', ha_enabled=True),
                            dict)
 
+    @patch('apex.virtual.utils.get_virt_ip')
+    @patch('apex.virtual.utils.subprocess.check_output')
     @patch('apex.virtual.utils.iptc')
     @patch('apex.virtual.utils.subprocess.check_call')
     @patch('apex.virtual.utils.vbmc_lib')
-    def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc):
+    def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc,
+                        mock_check_output, mock_get_virt_ip):
+        mock_get_virt_ip.return_value = '192.168.122.1'
+        mock_check_output.return_value = b'blah |dummy \nstatus | running'
         host_setup({'test': 2468})
         mock_subprocess.assert_called_with(['vbmc', 'start', 'test'])
 
+    @patch('apex.virtual.utils.get_virt_ip')
+    @patch('apex.virtual.utils.subprocess.check_output')
+    @patch('apex.virtual.utils.iptc')
+    @patch('apex.virtual.utils.subprocess.check_call')
+    @patch('apex.virtual.utils.vbmc_lib')
+    def test_host_setup_vbmc_fails(self, mock_vbmc_lib, mock_subprocess,
+                                   mock_iptc, mock_check_output,
+                                   mock_get_virt_ip):
+        mock_get_virt_ip.return_value = '192.168.122.1'
+        mock_check_output.return_value = b'blah |dummy \nstatus | stopped'
+        assert_raises(ApexVirtualException, host_setup, {'test': 2468})
+
     @patch('apex.virtual.utils.iptc')
     @patch('apex.virtual.utils.subprocess.check_call')
     @patch('apex.virtual.utils.vbmc_lib')
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index d28ed98..915c85f 100644
@@ -31,8 +31,10 @@ class Undercloud:
     """
     def __init__(self, image_path, template_path,
                  root_pw=None, external_network=False,
-                 image_name='undercloud.qcow2'):
+                 image_name='undercloud.qcow2',
+                 os_version=constants.DEFAULT_OS_VERSION):
         self.ip = None
+        self.os_version = os_version
         self.root_pw = root_pw
         self.external_net = external_network
         self.volume = os.path.join(constants.LIBVIRT_VOLUME_PATH,
@@ -73,6 +75,7 @@ class Undercloud:
                                    template_dir=self.template_path)
         self.setup_volumes()
         self.inject_auth()
+        self._update_delorean_repo()
 
     def _set_ip(self):
         ip_out = self.vm.interfaceAddresses(
@@ -202,8 +205,9 @@ class Undercloud:
             "undercloud_update_packages false",
             "undercloud_debug false",
             "inspection_extras false",
-            "ipxe {}".format(str(ds['global_params'].get('ipxe', True) and
-                                 not config['aarch64'])),
+            "ipxe_enabled {}".format(
+                str(ds['global_params'].get('ipxe', True) and
+                    not config['aarch64'])),
             "undercloud_hostname undercloud.{}".format(ns['dns-domain']),
             "local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
                                     str(ns_admin['cidr']).split('/')[1]),
@@ -236,4 +240,22 @@ class Undercloud:
             "enabled": ns_external['enabled']
         }
 
+        config['http_proxy'] = ns.get('http_proxy', '')
+        config['https_proxy'] = ns.get('https_proxy', '')
+
         return config
+
+    def _update_delorean_repo(self):
+        if utils.internet_connectivity():
+            logging.info('Updating delorean repo on Undercloud')
+            delorean_repo = (
+                "https://trunk.rdoproject.org/centos7-{}"
+                "/current-tripleo/delorean.repo".format(self.os_version))
+            cmd = ("curl -L -f -o "
+                   "/etc/yum.repos.d/deloran.repo {}".format(delorean_repo))
+            try:
+                virt_utils.virt_customize({constants.VIRT_RUN_CMD: cmd},
+                                          self.volume)
+            except Exception:
+                logging.warning("Failed to download and update delorean repo "
+                                "for Undercloud")
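To make the repo refresh concrete: assuming an os_version of 'pike' (the actual DEFAULT_OS_VERSION lives in apex.common.constants and is not shown in this diff), the operation handed to virt_customize() resolves to roughly the following; note the target filename deloran.repo matches the code and unit test above:

    # Illustrative expansion of _update_delorean_repo(), assuming 'pike'.
    os_version = 'pike'  # assumed value for illustration only
    cmd = ("curl -L -f -o /etc/yum.repos.d/deloran.repo "
           "https://trunk.rdoproject.org/centos7-{}"
           "/current-tripleo/delorean.repo".format(os_version))
    ops = {'--run-command': cmd}  # VIRT_RUN_CMD, per the unit test above
    # virt_utils.virt_customize(ops, self.volume) then runs the curl inside
    # the undercloud image, and only when internet_connectivity() is True.
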
diff --git a/apex/virtual/exceptions.py b/apex/virtual/exceptions.py
new file mode 100644
index 0000000..e3dff51
--- /dev/null
+++ b/apex/virtual/exceptions.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexVirtualException(Exception):
+    pass
diff --git a/apex/virtual/utils.py b/apex/virtual/utils.py
index 226af1b..8b24bc4 100644
@@ -18,6 +18,8 @@ import xml.etree.ElementTree as ET
 
 from apex.common import utils as common_utils
 from apex.virtual import configure_vm as vm_lib
+from apex.virtual import exceptions as exc
+from time import sleep
 from virtualbmc import manager as vbmc_lib
 
 DEFAULT_RAM = 8192
@@ -131,11 +133,39 @@ def host_setup(node):
         chain.insert_rule(rule)
         try:
             subprocess.check_call(['vbmc', 'start', name])
-            logging.debug("Started vbmc for domain {}".format(name))
+            logging.debug("Started VBMC for domain {}".format(name))
         except subprocess.CalledProcessError:
-            logging.error("Failed to start vbmc for {}".format(name))
+            logging.error("Failed to start VBMC for {}".format(name))
             raise
-    logging.debug('vmbcs setup: {}'.format(vbmc_manager.list()))
+
+        logging.info("Checking VBMC {} is up".format(name))
+        is_running = False
+        for x in range(0, 4):
+            logging.debug("Polling to see if VBMC is up, attempt {}".format(x))
+            try:
+                output = subprocess.check_output(['vbmc', 'show', name],
+                                                 stderr=subprocess.STDOUT)
+            except subprocess.CalledProcessError:
+                logging.warning('Unable to issue "vbmc show" cmd')
+                continue
+            for line in output.decode('utf-8').split('\n'):
+                if 'status' in line:
+                    if 'running' in line:
+                        is_running = True
+                        break
+                    else:
+                        logging.debug('VBMC status is not "running"')
+                    break
+            if is_running:
+                break
+            sleep(1)
+        if is_running:
+            logging.info("VBMC {} is up and running".format(name))
+        else:
+            logging.error("Failed to verify VBMC is running")
+            raise exc.ApexVirtualException("Failed to bring up vbmc "
+                                           "{}".format(name))
+    logging.debug('VBMCs setup: {}'.format(vbmc_manager.list()))
 
 
 def virt_customize(ops, target):
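The new readiness check can be read as a standalone routine; a rough sketch under the same assumptions as the loop above (four attempts, one-second sleep, and the tabular 'field | value' output of vbmc show that the unit tests mock as 'status | running'):

    # Standalone sketch of the VBMC polling added to host_setup() above.
    import subprocess
    from time import sleep

    def wait_for_vbmc(name, attempts=4, interval=1):
        for attempt in range(attempts):
            try:
                output = subprocess.check_output(['vbmc', 'show', name],
                                                 stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError:
                sleep(interval)
                continue
            # Each row looks like 'status | running'; any other status value
            # means the virtual BMC is not ready yet.
            for line in output.decode('utf-8').splitlines():
                if 'status' in line and 'running' in line:
                    return True
            sleep(interval)
        return False
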
diff --git a/build/Makefile b/build/Makefile
index 729b3ce..fb6734b 100644
@@ -291,6 +291,7 @@ iso:        iso-clean images rpms $(CENTISO)
        cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-pygerrit2-2.0.3-1.el7.centos.noarch.rpm
        cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-gitdb2-2.0.3-1.el7.centos.noarch.rpm
        cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-GitPython-2.1.7-1.el7.centos.noarch.rpm
+       cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-distro-1.2.0-1.el7.centos.noarch.rpm
        # regenerate yum repo data
        @echo "Generating new yum metadata"
        createrepo --update -g $(BUILD_ROOT)/c7-opnfv-x86_64-comps.xml $(BUILD_DIR)/centos
diff --git a/build/barometer-install.sh b/build/barometer-install.sh
index ca9b79c..2391b6b 100755
@@ -22,7 +22,8 @@ source ./variables.sh
 # Versions/branches
 COLLECTD_OPENSTACK_PLUGINS_BRANCH="stable/pike"
 
-ARCH="6.el7.centos.x86_64.rpm"
+ARCH="8.el7.centos.x86_64.rpm"
+
 # don't fail because of missing certificate
 GETFLAG="--no-check-certificate"
 
@@ -58,19 +59,36 @@ function barometer_pkgs {
     | cut -d'-' -f9)
   RDT_SUFFIX=$INTEL_RDT_VER-1.el7.centos.x86_64.rpm
 
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-devel-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-utils-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_events-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_stats-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-virt-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-$RDT_SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-devel-$RDT_SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-python-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp_agent-$SUFFIX
-  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-intel_rdt-$SUFFIX
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-devel-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-utils-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-python-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_events-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_stats-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-${RDT_SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-devel-${RDT_SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-intel_rdt-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp_agent-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-virt-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-sensors-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ceph-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl_json-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-apache-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-write_http-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-mysql-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ping-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-smart-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl_xml-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-disk-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-rrdcached-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-iptables-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ipmi-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-netlink-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-rrdtool-${SUFFIX}
+  wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-lvm-${SUFFIX}
   curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
 
   tar cfz collectd.tar.gz *.rpm get-pip.py
@@ -88,8 +106,7 @@ function barometer_pkgs {
   # get the barometer puppet module and tar it
   rm -rf puppet-barometer
   git clone $PUPPET_BAROMETER_REPO puppet-barometer
-  cd puppet-barometer
-  pushd puppet-barometer/ > /dev/null
+  pushd puppet-barometer/puppet-barometer/ > /dev/null
   git archive --format=tar.gz HEAD > ${BUILD_DIR}/puppet-barometer.tar.gz
   popd > /dev/null
 
@@ -118,6 +135,10 @@ function barometer_pkgs {
     --run-command 'pip3 install requests libvirt-python pbr babel future six' \
     -a $OVERCLOUD_IMAGE
 
+  LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
+    --run-command 'yum remove -y collectd-write_sensu-5.8.0-2.el7.x86_64' \
+    -a $OVERCLOUD_IMAGE
+
   LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
     --run-command "yum install -y \
     /opt/libcollectdclient-${SUFFIX} \
@@ -132,7 +153,24 @@ function barometer_pkgs {
     /opt/collectd-intel_rdt-${SUFFIX} \
     /opt/collectd-snmp-${SUFFIX} \
     /opt/collectd-snmp_agent-${SUFFIX} \
-    /opt/collectd-virt-${SUFFIX}" \
+    /opt/collectd-virt-${SUFFIX} \
+    /opt/collectd-sensors-${SUFFIX} \
+    /opt/collectd-ceph-${SUFFIX} \
+    /opt/collectd-curl_json-${SUFFIX} \
+    /opt/collectd-apache-${SUFFIX} \
+    /opt/collectd-write_http-${SUFFIX} \
+    /opt/collectd-mysql-${SUFFIX} \
+    /opt/collectd-ping-${SUFFIX} \
+    /opt/collectd-smart-${SUFFIX} \
+    /opt/collectd-curl_xml-${SUFFIX} \
+    /opt/collectd-disk-${SUFFIX} \
+    /opt/collectd-rrdcached-${SUFFIX} \
+    /opt/collectd-iptables-${SUFFIX} \
+    /opt/collectd-curl-${SUFFIX} \
+    /opt/collectd-ipmi-${SUFFIX} \
+    /opt/collectd-netlink-${SUFFIX} \
+    /opt/collectd-rrdtool-${SUFFIX} \
+    /opt/collectd-lvm-${SUFFIX}" \
     -a $OVERCLOUD_IMAGE
 
   # install collectd-openstack-plugins
@@ -150,4 +188,3 @@ function barometer_pkgs {
     --run-command 'mkdir -p /etc/collectd/collectd.conf.d' \
     -a $OVERCLOUD_IMAGE
 }
-
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 4ef6ef8..3df18e9 100644
@@ -160,7 +160,7 @@ parameter_defaults:
   ComputeServices:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Sshd
-    #- OS::TripleO::Services::Barometer
+    - OS::TripleO::Services::Barometer
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CephClient
     - OS::TripleO::Services::CephOSD
@@ -196,6 +196,8 @@ resource_registry:
   OS::TripleO::Services::SwiftStorage: OS::Heat::None
   #OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
   OS::TripleO::Services::SwiftProxy: OS::Heat::None
+  OS::TripleO::Services::BarbicanApi: "/usr/share/openstack-tripleo-heat-\
+    templates/puppet/services/barbican-api.yaml"
   # Extra Config
   OS::TripleO::ComputeExtraConfigPre: OS::Heat::None
   OS::TripleO::ControllerExtraConfigPre: OS::Heat::None
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 1b7843a..a4006c4 100755
@@ -48,6 +48,7 @@ qemu-img resize overcloud-full_build.qcow2 +1500M
 # installing forked apex-puppet-tripleo
 # upload neutron port data plane status
 LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
+    --run-command "curl -f https://trunk.rdoproject.org/centos7-pike/delorean-deps.repo > /etc/yum.repos.d/delorean-deps.repo" \
     --run-command "xfs_growfs /dev/sda" \
     --upload ${BUILD_DIR}/apex-puppet-tripleo.tar.gz:/etc/puppet/modules \
     --run-command "cd /etc/puppet/modules && rm -rf tripleo && tar xzf apex-puppet-tripleo.tar.gz" \
@@ -66,6 +67,7 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
     --upload ${BUILD_ROOT}/patches/neutron_openstackclient_dps.patch:/usr/lib/python2.7/site-packages/ \
     --upload ${BUILD_ROOT}/patches/puppet-neutron-add-sfc.patch:/usr/share/openstack-puppet/modules/neutron/ \
     --upload ${BUILD_ROOT}/patches/congress-parallel-execution.patch:/usr/lib/python2.7/site-packages/ \
+    --install openstack-utils \
     -a overcloud-full_build.qcow2
 #    --upload ${BUILD_ROOT}/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch:/usr/share/openstack-puppet/modules/neutron/ \
 #    --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-vpp-ml2-type_drivers-setting.patch" \
@@ -143,11 +145,12 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
     --install python-etcd,puppet-etcd \
     --install patch \
     --install docker,kubelet,kubeadm,kubectl,kubernetes-cni \
+    --upload ${BUILD_ROOT}/patches/puppet-ceph.patch:/etc/puppet/modules/ceph/ \
+    --run-command "cd /etc/puppet/modules/ceph && patch -p1 < puppet-ceph.patch" \
     -a overcloud-full_build.qcow2
 
     # upload and install barometer packages
-    # FIXME collectd pkgs conflict during upgrade to Pike
-    # barometer_pkgs overcloud-full_build.qcow2
+    barometer_pkgs overcloud-full_build.qcow2
 
 fi # end x86_64 specific items
 
diff --git a/build/patches/puppet-ceph.patch b/build/patches/puppet-ceph.patch
new file mode 100644
index 0000000..18bf9ee
--- /dev/null
+++ b/build/patches/puppet-ceph.patch
@@ -0,0 +1,76 @@
+From 99a0bcc818ed801f6cb9e07a9904ee40e624bdab Mon Sep 17 00:00:00 2001
+From: Tim Rozet <trozet@redhat.com>
+Date: Mon, 5 Mar 2018 17:03:00 -0500
+Subject: [PATCH] Fixes ceph key import failures by adding multiple attempts
+
+Signed-off-by: Tim Rozet <trozet@redhat.com>
+---
+ manifests/key.pp | 42 +++++++++++++++++-------------------------
+ 1 file changed, 17 insertions(+), 25 deletions(-)
+
+diff --git a/manifests/key.pp b/manifests/key.pp
+index 911df1a..d47a4c3 100644
+--- a/manifests/key.pp
++++ b/manifests/key.pp
+@@ -123,22 +123,6 @@ define ceph::key (
+     }
+   }
+-  # ceph-authtool --add-key is idempotent, will just update pre-existing keys
+-  exec { "ceph-key-${name}":
+-    command   => "/bin/true # comment to satisfy puppet syntax requirements
+-set -ex
+-ceph-authtool ${keyring_path} --name '${name}' --add-key '${secret}' ${caps}",
+-    unless    => "/bin/true # comment to satisfy puppet syntax requirements
+-set -x
+-NEW_KEYRING=\$(mktemp)
+-ceph-authtool \$NEW_KEYRING --name '${name}' --add-key '${secret}' ${caps}
+-diff -N \$NEW_KEYRING ${keyring_path}
+-rv=\$?
+-rm \$NEW_KEYRING
+-exit \$rv",
+-    require   => [ File[$keyring_path], ],
+-    logoutput => true,
+-  }
+   if $inject {
+@@ -162,18 +146,26 @@ exit \$rv",
+     exec { "ceph-injectkey-${name}":
+       command   => "/bin/true # comment to satisfy puppet syntax requirements
+ set -ex
++cat ${keyring_path}
++ceph-authtool ${keyring_path} --name '${name}' --add-key '${secret}' ${caps}
++cat ${keyring_path}
+ ceph ${cluster_option} ${inject_id_option} ${inject_keyring_option} auth import -i ${keyring_path}",
+-      unless    => "/bin/true # comment to satisfy puppet syntax requirements
+-set -x
+-OLD_KEYRING=\$(mktemp)
+-ceph ${cluster_option} ${inject_id_option} ${inject_keyring_option} auth get ${name} -o \$OLD_KEYRING || true
+-diff -N \$OLD_KEYRING ${keyring_path}
+-rv=$?
+-rm \$OLD_KEYRING
+-exit \$rv",
+-      require   => [ Class['ceph'], Exec["ceph-key-${name}"], ],
++      require   => [ File[$keyring_path], Class['ceph'] ],
+       logoutput => true,
++      tries     => 6,
++      try_sleep => 10
+     }
++  } else {
++
++    # ceph-authtool --add-key is idempotent, will just update pre-existing keys
++    exec { "ceph-key-${name}":
++      command   => "/bin/true # comment to satisfy puppet syntax requirements
++set -ex
++ceph-authtool ${keyring_path} --name '${name}' --add-key '${secret}' ${caps}
++cat ${keyring_path}",
++      require   => [ File[$keyring_path], ],
++      logoutput => true,
++    }
+   }
+ }
+-- 
+2.14.3
+
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index 342f442..f8226e4 100644
@@ -11,13 +11,13 @@ URL:                https://gerrit.opnfv.org/gerrit/apex.git
 Source0:       opnfv-apex-common.tar.gz
 
 BuildArch:      noarch
-BuildRequires:  python-docutils python34-devel
+BuildRequires:  python34-docutils python34-devel
 Requires:       opnfv-apex-sdn opnfv-apex-undercloud openvswitch qemu-kvm bridge-utils libguestfs-tools python34-libvirt
 Requires:       initscripts net-tools iputils iproute iptables python34 python34-yaml python34-jinja2 python3-ipmi python34-virtualbmc
 Requires:       ipxe-roms-qemu >= 20160127-1
 Requires:       libvirt-devel ansible
 Requires:       python34-iptables python34-cryptography python34-pbr
-Requires:       python34-GitPython python34-pygerrit2
+Requires:       python34-GitPython python34-pygerrit2 python34-distro
 
 %description
 Scripts for OPNFV deployment using Apex
@@ -118,6 +118,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %doc %{_docdir}/opnfv/inventory.yaml.example
 
 %changelog
+* Wed Feb 14 2018 Tim Rozet <trozet@redhat.com> - 6.0-1
+  Fix docutils requirement and add python34-distro
 * Wed Nov 29 2017 Tim Rozet <trozet@redhat.com> - 6.0-0
   Bump version for Fraser
 * Wed Oct 25 2017 Tim Rozet <trozet@redhat.com> - 5.0-9
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index fe11a9b..ffe3a18 100644
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'
 
+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
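For reference, a short sketch of how these optional keys travel through the Python changes above once uncommented (the proxy URL is a placeholder, not an Apex default):

    # Placeholder values; nothing here is a shipped default.
    net_settings = {'http_proxy': 'http://proxy.server:8080',
                    'https_proxy': ''}

    # Undercloud: deploy.py exposes them as Ansible variables, and
    # configure_undercloud.yml (below) writes them to /etc/environment.
    deploy_vars = {'http_proxy': net_settings.get('http_proxy', ''),
                   'https_proxy': net_settings.get('https_proxy', '')}

    # Overcloud: prep_image() appends the same values to /etc/environment
    # inside the image via a virt-customize run-command, e.g.:
    run_cmd = "echo 'http_proxy={}' >> /etc/environment".format(
        net_settings['http_proxy'])
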
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index 7dddf34..176bc7c 100644
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'
 
+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
diff --git a/config/network/network_settings_vlans.yaml b/config/network/network_settings_vlans.yaml
index 345dbbd..29cd193 100644
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'
 
+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
diff --git a/config/network/network_settings_vpp.yaml b/config/network/network_settings_vpp.yaml
index 2f6bba5..a40158e 100644
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'
 
+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
diff --git a/docs/release/installation/baremetal.rst b/docs/release/installation/baremetal.rst
index 17223df..703d169 100644
@@ -238,5 +238,5 @@ Follow the steps below to execute:
 3.  When the deployment is complete the undercloud IP and overcloud dashboard
     url will be printed. OPNFV has now been deployed using Apex.
 
-.. _`Execution Requirements (Bare Metal Only)`: index.html#execution-requirements-bare-metal-only
-.. _`Network Requirements`: index.html#network-requirements
+.. _`Execution Requirements (Bare Metal Only)`: requirements.html#execution-requirements-bare-metal-only
+.. _`Network Requirements`: requirements.html#network-requirements
diff --git a/docs/release/installation/virtual.rst b/docs/release/installation/virtual.rst
index 2da8ccf..af8aece 100644
@@ -98,5 +98,5 @@ Verifying the Setup - VMs
 To verify the set you can follow the instructions in the `Verifying the Setup`_
 section.
 
-.. _`Install Bare Metal Jump Host`: index.html#install-bare-metal-jump-host
-.. _`Verifying the Setup`: index.html#verifying-the-setup
+.. _`Install Bare Metal Jump Host`: baremetal.html#install-bare-metal-jump-host
+.. _`Verifying the Setup`: verification.html#verifying-the-setup
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
index 60afca9..e9ce875 100644
         regexp: 'Defaults\s*requiretty'
         state: absent
       become: yes
+    - lineinfile:
+        path: /etc/environment
+        regexp: '^http_proxy'
+        line: "http_proxy={{ http_proxy }}"
+      become: yes
+      when: http_proxy
+    - lineinfile:
+        path: /etc/environment
+        regexp: '^https_proxy'
+        line: "https_proxy={{ https_proxy }}"
+      become: yes
+      when: https_proxy
     - name: openstack-configs undercloud
       shell: openstack-config --set undercloud.conf DEFAULT {{ item }}
       with_items: "{{ undercloud_config }}"
diff --git a/lib/ansible/playbooks/deploy_dependencies.yml b/lib/ansible/playbooks/deploy_dependencies.yml
index 545ee33..fb1da46 100644
@@ -7,6 +7,7 @@
       with_items:
         - python-lxml
         - libvirt-python
+        - libguestfs-tools
     - sysctl:
         name: net.ipv4.ip_forward
         state: present
       when:
         - ansible_architecture == "x86_64"
         - "'Y' not in nested_result.stdout"
+    - modprobe:
+        name: ip6_tables
+        state: present
+    - modprobe:
+        name: ip_tables
+        state: present
     - name: Generate SSH key for root if missing
       shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
     - name: Check that /u/l/python3.4/site-packages/virtualbmc/vbmc.py exists
diff --git a/requirements.txt b/requirements.txt
index 0326a8c..18bd020 100644
@@ -11,3 +11,4 @@ PyYAML
 Jinja2>=2.8
 GitPython
 pygerrit2
+distro