Migrates Apex to Python
author    Tim Rozet <trozet@redhat.com>
Mon, 26 Jun 2017 01:25:36 +0000 (21:25 -0400)
committer Tim Rozet <trozet@redhat.com>
Wed, 23 Aug 2017 12:59:54 +0000 (08:59 -0400)
Removes all bash libraries and converts almost all of the code to a
mixture of Python and Ansible.  utils.sh and clean.sh still exist.
clean.sh will be migrated fully to clean.py in another patch.
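
For reference, apex/clean.py below already gains a small argparse front end
in this patch; an illustrative way to exercise it once the package is
installed (the exact console-script name, if any, is not shown here) is:

python3 -m apex.clean -f <inventory file>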

The Apex Python package is now built into the opnfv-apex-common RPM.  To
install locally, run 'pip3 install .'.  To deploy:

opnfv-deploy -d <file> -n <file> --image-dir /root/apex/.build  -v --debug

Non-Python files (THT YAML, settings files, Ansible playbooks) are all
installed into /usr/share/opnfv-apex/.  The RPM will copy settings files
into /etc/opnfv-apex/.
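
When deploying from a local checkout instead of the RPM, those directories
can be overridden with the new apex/deploy.py flags (the ./build and ./lib
paths here are illustrative):

opnfv-deploy -d <file> -n <file> --deploy-dir ./build --lib-dir ./lib --image-dir /root/apex/.build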

JIRA: APEX-317

Change-Id: I3232f0329bcd13bce5a28da6a8c9c84d0b048024
Signed-off-by: Tim Rozet <trozet@redhat.com>
81 files changed:
.gitignore
apex/__init__.py [moved from lib/python/apex/__init__.py with 68% similarity]
apex/build.py [moved from ci/build.py with 98% similarity]
apex/build/__init__.py [moved from lib/python/apex/common/__init__.py with 100% similarity]
apex/build/build_utils.py [moved from lib/python/build_utils.py with 100% similarity]
apex/clean.py [moved from lib/python/apex/clean.py with 64% similarity]
apex/common/__init__.py [new file with mode: 0644]
apex/common/constants.py [moved from lib/python/apex/common/constants.py with 74% similarity]
apex/common/exceptions.py [new file with mode: 0644]
apex/common/parsers.py [new file with mode: 0644]
apex/common/utils.py [new file with mode: 0644]
apex/deploy.py [new file with mode: 0644]
apex/inventory/__init__.py [new file with mode: 0644]
apex/inventory/inventory.py [moved from lib/python/apex/inventory.py with 88% similarity]
apex/network/__init__.py [new file with mode: 0644]
apex/network/ip_utils.py [moved from lib/python/apex/ip_utils.py with 100% similarity]
apex/network/jumphost.py [new file with mode: 0644]
apex/network/network_environment.py [moved from lib/python/apex/network_environment.py with 98% similarity]
apex/overcloud/__init__.py [new file with mode: 0644]
apex/overcloud/config.py [new file with mode: 0644]
apex/overcloud/overcloud_deploy.py [new file with mode: 0644]
apex/settings/__init__.py [new file with mode: 0644]
apex/settings/deploy_settings.py [moved from lib/python/apex/deploy_settings.py with 92% similarity]
apex/settings/network_settings.py [moved from lib/python/apex/network_settings.py with 88% similarity]
apex/tests/__init__.py [new file with mode: 0644]
apex/tests/config/inventory.yaml [moved from tests/config/inventory.yaml with 100% similarity]
apex/tests/constants.py [new file with mode: 0644]
apex/tests/playbooks/test_playbook.yaml [new file with mode: 0644]
apex/tests/smoke_tests/execute_smoke_tests.sh [moved from tests/smoke_tests/execute_smoke_tests.sh with 100% similarity]
apex/tests/smoke_tests/execute_tests.yml [moved from tests/smoke_tests/execute_tests.yml with 100% similarity]
apex/tests/smoke_tests/prepare_undercloud.yml [moved from tests/smoke_tests/prepare_undercloud.yml with 100% similarity]
apex/tests/smoke_tests/smoke_tests.yml [moved from tests/smoke_tests/smoke_tests.yml with 100% similarity]
apex/tests/test_apex_clean.py [moved from tests/test_apex_clean.py with 95% similarity]
apex/tests/test_apex_common_utils.py [moved from tests/test_apex_common_utils.py with 58% similarity]
apex/tests/test_apex_deploy_settings.py [moved from tests/test_apex_deploy_settings.py with 83% similarity]
apex/tests/test_apex_inventory.py [moved from tests/test_apex_inventory.py with 72% similarity]
apex/tests/test_apex_ip_utils.py [moved from tests/test_apex_ip_utils.py with 94% similarity]
apex/tests/test_apex_network_environment.py [moved from tests/test_apex_network_environment.py with 77% similarity]
apex/tests/test_apex_network_settings.py [moved from tests/test_apex_network_settings.py with 81% similarity]
apex/undercloud/__init__.py [new file with mode: 0644]
apex/undercloud/undercloud.py [new file with mode: 0644]
apex/virtual/__init__.py [new file with mode: 0644]
apex/virtual/configure_vm.py [new file with mode: 0755]
apex/virtual/virtual_utils.py [new file with mode: 0644]
build/Makefile
build/domain.xml [moved from lib/installer/domain.xml with 100% similarity]
build/rpm_specs/opnfv-apex-common.spec
build/variables.sh
ci/build.sh
ci/clean.sh
ci/deploy.sh
ci/run_smoke_tests.sh
ci/util.sh
lib/ansible/playbooks/build_dependencies.yml
lib/ansible/playbooks/configure_undercloud.yml [new file with mode: 0644]
lib/ansible/playbooks/deploy_dependencies.yml [new file with mode: 0644]
lib/ansible/playbooks/deploy_overcloud.yml [new file with mode: 0644]
lib/ansible/playbooks/post_deploy_overcloud.yml [new file with mode: 0644]
lib/ansible/playbooks/post_deploy_undercloud.yml [new file with mode: 0644]
lib/ansible/playbooks/templates/external_vlan_ifcfg.yml.j2 [new file with mode: 0644]
lib/ansible/playbooks/templates/virsh_network_default.xml.j2 [new file with mode: 0644]
lib/ansible/playbooks/templates/virsh_network_ovs.xml.j2 [new file with mode: 0644]
lib/ansible/playbooks/templates/virsh_pool.xml.j2 [new file with mode: 0644]
lib/ansible/playbooks/undercloud_aarch64.yml [new file with mode: 0644]
lib/common-functions.sh [deleted file]
lib/configure-deps-functions.sh [deleted file]
lib/configure-vm [deleted file]
lib/overcloud-deploy-functions.sh [deleted file]
lib/parse-functions.sh [deleted file]
lib/post-install-functions.sh [deleted file]
lib/python/apex/common/utils.py [deleted file]
lib/python/apex_python_utils.py [deleted file]
lib/undercloud-functions.sh [deleted file]
lib/utility-functions.sh [deleted file]
lib/virtual-setup-functions.sh [deleted file]
requirements.txt [new file with mode: 0644]
setup.cfg [new file with mode: 0644]
setup.py [new file with mode: 0644]
test-requirements.txt [new file with mode: 0644]
tests/test_apex_python_utils_py.py [deleted file]
tox.ini [new file with mode: 0644]

index 511a0de..47eaef6 100644 (file)
@@ -8,3 +8,10 @@
 .build/
 .cache/
 ci/apex_build.log
+ci/apex_deploy.log
+.tox/
+apex.egg-info/
+/apex/tests/playbooks/*.retry
+coverage.xml
+nosetests.xml
+ci/apex_clean.log
similarity index 68%
rename from lib/python/apex/__init__.py
rename to apex/__init__.py
index b2a45f7..4db820d 100644 (file)
@@ -8,8 +8,8 @@
 ##############################################################################
 
 
-from .network_settings import NetworkSettings
-from .deploy_settings import DeploySettings
-from .network_environment import NetworkEnvironment
+from apex.network.network_environment import NetworkEnvironment
+from apex.settings.deploy_settings import DeploySettings
+from apex.settings.network_settings import NetworkSettings
 from .clean import clean_nodes
-from .inventory import Inventory
+from .inventory.inventory import Inventory
similarity index 98%
rename from ci/build.py
rename to apex/build.py
index a17b21b..cda4e06 100644 (file)
@@ -20,6 +20,7 @@ TMP_CACHE = '.cache'
 BUILD_ROOT = 'build'
 BUILD_LOG_FILE = './apex_build.log'
 
+
 class ApexBuildException(Exception):
     pass
 
@@ -216,9 +217,12 @@ if __name__ == '__main__':
     console.setFormatter(logging.Formatter(formatter))
     logging.getLogger('').addHandler(console)
     apex_root = os.path.split(os.getcwd())[0]
+    if 'apex/apex' in apex_root:
+        apex_root = os.path.split(apex_root)[0]
     for root, dirs, files in os.walk(apex_root):
-        if BUILD_ROOT in dirs:
+        if BUILD_ROOT in dirs and 'apex/apex' not in root:
             apex_root = root
+            break
     apex_build_root = os.path.join(apex_root, BUILD_ROOT)
     if os.path.isdir(apex_build_root):
         cache_tmp_dir = os.path.join(apex_root, TMP_CACHE)
similarity index 64%
rename from lib/python/apex/clean.py
rename to apex/clean.py
index 184b5ec..af9e8ce 100644 (file)
@@ -9,7 +9,9 @@
 
 # Clean will eventually be migrated to this file
 
+import argparse
 import logging
+import os
 import pyipmi
 import pyipmi.interfaces
 import sys
@@ -37,3 +39,27 @@ def clean_nodes(inventory):
         except Exception as e:
             logging.error("Failure while shutting down node {}".format(e))
             sys.exit(1)
+
+
+def main():
+    clean_parser = argparse.ArgumentParser()
+    clean_parser.add_argument('-f',
+                              dest='inv_file',
+                              required=True,
+                              help='File which contains inventory')
+    args = clean_parser.parse_args(sys.argv[1:])
+    os.makedirs(os.path.dirname('./apex_clean.log'), exist_ok=True)
+    formatter = '%(asctime)s %(levelname)s: %(message)s'
+    logging.basicConfig(filename='./apex_clean.log',
+                        format=formatter,
+                        datefmt='%m/%d/%Y %I:%M:%S %p',
+                        level=logging.DEBUG)
+    console = logging.StreamHandler()
+    console.setLevel(logging.DEBUG)
+    console.setFormatter(logging.Formatter(formatter))
+    logging.getLogger('').addHandler(console)
+    clean_nodes(args.inv_file)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/apex/common/__init__.py b/apex/common/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
similarity index 74%
rename from lib/python/apex/common/constants.py
rename to apex/common/constants.py
index 3aa28ea..0df7152 100644 (file)
@@ -7,6 +7,8 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import os
+
 ADMIN_NETWORK = 'admin'
 TENANT_NETWORK = 'tenant'
 EXTERNAL_NETWORK = 'external'
@@ -28,3 +30,17 @@ CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
 PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
                  "extraconfig/pre_deploy/"
 DEFAULT_ROOT_DEV = 'sda'
+LIBVIRT_VOLUME_PATH = '/var/lib/libvirt/images'
+
+VIRT_UPLOAD = '--upload'
+VIRT_INSTALL = '-install'
+VIRT_RUN_CMD = '--run-command'
+VIRT_PW = '--root-password'
+
+THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
+THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
+
+DEFAULT_ODL_VERSION = 'carbon'
+DEBUG_OVERCLOUD_PW = 'opnfvapex'
+NET_ENV_FILE = 'network-environment.yaml'
+DEPLOY_TIMEOUT = 90
diff --git a/apex/common/exceptions.py b/apex/common/exceptions.py
new file mode 100644 (file)
index 0000000..c660213
--- /dev/null
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexDeployException(Exception):
+    pass
diff --git a/apex/common/parsers.py b/apex/common/parsers.py
new file mode 100644 (file)
index 0000000..8744c86
--- /dev/null
@@ -0,0 +1,73 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import json
+import logging
+import pprint
+import os
+import re
+
+from apex.common.exceptions import ApexDeployException
+
+"""Parser functions for overcloud/openstack output"""
+
+
+def parse_nova_output(in_file):
+    """
+    Parses nova list output into a dictionary format for node name and ip
+    :param in_file: json format from openstack server list
+    :return: dictionary format for {"node name": "node ip"}
+    """
+    if not os.path.isfile(in_file):
+        raise FileNotFoundError(in_file)
+    node_dict = dict()
+    with open(in_file, 'r') as fh:
+        nova_list = json.load(fh)
+
+    for server in nova_list:
+        ip_match = re.search('([0-9]+\.){3}[0-9]+', server['Networks'])
+        if ip_match is None:
+            logging.error("Unable to find IP in nova output "
+                          "{}".format(pprint.pformat(server, indent=4)))
+            raise ApexDeployException("Unable to parse IP from nova output")
+        else:
+            node_dict[server['Name']] = ip_match.group(0)
+
+    if not node_dict:
+        raise ApexDeployException("No overcloud nodes found in: {}".format(
+            in_file))
+    return node_dict
+
+
+def parse_overcloudrc(in_file):
+    """
+    Parses overcloudrc into a dictionary format for key and value
+    :param in_file:
+    :return: dictionary format for {"variable": "value"}
+    """
+    logging.debug("Parsing overcloudrc file {}".format(in_file))
+    if not os.path.isfile(in_file):
+        raise FileNotFoundError(in_file)
+    creds = {}
+    with open(in_file, 'r') as fh:
+        lines = fh.readlines()
+    kv_pattern = re.compile('^export\s+([^\s]+)=([^\s]+)$')
+    for line in lines:
+        if 'export' not in line:
+            continue
+        else:
+            res = re.search(kv_pattern, line.strip())
+            if res:
+                creds[res.group(1)] = res.group(2)
+                logging.debug("os cred found: {}, {}".format(res.group(1),
+                                                             res.group(2)))
+            else:
+                logging.debug("os cred not found in: {}".format(line))
+
+    return creds
diff --git a/apex/common/utils.py b/apex/common/utils.py
new file mode 100644 (file)
index 0000000..848f264
--- /dev/null
@@ -0,0 +1,107 @@
+##############################################################################
+# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import json
+import logging
+import os
+import pprint
+import subprocess
+import yaml
+
+
+def str2bool(var):
+    if isinstance(var, bool):
+        return var
+    else:
+        return var.lower() in ("true", "yes")
+
+
+def parse_yaml(yaml_file):
+    with open(yaml_file) as f:
+        parsed_dict = yaml.safe_load(f)
+        return parsed_dict
+
+
+def dump_yaml(data, file):
+    """
+    Dumps data to a file as yaml
+    :param data: yaml to be written to file
+    :param file: filename to write to
+    :return:
+    """
+    logging.debug("Writing file {} with "
+                  "yaml data:\n{}".format(file, yaml.safe_dump(data)))
+    with open(file, "w") as fh:
+        yaml.safe_dump(data, fh, default_flow_style=False)
+
+
+def dict_objects_to_str(dictionary):
+        if isinstance(dictionary, list):
+            tmp_list = []
+            for element in dictionary:
+                if isinstance(element, dict):
+                    tmp_list.append(dict_objects_to_str(element))
+                else:
+                    tmp_list.append(str(element))
+            return tmp_list
+        elif not isinstance(dictionary, dict):
+            if not isinstance(dictionary, bool):
+                return str(dictionary)
+            else:
+                return dictionary
+        return dict((k, dict_objects_to_str(v)) for
+                    k, v in dictionary.items())
+
+
+def run_ansible(ansible_vars, playbook, host='localhost', user='root',
+                tmp_dir=None, dry_run=False):
+    """
+    Executes ansible playbook and checks for errors
+    :param ansible_vars: dictionary of variables to inject into ansible run
+    :param playbook: playbook to execute
+    :param tmp_dir: temp directory to store ansible command
+    :param dry_run: Do not actually apply changes
+    :return: None
+    """
+    logging.info("Executing ansible playbook: {}".format(playbook))
+    inv_host = "{},".format(host)
+    if host == 'localhost':
+        conn_type = 'local'
+    else:
+        conn_type = 'smart'
+    ansible_command = ['ansible-playbook', '--become', '-i', inv_host,
+                       '-u', user, '-c', conn_type, playbook, '-vvv']
+    if dry_run:
+        ansible_command.append('--check')
+
+    if isinstance(ansible_vars, dict) and ansible_vars:
+        logging.debug("Ansible variables to be set:\n{}".format(
+            pprint.pformat(ansible_vars)))
+        ansible_command.append('--extra-vars')
+        ansible_command.append(json.dumps(ansible_vars))
+        if tmp_dir:
+            ansible_tmp = os.path.join(tmp_dir,
+                                       os.path.basename(playbook) + '.rerun')
+            # FIXME(trozet): extra vars are printed without single quotes
+            # so a dev has to add them manually to the command to rerun
+            # the playbook.  Need to test if we can just add the single quotes
+            # to the json dumps to the ansible command and see if that works
+            with open(ansible_tmp, 'w') as fh:
+                fh.write("ANSIBLE_HOST_KEY_CHECKING=FALSE {}".format(
+                    ' '.join(ansible_command)))
+    try:
+        my_env = os.environ.copy()
+        my_env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+        logging.info("Executing playbook...this may take some time")
+        logging.debug(subprocess.check_output(ansible_command, env=my_env,
+                      stderr=subprocess.STDOUT).decode('utf-8'))
+    except subprocess.CalledProcessError as e:
+        logging.error("Error executing ansible: {}".format(
+            pprint.pformat(e.output.decode('utf-8'))))
+        raise
diff --git a/apex/deploy.py b/apex/deploy.py
new file mode 100644 (file)
index 0000000..76708e9
--- /dev/null
@@ -0,0 +1,441 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import json
+import logging
+import os
+import pprint
+import shutil
+import sys
+import tempfile
+
+import apex.virtual.configure_vm as vm_lib
+import apex.virtual.virtual_utils as virt_utils
+from apex import DeploySettings
+from apex import Inventory
+from apex import NetworkEnvironment
+from apex import NetworkSettings
+from apex.common import utils
+from apex.common import constants
+from apex.common import parsers
+from apex.common.exceptions import ApexDeployException
+from apex.network import jumphost
+from apex.undercloud import undercloud as uc_lib
+from apex.overcloud import config as oc_cfg
+from apex.overcloud import overcloud_deploy
+
+APEX_TEMP_DIR = tempfile.mkdtemp()
+ANSIBLE_PATH = 'ansible/playbooks'
+SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
+
+
+def deploy_quickstart(args, deploy_settings_file, network_settings_file,
+                      inventory_file=None):
+    pass
+
+
+def validate_cross_settings(deploy_settings, net_settings, inventory):
+    """
+    Used to validate compatibility across settings file.
+    :param deploy_settings: parsed settings for deployment
+    :param net_settings: parsed settings for network
+    :param inventory: parsed inventory file
+    :return: None
+    """
+
+    if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
+            not in net_settings.enabled_network_list:
+        raise ApexDeployException("Setting a DPDK based dataplane requires"
+                                  "a dedicated NIC for tenant network")
+
+    # TODO(trozet): add more checks here like RAM for ODL, etc
+    # check if odl_vpp_netvirt is true and vpp is set
+    # Check if fdio and nosdn:
+    # tenant_nic_mapping_controller_members" ==
+    # "$tenant_nic_mapping_compute_members
+
+
+def build_vms(inventory, network_settings):
+    """
+    Creates VMs and configures vbmc and host
+    :param inventory:
+    :param network_settings:
+    :return:
+    """
+
+    for idx, node in enumerate(inventory['nodes']):
+        name = 'baremetal{}'.format(idx)
+        volume = name + ".qcow2"
+        volume_path = os.path.join(constants.LIBVIRT_VOLUME_PATH, volume)
+        # TODO(trozet): add back aarch64
+        # TODO(trozet): add error checking
+        vm_lib.create_vm(
+            name, volume_path,
+            baremetal_interfaces=network_settings.enabled_network_list,
+            memory=node['memory'], cpus=node['cpu'],
+            macs=[node['mac_address']])
+        virt_utils.host_setup({name: node['pm_port']})
+
+
+def create_deploy_parser():
+    deploy_parser = argparse.ArgumentParser()
+    deploy_parser.add_argument('--debug', action='store_true', default=False,
+                               help="Turn on debug messages")
+    deploy_parser.add_argument('-l', '--log-file',
+                               default='./apex_deploy.log',
+                               dest='log_file', help="Log file to log to")
+    deploy_parser.add_argument('-d', '--deploy-settings',
+                               dest='deploy_settings_file',
+                               required=True,
+                               help='File which contains Apex deploy settings')
+    deploy_parser.add_argument('-n', '--network-settings',
+                               dest='network_settings_file',
+                               required=True,
+                               help='File which contains Apex network '
+                                    'settings')
+    deploy_parser.add_argument('-i', '--inventory-file',
+                               dest='inventory_file',
+                               default=None,
+                               help='Inventory file which contains POD '
+                                    'definition')
+    deploy_parser.add_argument('-e', '--environment-file',
+                               dest='env_file',
+                               default='opnfv-environment.yaml',
+                               help='Provide alternate base env file')
+    deploy_parser.add_argument('-v', '--virtual', action='store_true',
+                               default=False,
+                               dest='virtual',
+                               help='Enable virtual deployment')
+    deploy_parser.add_argument('--interactive', action='store_true',
+                               default=False,
+                               help='Enable interactive deployment mode which '
+                                    'requires user to confirm steps of '
+                                    'deployment')
+    deploy_parser.add_argument('--virtual-computes',
+                               dest='virt_compute_nodes',
+                               default=1,
+                               help='Number of Virtual Compute nodes to create'
+                                    ' and use during deployment (defaults to 1'
+                                    ' for noha and 2 for ha)')
+    deploy_parser.add_argument('--virtual-cpus',
+                               dest='virt_cpus',
+                               default=4,
+                               help='Number of CPUs to use per Overcloud VM in'
+                                    ' a virtual deployment (defaults to 4)')
+    deploy_parser.add_argument('--virtual-default-ram',
+                               dest='virt_default_ram',
+                               default=8,
+                               help='Amount of default RAM to use per '
+                                    'Overcloud VM in GB (defaults to 8).')
+    deploy_parser.add_argument('--virtual-compute-ram',
+                               dest='virt_compute_ram',
+                               default=None,
+                               help='Amount of RAM to use per Overcloud '
+                                    'Compute VM in GB (defaults to 8). '
+                                    'Overrides --virtual-default-ram arg for '
+                                    'computes')
+    deploy_parser.add_argument('--deploy-dir',
+                               default='/usr/share/opnfv-apex',
+                               help='Directory to deploy from which contains '
+                                    'base config files for deployment')
+    deploy_parser.add_argument('--image-dir',
+                               default='/var/opt/opnfv/images',
+                               help='Directory which contains '
+                                    'base disk images for deployment')
+    deploy_parser.add_argument('--lib-dir',
+                               default='/usr/share/opnfv-apex',
+                               help='Directory path for apex ansible '
+                                    'and third party libs')
+    deploy_parser.add_argument('--quickstart', action='store_true',
+                               default=False,
+                               help='Use tripleo-quickstart to deploy')
+    return deploy_parser
+
+
+def validate_deploy_args(args):
+    """
+    Validates arguments for deploy
+    :param args:
+    :return: None
+    """
+
+    logging.debug('Validating arguments for deployment')
+    if args.virtual and args.inventory_file is not None:
+        logging.error("Virtual enabled but inventory file also given")
+        raise ApexDeployException('You should not specify an inventory file '
+                                  'with virtual deployments')
+    elif args.virtual:
+        args.inventory_file = os.path.join(APEX_TEMP_DIR,
+                                           'inventory-virt.yaml')
+    elif os.path.isfile(args.inventory_file) is False:
+        logging.error("Specified inventory file does not exist: {}".format(
+            args.inventory_file))
+        raise ApexDeployException('Specified inventory file does not exist')
+
+    for settings_file in (args.deploy_settings_file,
+                          args.network_settings_file):
+        if os.path.isfile(settings_file) is False:
+            logging.error("Specified settings file does not "
+                          "exist: {}".format(settings_file))
+            raise ApexDeployException('Specified settings file does not '
+                                      'exist: {}'.format(settings_file))
+
+
+def main():
+    parser = create_deploy_parser()
+    args = parser.parse_args(sys.argv[1:])
+    # FIXME (trozet): this is only needed as a workaround for CI.  Remove
+    # when CI is changed
+    if os.getenv('IMAGES', False):
+        args.image_dir = os.getenv('IMAGES')
+    if args.debug:
+        log_level = logging.DEBUG
+    else:
+        log_level = logging.INFO
+    os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
+    formatter = '%(asctime)s %(levelname)s: %(message)s'
+    logging.basicConfig(filename=args.log_file,
+                        format=formatter,
+                        datefmt='%m/%d/%Y %I:%M:%S %p',
+                        level=log_level)
+    console = logging.StreamHandler()
+    console.setLevel(log_level)
+    console.setFormatter(logging.Formatter(formatter))
+    logging.getLogger('').addHandler(console)
+    validate_deploy_args(args)
+    # Parse all settings
+    deploy_settings = DeploySettings(args.deploy_settings_file)
+    logging.info("Deploy settings are:\n {}".format(pprint.pformat(
+                 deploy_settings)))
+    net_settings = NetworkSettings(args.network_settings_file)
+    logging.info("Network settings are:\n {}".format(pprint.pformat(
+                 net_settings)))
+    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+    net_env = NetworkEnvironment(net_settings, net_env_file)
+    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+    utils.dump_yaml(dict(net_env), net_env_target)
+    ha_enabled = deploy_settings['global_params']['ha_enabled']
+    if args.virtual:
+        if args.virt_compute_ram is None:
+            compute_ram = args.virt_default_ram
+        else:
+            compute_ram = args.virt_compute_ram
+        if deploy_settings['deploy_options']['sdn_controller'] == \
+                'opendaylight' and args.virt_default_ram < 12:
+            control_ram = 12
+            logging.warning('RAM per controller is too low.  OpenDaylight '
+                            'requires at least 12GB per controller.')
+            logging.info('Increasing RAM per controller to 12GB')
+        elif args.virt_default_ram < 10:
+            control_ram = 10
+            logging.warning('RAM per controller is too low.  nosdn '
+                            'requires at least 10GB per controller.')
+            logging.info('Increasing RAM per controller to 10GB')
+        else:
+            control_ram = args.virt_default_ram
+        if ha_enabled and args.virt_compute_nodes < 2:
+            logging.debug('HA enabled, bumping number of compute nodes to 2')
+            args.virt_compute_nodes = 2
+        virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+                                      num_computes=args.virt_compute_nodes,
+                                      controller_ram=control_ram * 1024,
+                                      compute_ram=compute_ram * 1024,
+                                      vcpus=args.virt_cpus
+                                      )
+    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+
+    validate_cross_settings(deploy_settings, net_settings, inventory)
+
+    if args.quickstart:
+        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
+                                            'apex_deploy_settings.yaml')
+        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
+                        deploy_settings_file)
+        logging.info("File created: {}".format(deploy_settings_file))
+        network_settings_file = os.path.join(APEX_TEMP_DIR,
+                                             'apex_network_settings.yaml')
+        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
+                        network_settings_file)
+        logging.info("File created: {}".format(network_settings_file))
+        deploy_quickstart(args, deploy_settings_file, network_settings_file,
+                          args.inventory_file)
+    else:
+        # TODO (trozet): add logic back from:
+        # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
+        ansible_args = {
+            'virsh_enabled_networks': net_settings.enabled_network_list
+        }
+        ansible_path = os.path.join(args.lib_dir, ANSIBLE_PATH)
+        utils.run_ansible(ansible_args,
+                          os.path.join(args.lib_dir,
+                                       ansible_path,
+                                       'deploy_dependencies.yml'))
+        uc_external = False
+        if 'external' in net_settings.enabled_network_list:
+            uc_external = True
+        if args.virtual:
+            # create all overcloud VMs
+            build_vms(inventory, net_settings)
+        else:
+            # Attach interfaces to jumphost for baremetal deployment
+            jump_networks = ['admin']
+            if uc_external:
+                jump_networks.append('external')
+            for network in jump_networks:
+                iface = net_settings['network'][network]['installer_vm'][
+                    'members'](0)
+                bridge = "br-{}".format(network)
+                jumphost.attach_interface_to_ovs(bridge, iface, network)
+        # Dump all settings out to temp bash files to be sourced
+        instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
+        with open(instackenv_json, 'w') as fh:
+            json.dump(inventory, fh)
+
+        # Create and configure undercloud
+        if args.debug:
+            root_pw = constants.DEBUG_OVERCLOUD_PW
+        else:
+            root_pw = None
+        undercloud = uc_lib.Undercloud(args.image_dir,
+                                       root_pw=root_pw,
+                                       external_network=uc_external)
+        undercloud.start()
+
+        # Generate nic templates
+        for role in 'compute', 'controller':
+            oc_cfg.create_nic_template(net_settings, deploy_settings, role,
+                                       args.deploy_dir, APEX_TEMP_DIR)
+        # Install Undercloud
+        undercloud.configure(net_settings,
+                             os.path.join(args.lib_dir,
+                                          ansible_path,
+                                          'configure_undercloud.yml'),
+                             APEX_TEMP_DIR)
+
+        # Prepare overcloud-full.qcow2
+        logging.info("Preparing Overcloud for deployment...")
+        sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
+        overcloud_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
+                                    root_pw=root_pw)
+        opnfv_env = os.path.join(args.deploy_dir, args.env_file)
+        overcloud_deploy.prep_env(deploy_settings, net_settings, opnfv_env,
+                                  net_env_target, APEX_TEMP_DIR)
+        overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings,
+                                           inventory, APEX_TEMP_DIR,
+                                           args.virtual, args.env_file)
+        deploy_playbook = os.path.join(args.lib_dir, ansible_path,
+                                       'deploy_overcloud.yml')
+        virt_env = 'virtual-environment.yaml'
+        bm_env = 'baremetal-environment.yaml'
+        for p_env in virt_env, bm_env:
+            shutil.copyfile(os.path.join(args.deploy_dir, p_env),
+                            os.path.join(APEX_TEMP_DIR, p_env))
+
+        # Start Overcloud Deployment
+        logging.info("Executing Overcloud Deployment...")
+        deploy_vars = dict()
+        deploy_vars['virtual'] = args.virtual
+        deploy_vars['debug'] = args.debug
+        deploy_vars['dns_server_args'] = ''
+        deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
+        deploy_vars['stackrc'] = 'source /home/stack/stackrc'
+        deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
+        for dns_server in net_settings['dns_servers']:
+            deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
+                dns_server)
+        try:
+            utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
+                              user='stack', tmp_dir=APEX_TEMP_DIR)
+            logging.info("Overcloud deployment complete")
+            os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+        except Exception:
+            logging.error("Deployment Failed.  Please check log")
+            raise
+
+        # Post install
+        logging.info("Executing post deploy configuration")
+        jumphost.configure_bridges(net_settings)
+        nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
+        deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
+            nova_output)
+        deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
+                                     'GlobalKnownHostsFile=/dev/null -o ' \
+                                     'UserKnownHostsFile=/dev/null -o ' \
+                                     'LogLevel=error'
+        deploy_vars['external_network_cmds'] = \
+            overcloud_deploy.external_network_cmds(net_settings)
+        # TODO(trozet): just parse all ds_opts as deploy vars one time
+        ds_opts = deploy_settings['deploy_options']
+        deploy_vars['gluon'] = ds_opts['gluon']
+        deploy_vars['sdn'] = ds_opts['sdn_controller']
+        for dep_option in 'yardstick', 'dovetail', 'vsperf':
+            if dep_option in ds_opts:
+                deploy_vars[dep_option] = ds_opts[dep_option]
+            else:
+                deploy_vars[dep_option] = False
+        deploy_vars['dataplane'] = ds_opts['dataplane']
+        overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
+        if ds_opts['congress']:
+            deploy_vars['congress_datasources'] = \
+                overcloud_deploy.create_congress_cmds(overcloudrc)
+            deploy_vars['congress'] = True
+        else:
+            deploy_vars['congress'] = False
+        # TODO(trozet): this is probably redundant with getting external
+        # network info from undercloud.py
+        if 'external' in net_settings.enabled_network_list:
+            ext_cidr = net_settings['networks']['external'][0]['cidr']
+        else:
+            ext_cidr = net_settings['networks']['admin']['cidr']
+        deploy_vars['external_cidr'] = str(ext_cidr)
+        if ext_cidr.version == 6:
+            deploy_vars['external_network_ipv6'] = True
+        else:
+            deploy_vars['external_network_ipv6'] = False
+        post_undercloud = os.path.join(args.lib_dir, ansible_path,
+                                       'post_deploy_undercloud.yml')
+        logging.info("Executing post deploy configuration undercloud playbook")
+        try:
+            utils.run_ansible(deploy_vars, post_undercloud, host=undercloud.ip,
+                              user='stack', tmp_dir=APEX_TEMP_DIR)
+            logging.info("Post Deploy Undercloud Configuration Complete")
+        except Exception:
+            logging.error("Post Deploy Undercloud Configuration failed.  "
+                          "Please check log")
+            raise
+        # Post deploy overcloud node configuration
+        # TODO(trozet): just parse all ds_opts as deploy vars one time
+        deploy_vars['sfc'] = ds_opts['sfc']
+        deploy_vars['vpn'] = ds_opts['vpn']
+        # TODO(trozet): pull all logs and store in tmp dir in overcloud
+        # playbook
+        post_overcloud = os.path.join(args.lib_dir, ansible_path,
+                                      'post_deploy_overcloud.yml')
+        # Run per overcloud node
+        for node, ip in deploy_vars['overcloud_nodes'].items():
+            logging.info("Executing Post deploy overcloud playbook on "
+                         "node {}".format(node))
+            try:
+                utils.run_ansible(deploy_vars, post_overcloud, host=ip,
+                                  user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+                logging.info("Post Deploy Overcloud Configuration Complete "
+                             "for node {}".format(node))
+            except Exception:
+                logging.error("Post Deploy Overcloud Configuration failed "
+                              "for node {}. Please check log".format(node))
+                raise
+        logging.info("Apex deployment complete")
+        logging.info("Undercloud IP: {}, please connect by doing "
+                     "'opnfv-util undercloud'".format(undercloud.ip))
+        # TODO(trozet): add logging here showing controller VIP and horizon url
+if __name__ == '__main__':
+    main()
diff --git a/apex/inventory/__init__.py b/apex/inventory/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
similarity index 88%
rename from lib/python/apex/inventory.py
rename to apex/inventory/inventory.py
index 64f47b4..dd731a8 100644 (file)
@@ -7,12 +7,13 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import yaml
 import json
 import platform
 
-from .common import constants
-from .common import utils
+import yaml
+
+from apex.common import constants
+from apex.common import utils
 
 
 class Inventory(dict):
@@ -60,7 +61,7 @@ class Inventory(dict):
         super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
 
         # verify number of nodes
-        if ha and len(self['nodes']) < 5:
+        if ha and len(self['nodes']) < 5 and not virtual:
             raise InventoryException('You must provide at least 5 '
                                      'nodes for HA baremetal deployment')
         elif len(self['nodes']) < 2:
@@ -79,16 +80,6 @@ class Inventory(dict):
     def dump_instackenv_json(self):
         print(json.dumps(dict(self), sort_keys=True, indent=4))
 
-    def dump_bash(self, path=None):
-        """
-        Prints settings for bash consumption.
-
-        If optional path is provided, bash string will be written to the file
-        instead of stdout.
-        """
-        bash_str = "{}={}\n".format('root_disk_list', str(self.root_device))
-        utils.write_str(bash_str, path)
-
 
 class InventoryException(Exception):
     def __init__(self, value):
diff --git a/apex/network/__init__.py b/apex/network/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apex/network/jumphost.py b/apex/network/jumphost.py
new file mode 100644 (file)
index 0000000..81562c7
--- /dev/null
@@ -0,0 +1,172 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+import re
+import shutil
+import subprocess
+
+from apex.common.exceptions import ApexDeployException
+from apex.network import ip_utils
+
+NET_MAP = {
+    'admin': 'br-admin',
+    'tenant': 'br-tenant',
+    'external': 'br-external',
+    'storage': 'br-storage',
+    'api': 'br-api'
+}
+
+
+def configure_bridges(ns):
+    """
+    Configures IP on jumphost bridges
+    :param ns: network_settings
+    :return: None
+    """
+    bridge_networks = ['admin']
+    if 'external' in ns.enabled_network_list:
+        bridge_networks.append('external')
+    for network in bridge_networks:
+        if network == 'external':
+            net_config = ns['networks'][network][0]
+        else:
+            net_config = ns['networks'][network]
+        cidr = net_config['cidr']
+        interface = ip_utils.get_interface(NET_MAP[network], cidr.version)
+
+        if interface:
+            logging.info("Bridge {} already configured with IP: {}".format(
+                NET_MAP[network], interface.ip))
+        else:
+            logging.info("Will configure IP for {}".format(NET_MAP[network]))
+            ovs_ip = net_config['overcloud_ip_range'][1]
+            if cidr.version == 6:
+                ipv6_br_path = "/proc/sys/net/ipv6/conf/{}/disable_" \
+                               "ipv6".format(NET_MAP[network])
+                try:
+                    subprocess.check_call('echo', 0, '>', ipv6_br_path)
+                except subprocess.CalledProcessError:
+                    logging.error("Unable to enable ipv6 on "
+                                  "bridge {}".format(NET_MAP[network]))
+                    raise
+            try:
+                ip_prefix = "{}/{}".format(ovs_ip, cidr.prefixlen)
+                subprocess.check_call(['ip', 'addr', 'add', ip_prefix, 'dev',
+                                      NET_MAP[network]])
+                subprocess.check_call(['ip', 'link', 'set', 'up', NET_MAP[
+                    network]])
+                logging.info("IP configured: {} on bridge {}".format(ovs_ip,
+                             NET_MAP[network]))
+            except subprocess.CalledProcessError:
+                logging.error("Unable to configure IP address on "
+                              "bridge {}".format(NET_MAP[network]))
+
+
+def attach_interface_to_ovs(bridge, interface, network):
+    """
+    Attaches jumphost interface to OVS for baremetal deployments
+    :param bridge: bridge to attach to
+    :param interface: interface to attach to bridge
+    :param network: Apex network type for these interfaces
+    :return: None
+    """
+
+    net_cfg_path = '/etc/sysconfig/network-scripts'
+    if_file = os.path.join(net_cfg_path, "ifcfg-{}".format(interface))
+    ovs_file = os.path.join(net_cfg_path, "ifcfg-{}".format(bridge))
+
+    logging.info("Attaching interface: {} to bridge: {} on network {}".format(
+        bridge, interface, network
+    ))
+
+    try:
+        output = subprocess.check_output(['ovs-vsctl', 'list-ports', bridge],
+                                         stderr=subprocess.STDOUT)
+        if bridge in output:
+            logging.debug("Interface already attached to bridge")
+            return
+    except subprocess.CalledProcessError as e:
+        logging.error("Unable to dump ports for bridge: {}".format(bridge))
+        logging.error("Error output: {}".format(e.output))
+        raise
+
+    if not os.path.isfile(if_file):
+        logging.error("Interface ifcfg not found: {}".format(if_file))
+        raise FileNotFoundError("Interface file missing: {}".format(if_file))
+
+    ifcfg_params = {
+        'IPADDR': '',
+        'NETMASK': '',
+        'GATEWAY': '',
+        'METRIC': '',
+        'DNS1': '',
+        'DNS2': '',
+        'PREFIX': ''
+    }
+    with open(if_file, 'r') as fh:
+        interface_output = fh.read()
+
+    for param in ifcfg_params.keys():
+        match = re.search("{}=(.*)\n".format(param), interface_output)
+        if match:
+            ifcfg_params[param] = match.group(1)
+
+    if not ifcfg_params['IPADDR']:
+        logging.error("IPADDR missing in {}".format(if_file))
+        raise ApexDeployException("IPADDR missing in {}".format(if_file))
+    if not (ifcfg_params['NETMASK'] or ifcfg_params['PREFIX']):
+        logging.error("NETMASK/PREFIX missing in {}".format(if_file))
+        raise ApexDeployException("NETMASK/PREFIX missing in {}".format(
+            if_file))
+    if network == 'external' and not ifcfg_params['GATEWAY']:
+        logging.error("GATEWAY is required to be in {} for external "
+                      "network".format(if_file))
+        raise ApexDeployException("GATEWAY is required to be in {} for "
+                                  "external network".format(if_file))
+
+    shutil.move(if_file, "{}.orig".format(if_file))
+    if_content = """DEVICE={}
+DEVICETYPE=ovs
+TYPE=OVSPort
+PEERDNS=no
+BOOTPROTO=static
+NM_CONTROLLED=no
+ONBOOT=yes
+OVS_BRIDGE={}
+PROMISC=yes""".format(interface, bridge)
+
+    bridge_content = """DEVICE={}
+DEVICETYPE=ovs
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes""".format(bridge)
+    peer_dns = 'no'
+    for param, value in ifcfg_params.items():
+        if value:
+            bridge_content += "\n{}={}".format(param, value)
+            if param == 'DNS1' or param == 'DNS2':
+                peer_dns = 'yes'
+    bridge_content += "\n{}={}".format('PEERDNS', peer_dns)
+
+    logging.debug("New interface file content:\n{}".format(if_content))
+    logging.debug("New bridge file content:\n{}".format(bridge_content))
+    with open(if_file, 'w') as fh:
+        fh.write(if_content)
+    with open(ovs_file, 'w') as fh:
+        fh.write(bridge_content)
+    logging.info("New network ifcfg files written")
+    logging.info("Restarting Linux networking")
+    try:
+        subprocess.check_call(['systemctl', 'restart', 'network'])
+    except subprocess.CalledProcessError:
+        logging.error("Failed to restart Linux networking")
+        raise
similarity index 98%
rename from lib/python/apex/network_environment.py
rename to apex/network/network_environment.py
index dd9530b..c2e9991 100644 (file)
@@ -7,21 +7,20 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import yaml
 import re
-from .common.constants import (
+
+import yaml
+
+from apex.settings.network_settings import NetworkSettings
+from apex.common.constants import (
     CONTROLLER,
     COMPUTE,
     ADMIN_NETWORK,
     TENANT_NETWORK,
     STORAGE_NETWORK,
     EXTERNAL_NETWORK,
-    API_NETWORK,
-    CONTROLLER_PRE,
-    COMPUTE_PRE,
-    PRE_CONFIG_DIR
+    API_NETWORK
 )
-from .network_settings import NetworkSettings
 
 HEAT_NONE = 'OS::Heat::None'
 PORTS = '/ports'
diff --git a/apex/overcloud/__init__.py b/apex/overcloud/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apex/overcloud/config.py b/apex/overcloud/config.py
new file mode 100644 (file)
index 0000000..6e116de
--- /dev/null
@@ -0,0 +1,76 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+"""
+Utilities for generating overcloud configuration
+"""
+
+import logging
+import os
+
+from jinja2 import Environment
+from jinja2 import FileSystemLoader
+from apex.common.exceptions import ApexDeployException
+
+
+def create_nic_template(network_settings, deploy_settings, role, template_dir,
+                        target_dir):
+    """
+    Creates NIC heat template files
+    :param ns: Network settings
+    :param ds: Deploy Settings
+    :param role: controller or compute
+    :param template_dir: directory where base templates are stored
+    :param target_dir: to store rendered nic template
+    :return:
+    """
+    # TODO(trozet): rather than use Jinja2 to build these files, use with py
+    if role not in ['controller', 'compute']:
+        raise ApexDeployException("Invalid type for overcloud node: {"
+                                  "}".format(type))
+    logging.info("Creating template for {}".format(role))
+    template_file = 'nics-template.yaml.jinja2'
+    nets = network_settings.get('networks')
+    env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
+    template = env.get_template(template_file)
+    ds = deploy_settings.get('deploy_options')
+    ext_net = 'br-ex'
+    ovs_dpdk_br = ''
+    if ds['dataplane'] == 'fdio':
+        nets['tenant']['nic_mapping'][role]['phys_type'] = 'vpp_interface'
+        if ds['sdn_controller'] == 'opendaylight':
+            nets['external'][0]['nic_mapping'][role]['phys_type'] = \
+                'vpp_interface'
+            ext_net = 'vpp_interface'
+    elif ds['dataplane'] == 'ovs_dpdk':
+        ovs_dpdk_br = 'br-phy'
+    if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
+            .get('uio-driver')):
+        nets['tenant']['nic_mapping'][role]['uio-driver'] =\
+            ds['performance'][role.title()]['vpp']['uio-driver']
+        if ds['sdn_controller'] == 'opendaylight':
+            nets['external'][0]['nic_mapping'][role]['uio-driver'] =\
+                ds['performance'][role.title()]['vpp']['uio-driver']
+    if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
+            .get('interface-options')):
+        nets['tenant']['nic_mapping'][role]['interface-options'] =\
+            ds['performance'][role.title()]['vpp']['interface-options']
+
+    template_output = template.render(
+        nets=nets,
+        role=role,
+        external_net_af=network_settings.get_ip_addr_family(),
+        external_net_type=ext_net,
+        ovs_dpdk_bridge=ovs_dpdk_br)
+
+    logging.debug("Template output: {}".format(template_output))
+    target = os.path.join(target_dir, "{}.yaml".format(role))
+    with open(target, "w") as f:
+        f.write(template_output)
+    logging.info("Wrote template {}".format(target))
diff --git a/apex/overcloud/overcloud_deploy.py b/apex/overcloud/overcloud_deploy.py
new file mode 100644 (file)
index 0000000..3c10846
--- /dev/null
@@ -0,0 +1,556 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import base64
+import fileinput
+import logging
+import os
+import re
+import shutil
+import uuid
+import struct
+import time
+
+from apex.common import constants as con
+from apex.common.exceptions import ApexDeployException
+from apex.common import parsers
+from apex.virtual import virtual_utils as virt_utils
+from cryptography.hazmat.primitives import serialization as \
+    crypto_serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.backends import default_backend as \
+    crypto_default_backend
+
+
+SDN_FILE_MAP = {
+    'opendaylight': {
+        'sfc': 'opendaylight_sfc.yaml',
+        'vpn': 'neutron-bgpvpn-opendaylight.yaml',
+        'gluon': 'gluon.yaml',
+        'vpp': {
+            'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
+            'default': 'neutron-opendaylight-honeycomb.yaml'
+        },
+        'default': 'neutron-opendaylight.yaml',
+    },
+    'onos': {
+        'sfc': 'neutron-onos-sfc.yaml',
+        'default': 'neutron-onos.yaml'
+    },
+    'ovn': 'neutron-ml2-ovn.yaml',
+    False: {
+        'vpp': 'neutron-ml2-vpp.yaml',
+        'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
+    }
+}
+
+OTHER_FILE_MAP = {
+    'tacker': 'enable_tacker.yaml',
+    'congress': 'enable_congress.yaml',
+    'barometer': 'enable_barometer.yaml',
+    'rt_kvm': 'enable_rt_kvm.yaml'
+}
+
+OVS_PERF_MAP = {
+    'HostCpusList': 'dpdk_cores',
+    'NeutronDpdkCoreList': 'pmd_cores',
+    'NeutronDpdkSocketMemory': 'socket_memory',
+    'NeutronDpdkMemoryChannels': 'memory_channels'
+}
+
+OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
+OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
+ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
+                      ".noarch.rpm"
+
+
+def build_sdn_env_list(ds, sdn_map, env_list=None):
+    if env_list is None:
+        env_list = list()
+    for k, v in sdn_map.items():
+        if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+            if isinstance(v, dict):
+                env_list.extend(build_sdn_env_list(ds, v))
+            else:
+                env_list.append(os.path.join(con.THT_ENV_DIR, v))
+        elif isinstance(v, tuple):
+                if ds[k] == v[0]:
+                    env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
+    if len(env_list) == 0:
+        try:
+            env_list.append(os.path.join(
+                con.THT_ENV_DIR, sdn_map[ds['sdn_controller']]['default']))
+        except KeyError:
+            logging.warning("Unable to find default file for SDN")
+
+    return env_list
+
+
+def create_deploy_cmd(ds, ns, inv, tmp_dir,
+                      virtual, env_file='opnfv-environment.yaml'):
+
+    logging.info("Creating deployment command")
+    deploy_options = [env_file, 'network-environment.yaml']
+    ds_opts = ds['deploy_options']
+    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+    # TODO(trozet): make sure rt kvm file is in tht dir
+    for k, v in OTHER_FILE_MAP.items():
+        if k in ds_opts and ds_opts[k]:
+            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+
+    if ds_opts['ceph']:
+        prep_storage_env(ds, tmp_dir)
+        deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                           'storage-environment.yaml'))
+    if ds['global_params']['ha_enabled']:
+        deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                           'puppet-pacemaker.yaml'))
+
+    if virtual:
+        deploy_options.append('virtual-environment.yaml')
+    else:
+        deploy_options.append('baremetal-environment.yaml')
+
+    nodes = inv['nodes']
+    num_control = 0
+    num_compute = 0
+    for node in nodes:
+        if node['capabilities'] == 'profile:control':
+            num_control += 1
+        elif node['capabilities'] == 'profile:compute':
+            num_compute += 1
+        else:
+            # TODO(trozet) do we want to allow capabilities to not exist?
+            logging.error("Every node must include a 'capabilities' key "
+                          "tagged with either 'profile:control' or "
+                          "'profile:compute'")
+            raise ApexDeployException("Node missing capabilities "
+                                      "key: {}".format(node))
+    if num_control == 0 or num_compute == 0:
+        logging.error("Detected 0 control or compute nodes.  Control nodes: "
+                      "{}, compute nodes{}".format(num_control, num_compute))
+        raise ApexDeployException("Invalid number of control or computes")
+    cmd = "openstack overcloud deploy --templates --timeout {} " \
+          "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
+    # build cmd env args
+    for option in deploy_options:
+        cmd += " -e {}".format(option)
+    cmd += " --ntp-server {}".format(ns['ntp'][0])
+    cmd += " --control-scale {}".format(num_control)
+    cmd += " --compute-scale {}".format(num_compute)
+    cmd += ' --control-flavor control --compute-flavor compute'
+    logging.info("Deploy command set: {}".format(cmd))
+
+    with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
+        fh.write(cmd)
+    return cmd
+
+
+def prep_image(ds, img, tmp_dir, root_pw=None):
+    """
+    Locates sdn image and preps for deployment.
+    :param ds: deploy settings
+    :param img: sdn image
+    :param tmp_dir: dir to store modified sdn image
+    :param root_pw: password to configure for overcloud image
+    :return: None
+    """
+    # TODO(trozet): Come up with a better way to organize this logic in this
+    # function
+    logging.info("Preparing image: {} for deployment".format(img))
+    if not os.path.isfile(img):
+        logging.error("Missing SDN image {}".format(img))
+        raise ApexDeployException("Missing SDN image file: {}".format(img))
+
+    ds_opts = ds['deploy_options']
+    virt_cmds = list()
+    sdn = ds_opts['sdn_controller']
+    # remove the neutron openvswitch-agent service units when an SDN
+    # controller is used; needed due to rhbz #1436021
+    # (fixed in systemd-219-37.el7)
+    if sdn is not False:
+        logging.info("Disabling neutron openvswitch-agent")
+        virt_cmds.extend([{
+            con.VIRT_RUN_CMD:
+                "rm -f /etc/systemd/system/multi-user.target.wants/"
+                "neutron-openvswitch-agent.service"},
+            {
+            con.VIRT_RUN_CMD:
+                "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
+                ".service"
+        }])
+
+    if ds_opts['vpn']:
+        virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
+        logging.info("ZRPC and Quagga enabled")
+
+    dataplane = ds_opts['dataplane']
+    if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
+        logging.info("Enabling kernel modules for dpdk")
+        # file to module mapping
+        uio_types = {
+            os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
+            os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
+        }
+        for mod_file, mod in uio_types.items():
+            with open(mod_file, 'w') as fh:
+                fh.write('#!/bin/bash\n')
+                fh.write('exec /sbin/modprobe {}'.format(mod))
+
+            virt_cmds.extend([
+                {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
+                    mod_file)},
+                {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
+                                   "{}".format(os.path.basename(mod_file))}
+            ])
+    if root_pw:
+        pw_op = "password:{}".format(root_pw)
+        virt_cmds.append({con.VIRT_PW: pw_op})
+    if ds_opts['sfc'] and dataplane == 'ovs':
+        virt_cmds.extend([
+            {con.VIRT_RUN_CMD: "yum -y install "
+                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
+                               "{}".format(OVS_NSH_KMOD_RPM)},
+            {con.VIRT_RUN_CMD: "yum upgrade -y "
+                               "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
+                               "{}".format(OVS_NSH_RPM)}
+        ])
+    if dataplane == 'fdio':
+        # Patch neutron to use the OVS external interface for the router
+        # and add the generic Linux namespace interface driver
+        virt_cmds.append(
+            {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
+                               "-p1 < neutron-patch-NSDriver.patch"})
+
+    if sdn == 'opendaylight':
+        if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
+            virt_cmds.extend([
+                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
+                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
+                    con.DEFAULT_ODL_VERSION)},
+                {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
+                {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
+                                   "/root/puppet-opendaylight-"
+                                   "{}.tar.gz".format(ds_opts['odl_version'])}
+            ])
+        elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
+            virt_cmds.extend([
+                {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
+                {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
+                    ODL_NETVIRT_VPP_RPM)}
+            ])
+
+    if sdn == 'ovn':
+        virt_cmds.extend([
+            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
+                               "*openvswitch*"},
+            {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
+                               "*openvswitch*"}
+        ])
+
+    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+    shutil.copyfile(img, tmp_oc_image)
+    logging.debug("Temporary overcloud image stored as: {}".format(
+        tmp_oc_image))
+    virt_utils.virt_customize(virt_cmds, tmp_oc_image)
+    logging.info("Overcloud image customization complete")
+
+
+def make_ssh_key():
+    """
+    Creates public and private ssh keys with 1024 bit RSA encryption
+    :return: private, public key
+    """
+    key = rsa.generate_private_key(
+        backend=crypto_default_backend(),
+        public_exponent=65537,
+        key_size=1024
+    )
+
+    private_key = key.private_bytes(
+        crypto_serialization.Encoding.PEM,
+        crypto_serialization.PrivateFormat.PKCS8,
+        crypto_serialization.NoEncryption())
+    public_key = key.public_key().public_bytes(
+        crypto_serialization.Encoding.OpenSSH,
+        crypto_serialization.PublicFormat.OpenSSH
+    )
+    pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
+    return private_key.decode('utf-8'), pub_key
+
+
+def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
+    """
+    Creates modified opnfv/network environments for deployment
+    :param ds: deploy settings
+    :param ns: network settings
+    :param opnfv_env: file path for opnfv-environment file
+    :param net_env: file path for network-environment file
+    :param tmp_dir: Apex tmp dir
+    :return:
+    """
+
+    logging.info("Preparing opnfv-environment and network-environment files")
+    ds_opts = ds['deploy_options']
+    tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
+    shutil.copyfile(opnfv_env, tmp_opnfv_env)
+    tenant_nic_map = ns['networks']['tenant']['nic_mapping']
+    tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
+    tenant_comp_nic = tenant_nic_map['compute']['members'][0]
+
+    # SSH keys
+    private_key, public_key = make_ssh_key()
+
+    # Make easier/faster variables to index in the file editor
+    if 'performance' in ds_opts:
+        perf = True
+        # vpp
+        if 'vpp' in ds_opts['performance']['Compute']:
+            perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
+        else:
+            perf_vpp_comp = None
+        if 'vpp' in ds_opts['performance']['Controller']:
+            perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
+        else:
+            perf_vpp_ctrl = None
+
+        # ovs
+        if 'ovs' in ds_opts['performance']['Compute']:
+            perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
+        else:
+            perf_ovs_comp = None
+
+        # kernel
+        if 'kernel' in ds_opts['performance']['Compute']:
+            perf_kern_comp = ds_opts['performance']['Compute']['kernel']
+        else:
+            perf_kern_comp = None
+    else:
+        perf = False
+
+    # Modify OPNFV environment
+    for line in fileinput.input(tmp_opnfv_env, inplace=True):
+        line = line.strip('\n')
+        if 'CloudDomain' in line:
+            print("  CloudDomain: {}".format(ns['domain_name']))
+        elif ds_opts['sdn_controller'] == 'opendaylight' and \
+                'odl_vpp_routing_node' in ds_opts and ds_opts[
+                'odl_vpp_routing_node'] != 'dvr':
+            if 'opendaylight::vpp_routing_node' in line:
+                print("    opendaylight::vpp_routing_node: {}.{}".format(
+                    ds_opts['odl_vpp_routing_node'], ns['domain_name']))
+            elif 'ControllerExtraConfig' in line:
+                print("  ControllerExtraConfig:\n    "
+                      "tripleo::profile::base::neutron::agents::honeycomb"
+                      "::interface_role_mapping: ['{}:tenant-"
+                      "interface']".format(tenant_ctrl_nic))
+            elif 'NovaComputeExtraConfig' in line:
+                print("  NovaComputeExtraConfig:\n    "
+                      "tripleo::profile::base::neutron::agents::honeycomb"
+                      "::interface_role_mapping: ['{}:tenant-"
+                      "interface']".format(tenant_comp_nic))
+            else:
+                print(line)
+
+        elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
+            if 'NeutronVPPAgentPhysnets' in line:
+                print("  NeutronVPPAgentPhysnets: 'datacentre:{}'".format(
+                    tenant_ctrl_nic))
+            else:
+                print(line)
+        elif perf:
+            line_printed = False
+            for role in 'NovaCompute', 'Controller':
+                if role == 'NovaCompute':
+                    perf_opts = perf_vpp_comp
+                else:
+                    perf_opts = perf_vpp_ctrl
+                cfg = "{}ExtraConfig".format(role)
+                if cfg in line and perf_opts:
+                    if 'main-core' in perf_opts:
+                        print("  {}:\n"
+                              "    fdio::vpp_cpu_main_core: '{}'"
+                              "".format(cfg, perf_opts['main-core']))
+                        line_printed = True
+                        break
+                    elif 'corelist-workers' in perf_opts:
+                        print("  {}:\n"
+                              "    fdio::vpp_cpu_corelist_workers: '{}'"
+                              "".format(cfg, perf_opts['corelist-workers']))
+                        line_printed = True
+                        break
+
+            # kernel args
+            # (FIXME) use compute's kernel settings for all nodes for now.
+            if 'ComputeKernelArgs' in line and perf_kern_comp:
+                kernel_args = ''
+                for k, v in perf_kern_comp.items():
+                    kernel_args += "{}={} ".format(k, v)
+                if kernel_args:
+                    print("  ComputeKernelArgs: '{}'".format(kernel_args))
+                    line_printed = True
+            elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
+                for k, v in OVS_PERF_MAP.items():
+                    if k in line and v in perf_ovs_comp:
+                        print("  {}: {}".format(k, perf_ovs_comp[v]))
+                        line_printed = True
+
+            if not line_printed:
+                print(line)
+        elif 'replace_private_key' in line:
+            print("      key: '{}'".format(private_key))
+        elif 'replace_public_key' in line:
+            print("      key: '{}'".format(public_key))
+        else:
+            print(line)
+
+    logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+
+    # Modify Network environment
+    for line in fileinput.input(net_env, inplace=True):
+        line = line.strip('\n')
+        if ds_opts['dataplane'] == 'ovs_dpdk':
+            if 'ComputeExtraConfigPre' in line:
+                print('  OS::TripleO::ComputeExtraConfigPre: '
+                      './ovs-dpdk-preconfig.yaml')
+            else:
+                print(line)
+        elif perf and perf_kern_comp:
+            if 'resource_registry' in line:
+                print("resource_registry:\n"
+                      "  OS::TripleO::NodeUserData: first-boot.yaml")
+            elif 'NovaSchedulerDefaultFilters' in line:
+                print("  NovaSchedulerDefaultFilters: 'RamFilter,"
+                      "ComputeFilter,AvailabilityZoneFilter,"
+                      "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
+                      "NUMATopologyFilter'")
+            else:
+                print(line)
+        else:
+            print(line)
+
+    logging.info("network-environment file written to {}".format(net_env))
+
+
+def generate_ceph_key():
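+    # cephx-style secret: little-endian header (type, creation secs, nsecs,
+    # key length) followed by 16 random bytes, base64 encoded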
+    key = os.urandom(16)
+    header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
+    return base64.b64encode(header + key)
+
+
+def prep_storage_env(ds, tmp_dir):
+    """
+    Creates storage environment file for deployment.  Source file is copied by
+    undercloud playbook to host.
+    :param ds:
+    :param tmp_dir:
+    :return:
+    """
+    ds_opts = ds['deploy_options']
+    storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
+    if not os.path.isfile(storage_file):
+        logging.error("storage-environment file is not in tmp directory: {}. "
+                      "Check if file was copied from "
+                      "undercloud".format(tmp_dir))
+        raise ApexDeployException("storage-environment file not copied from "
+                                  "undercloud")
+    for line in fileinput.input(storage_file, inplace=True):
+        line = line.strip('\n')
+        if 'CephClusterFSID' in line:
+            print("  CephClusterFSID: {}".format(str(uuid.uuid4())))
+        elif 'CephMonKey' in line:
+            print("  CephMonKey: {}".format(generate_ceph_key().decode(
+                'utf-8')))
+        elif 'CephAdminKey' in line:
+            print("  CephAdminKey: {}".format(generate_ceph_key().decode(
+                'utf-8')))
+        else:
+            print(line)
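+    # when a dedicated ceph device is configured, map it to an OSD entry
+    # via ExtraConfig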
+    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+        with open(storage_file, 'a') as fh:
+            fh.write('  ExtraConfig:\n')
+            fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
+                ds_opts['ceph_device']
+            ))
+
+
+def external_network_cmds(ns):
+    """
+    Generates external network openstack commands
+    :param ns: network settings
+    :return: list of commands to configure external network
+    """
+    if 'external' in ns.enabled_network_list:
+        net_config = ns['networks']['external'][0]
+        external = True
+        pool_start, pool_end = net_config['floating_ip_range']
+    else:
+        net_config = ns['networks']['admin']
+        external = False
+        pool_start, pool_end = ns['apex']['networks']['admin'][
+            'introspection_range']
+    nic_config = net_config['nic_mapping']
+    gateway = net_config['gateway']
+    cmds = list()
+    # create network command
+    if nic_config['compute']['vlan'] == 'native':
+        ext_type = 'flat'
+    else:
+        ext_type = "vlan --provider-segment {}".format(nic_config[
+                                                       'compute']['vlan'])
+    cmds.append("openstack network create external --project service "
+                "--external --provider-network-type {} "
+                "--provider-physical-network datacentre".format(ext_type))
+    # create subnet command
+    cidr = net_config['cidr']
+    subnet_cmd = "openstack subnet create external-subnet --project " \
+                 "service --network external --no-dhcp --gateway {} " \
+                 "--allocation-pool start={},end={} --subnet-range " \
+                 "{}".format(gateway, pool_start, pool_end, str(cidr))
+    if external and cidr.version == 6:
+        subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
+                      '--ipv6-address-mode slaac'
+    cmds.append(subnet_cmd)
+    logging.debug("Neutron external network commands determined "
+                  "as: {}".format(cmds))
+    return cmds
+
+
+def create_congress_cmds(overcloud_file):
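+    """
+    Builds the congress datasource configuration commands for each driver
+    using credentials parsed from the overcloudrc file.
+    :param overcloud_file: path to overcloudrc
+    :return: list of command strings, one per datasource driver
+    """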
+    drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
+    overcloudrc = parsers.parse_overcloudrc(overcloud_file)
+    logging.info("Creating congress commands")
+    try:
+        ds_cfg = [
+            "username={}".format(overcloudrc['OS_USERNAME']),
+            "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
+            "password={}".format(overcloudrc['OS_PASSWORD']),
+            "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
+        ]
+    except KeyError:
+        logging.error("Unable to find all keys required for congress in "
+                      "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
+                      "OS_PASSWORD, OS_AUTH_URL.  Please check overcloudrc "
+                      "file: {}".format(overcloud_file))
+        raise
+    cmds = list()
+    ds_cfg = '--config ' + ' --config '.join(ds_cfg)
+
+    for driver in drivers:
+        if driver == 'doctor':
+            cmd = "{} \"{}\"".format(driver, driver)
+        else:
+            cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
+        if driver == 'nova':
+            cmd += ' --config api_version="2.34"'
+        logging.debug("Congress command created: {}".format(cmd))
+        cmds.append(cmd)
+    return cmds
diff --git a/apex/settings/__init__.py b/apex/settings/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
similarity index 92%
rename from lib/python/apex/deploy_settings.py
rename to apex/settings/deploy_settings.py
index 0618594..c8e347b 100644 (file)
@@ -9,9 +9,9 @@
 
 
 import yaml
-import logging
 
-from .common import utils
+from apex.common import utils
+from apex.common import constants
 
 REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'odl_version',
@@ -37,6 +37,7 @@ OPT_DEPLOY_SETTINGS = ['performance',
 VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
 VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
 VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'master']
 
 
 class DeploySettings(dict):
@@ -48,7 +49,6 @@ class DeploySettings(dict):
     deployment script move to python.
     """
     def __init__(self, filename):
-        init_dict = {}
         if isinstance(filename, str):
             with open(filename, 'r') as deploy_settings_file:
                 init_dict = yaml.safe_load(deploy_settings_file)
@@ -103,8 +103,16 @@ class DeploySettings(dict):
                     self['deploy_options'][req_set] = 'ovs'
                 elif req_set == 'ceph':
                     self['deploy_options'][req_set] = True
+                elif req_set == 'odl_version':
+                    self['deploy_options'][req_set] = \
+                        constants.DEFAULT_ODL_VERSION
                 else:
                     self['deploy_options'][req_set] = False
+            elif req_set == 'odl_version' and self['deploy_options'][
+                    'odl_version'] not in VALID_ODL_VERSIONS:
+                raise DeploySettingsException(
+                    "Invalid ODL version: {}".format(self[deploy_options][
+                        'odl_version']))
 
         if 'performance' in deploy_options:
             if not isinstance(deploy_options['performance'], dict):
@@ -171,21 +179,6 @@ class DeploySettings(dict):
                                                                    value)
         return bash_str
 
-    def dump_bash(self, path=None):
-        """
-        Prints settings for bash consumption.
-
-        If optional path is provided, bash string will be written to the file
-        instead of stdout.
-        """
-        bash_str = ''
-        for key, value in self['global_params'].items():
-            bash_str += "{}={}\n".format(key, value)
-        if 'performance' in self['deploy_options']:
-            bash_str += self._dump_performance()
-        bash_str += self._dump_deploy_options_array()
-        utils.write_str(bash_str, path)
-
 
 class DeploySettingsException(Exception):
     def __init__(self, value):
similarity index 88%
rename from lib/python/apex/network_settings.py
rename to apex/settings/network_settings.py
index 79b0a9d..1487007 100644 (file)
@@ -7,14 +7,14 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import yaml
-import logging
 import ipaddress
-
+import logging
 from copy import copy
-from .common import utils
-from . import ip_utils
-from .common.constants import (
+
+import yaml
+
+from apex.common import utils
+from apex.common.constants import (
     CONTROLLER,
     COMPUTE,
     ROLES,
@@ -25,6 +25,7 @@ from .common.constants import (
     EXTERNAL_NETWORK,
     OPNFV_NETWORK_TYPES,
 )
+from apex.network import ip_utils
 
 
 class NetworkSettings(dict):
@@ -50,7 +51,7 @@ class NetworkSettings(dict):
         super().__init__(init_dict)
 
         if 'apex' in self:
-            # merge two dics Nondestructively
+            # merge two dicts Non-destructively
             def merge(pri, sec):
                 for key, val in sec.items():
                     if key in pri:
@@ -116,6 +117,8 @@ class NetworkSettings(dict):
 
         if 'dns-domain' not in self:
             self['domain_name'] = DOMAIN_NAME
+        else:
+            self['domain_name'] = self['dns-domain']
         self['dns_servers'] = self.get('dns_nameservers', DNS_SERVERS)
         self['ntp_servers'] = self.get('ntp', NTP_SERVER)
 
@@ -266,6 +269,7 @@ class NetworkSettings(dict):
             - gateway
         """
         if network == ADMIN_NETWORK:
+            # FIXME: _config_ip function does not exist!
             self._config_ip(network, None, 'provisioner_ip', 1)
             self._config_ip_range(network=network,
                                   ip_range='dhcp_range',
@@ -274,6 +278,7 @@ class NetworkSettings(dict):
                                   ip_range='introspection_range',
                                   start_offset=11, count=9)
         elif network == EXTERNAL_NETWORK:
+            # FIXME: _config_ip function does not exist!
             self._config_ip(network, None, 'provisioner_ip', 1)
             self._config_ip_range(network=network,
                                   ip_range='floating_ip_range',
@@ -302,44 +307,6 @@ class NetworkSettings(dict):
 
         logging.info("Config Gateway: {} {}".format(network, gateway))
 
-    def dump_bash(self, path=None):
-        """
-        Prints settings for bash consumption.
-
-        If optional path is provided, bash string will be written to the file
-        instead of stdout.
-        """
-        def flatten(name, obj, delim=','):
-            """
-            flatten lists to delim separated strings
-            flatten dics to underscored key names and string values
-            """
-            if isinstance(obj, list):
-                return "{}=\'{}\'\n".format(name,
-                                            delim.join(map(lambda x: str(x),
-                                                           obj)))
-            elif isinstance(obj, dict):
-                flat_str = ''
-                for k in obj:
-                    flat_str += flatten("{}_{}".format(name, k), obj[k])
-                return flat_str
-            elif isinstance(obj, str):
-                return "{}='{}'\n".format(name, obj)
-            else:
-                return "{}={}\n".format(name, str(obj))
-
-        bash_str = ''
-        for network in self.enabled_network_list:
-            _network = self.get_network(network)
-            bash_str += flatten(network, _network)
-        bash_str += flatten('enabled_network_list',
-                            self.enabled_network_list, ' ')
-        bash_str += flatten('ip_addr_family', self.get_ip_addr_family())
-        bash_str += flatten('dns_servers', self['dns_servers'], ' ')
-        bash_str += flatten('domain_name', self['dns-domain'], ' ')
-        bash_str += flatten('ntp_server', self['ntp_servers'][0], ' ')
-        utils.write_str(bash_str, path)
-
     def get_ip_addr_family(self,):
         """
         Returns IP address family for current deployment.
diff --git a/apex/tests/__init__.py b/apex/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apex/tests/constants.py b/apex/tests/constants.py
new file mode 100644 (file)
index 0000000..47e63e2
--- /dev/null
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TEST_CONFIG_DIR = 'config'
+TEST_BUILD_DIR = 'build'
+TEST_PLAYBOOK_DIR = 'playbooks'
diff --git a/apex/tests/playbooks/test_playbook.yaml b/apex/tests/playbooks/test_playbook.yaml
new file mode 100644 (file)
index 0000000..800d8fd
--- /dev/null
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+  tasks:
+    - debug:
+        msg: "Test playbook"
similarity index 95%
rename from tests/test_apex_clean.py
rename to apex/tests/test_apex_clean.py
index 2a436a7..d0b8791 100644 (file)
 import mock
 import pyipmi
 import pyipmi.chassis
-
-from apex import clean_nodes
 from mock import patch
 from nose import tools
 
+from apex import clean_nodes
+
 
 class TestClean(object):
     @classmethod
@@ -35,7 +35,7 @@ class TestClean(object):
         with mock.patch.object(pyipmi.Session, 'establish') as mock_method:
             with patch.object(pyipmi.chassis.Chassis,
                               'chassis_control_power_down') as mock_method2:
-                clean_nodes('config/inventory.yaml')
+                clean_nodes('apex/tests/config/inventory.yaml')
 
         tools.assert_equal(mock_method.call_count, 5)
         tools.assert_equal(mock_method2.call_count, 5)
similarity index 58%
rename from tests/test_apex_common_utils.py
rename to apex/tests/test_apex_common_utils.py
index 9459865..357ad1b 100644 (file)
@@ -7,9 +7,17 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import ipaddress
 import nose.tools
+import os
 
 from apex.common import utils
+from apex.settings.network_settings import NetworkSettings
+from apex.tests.constants import (
+    TEST_CONFIG_DIR,
+    TEST_PLAYBOOK_DIR)
+
+NET_SETS = os.path.join(TEST_CONFIG_DIR, 'network', 'network_settings.yaml')
 
 
 class TestCommonUtils(object):
@@ -34,6 +42,18 @@ class TestCommonUtils(object):
         nose.tools.assert_equal(utils.str2bool("YES"), True)
 
     def test_parse_yaml(self):
-        nose.tools.assert_is_instance(
-            utils.parse_yaml('../config/network/network_settings.yaml'),
-            dict)
+        nose.tools.assert_is_instance(utils.parse_yaml(NET_SETS), dict)
+
+    def test_dict_to_string(self):
+        net_settings = NetworkSettings(NET_SETS)
+        output = utils.dict_objects_to_str(net_settings)
+        nose.tools.assert_is_instance(output, dict)
+        for k, v in output.items():
+            nose.tools.assert_is_instance(k, str)
+            nose.tools.assert_not_is_instance(v, ipaddress.IPv4Address)
+
+    def test_run_ansible(self):
+        playbook = 'apex/tests/playbooks/test_playbook.yaml'
+        nose.tools.assert_equal(
+            utils.run_ansible(None, os.path.join(playbook),
+                              dry_run=True), None)
similarity index 83%
rename from tests/test_apex_deploy_settings.py
rename to apex/tests/test_apex_deploy_settings.py
index 00eb274..312c1f3 100644 (file)
@@ -8,15 +8,16 @@
 ##############################################################################
 
 # https://docs.python.org/3/library/io.html
-import io
+import os
 import tempfile
 
-from apex.deploy_settings import DeploySettings
-from apex.deploy_settings import DeploySettingsException
-
 from nose.tools import assert_equal
-from nose.tools import assert_raises
 from nose.tools import assert_is_instance
+from nose.tools import assert_raises
+
+from apex.settings.deploy_settings import DeploySettings
+from apex.settings.deploy_settings import DeploySettingsException
+from apex.tests.constants import TEST_CONFIG_DIR
 
 deploy_files = ('deploy_settings.yaml',
                 'os-nosdn-nofeature-noha.yaml',
@@ -80,7 +81,7 @@ class TestIpUtils(object):
 
     def test_init(self):
         for f in deploy_files:
-            ds = DeploySettings('../config/deploy/{}'.format(f))
+            ds = DeploySettings(os.path.join(TEST_CONFIG_DIR, 'deploy', f))
             ds = DeploySettings(ds)
 
     def test__validate_settings(self):
@@ -94,14 +95,7 @@ class TestIpUtils(object):
             finally:
                 f.close()
 
-    def test_dump_bash(self):
-        # the performance file has the most use of the function
-        # so using that as the test case
-        ds = DeploySettings('../config/deploy/os-nosdn-performance-ha.yaml')
-        assert_equal(ds.dump_bash(), None)
-        assert_equal(ds.dump_bash(path='/dev/null'), None)
-
-    def test_exception(sefl):
+    def test_exception(self):
         e = DeploySettingsException("test")
         print(e)
         assert_is_instance(e, DeploySettingsException)
similarity index 72%
rename from tests/test_apex_inventory.py
rename to apex/tests/test_apex_inventory.py
index ec75856..ed95c53 100644 (file)
@@ -7,21 +7,25 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import os
 import sys
+from io import StringIO
 
-from apex.inventory import Inventory
-from apex.inventory import InventoryException
-
+from nose.tools import assert_equal
 from nose.tools import assert_is_instance
 from nose.tools import assert_raises
-from nose.tools import assert_equal
 from nose.tools import assert_regexp_matches
-from io import StringIO
+
+from apex import Inventory
+from apex.inventory.inventory import InventoryException
+from apex.tests.constants import TEST_CONFIG_DIR
 
 inventory_files = ('intel_pod2_settings.yaml',
                    'nokia_pod1_settings.yaml',
                    'pod_example_settings.yaml')
 
+files_dir = os.path.join(TEST_CONFIG_DIR, 'inventory')
+
 
 class TestInventory(object):
     @classmethod
@@ -40,7 +44,7 @@ class TestInventory(object):
 
     def test_init(self):
         for f in inventory_files:
-            i = Inventory('../config/inventory/{}'.format(f))
+            i = Inventory(os.path.join(files_dir, f))
             assert_equal(i.dump_instackenv_json(), None)
 
         # test virtual
@@ -59,23 +63,7 @@ class TestInventory(object):
         assert_raises(InventoryException,
                       Inventory, i, ha=False)
 
-    def test_exception(sefl):
+    def test_exception(self):
         e = InventoryException("test")
         print(e)
         assert_is_instance(e, InventoryException)
-
-    def test_dump_bash_default(self):
-        i = Inventory('../config/inventory/intel_pod2_settings.yaml')
-        out = StringIO()
-        sys.stdout = out
-        i.dump_bash()
-        output = out.getvalue().strip()
-        assert_regexp_matches(output, 'root_disk_list=sda')
-
-    def test_dump_bash_set_root_device(self):
-        i = Inventory('../config/inventory/pod_example_settings.yaml')
-        out = StringIO()
-        sys.stdout = out
-        i.dump_bash()
-        output = out.getvalue().strip()
-        assert_regexp_matches(output, 'root_disk_list=sdb')
similarity index 94%
rename from tests/test_apex_ip_utils.py
rename to apex/tests/test_apex_ip_utils.py
index e5e84b6..04a1b2b 100644 (file)
@@ -7,27 +7,24 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import re
 import ipaddress
+import re
+from ipaddress import IPv4Address
+from ipaddress import ip_network
 
-from apex.ip_utils import IPUtilsException
-from apex.ip_utils import get_interface
-from apex.ip_utils import find_gateway
-from apex.ip_utils import get_ip
-from apex.ip_utils import get_ip_range
-from apex.ip_utils import _validate_ip_range
-
-from nose.tools import assert_true
-from nose.tools import assert_false
 from nose.tools import assert_equal
-from nose.tools import assert_raises
+from nose.tools import assert_false
 from nose.tools import assert_is_instance
+from nose.tools import assert_raises
 from nose.tools import assert_regexp_matches
+from nose.tools import assert_true
 
-from ipaddress import IPv4Address
-from ipaddress import IPv6Address
-from ipaddress import ip_network
-
+from apex.network.ip_utils import IPUtilsException
+from apex.network.ip_utils import _validate_ip_range
+from apex.network.ip_utils import find_gateway
+from apex.network.ip_utils import get_interface
+from apex.network.ip_utils import get_ip
+from apex.network.ip_utils import get_ip_range
 
 ip4_pattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
 ip4_range_pattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3},\d{1,'
similarity index 77%
rename from tests/test_apex_network_environment.py
rename to apex/tests/test_apex_network_environment.py
index b4d7e71..5047adb 100644 (file)
@@ -7,29 +7,26 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import ipaddress
+import os
 
 from copy import copy
 
+from nose.tools import assert_equal
+from nose.tools import assert_is_instance
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+
 from apex.common.constants import (
     EXTERNAL_NETWORK,
     TENANT_NETWORK,
     STORAGE_NETWORK,
     API_NETWORK,
-    CONTROLLER)
-from apex.network_settings import NetworkSettings
-from apex.network_environment import (
-    NetworkEnvironment,
-    NetworkEnvException,
-    EXTERNAL_RESOURCES,
-    TENANT_RESOURCES,
-    STORAGE_RESOURCES,
-    API_RESOURCES)
-
-from nose.tools import assert_equal
-from nose.tools import assert_raises
-from nose.tools import assert_is_instance
-from nose.tools import assert_not_equal
+    NET_ENV_FILE)
+from apex import NetworkEnvironment
+from apex.network.network_environment import NetworkEnvException
+from apex import NetworkSettings
+from apex.tests.constants import TEST_CONFIG_DIR
+from apex.tests.constants import TEST_BUILD_DIR
 
 
 class TestNetworkEnvironment(object):
@@ -37,11 +34,12 @@ class TestNetworkEnvironment(object):
     def setup_class(klass):
         """This method is run once for each class before any tests are run"""
         klass.ns = NetworkSettings(
-            '../config/network/network_settings.yaml')
+            os.path.join(TEST_CONFIG_DIR, 'network/network_settings.yaml'))
         klass.ns_vlans = NetworkSettings(
-            '../config/network/network_settings_vlans.yaml')
+            os.path.join(TEST_CONFIG_DIR,
+                         'network/network_settings_vlans.yaml'))
         klass.ns_ipv6 = NetworkSettings(
-            '../config/network/network_settings_v6.yaml')
+            os.path.join(TEST_CONFIG_DIR, 'network/network_settings_v6.yaml'))
 
     @classmethod
     def teardown_class(klass):
@@ -55,12 +53,12 @@ class TestNetworkEnvironment(object):
 
     def test_init(self):
         assert_raises(NetworkEnvException, NetworkEnvironment,
-                      None, '../build/network-environment.yaml')
+                      None, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
 
     def test_netenv_settings_external_network_vlans(self):
         # test vlans
         ne = NetworkEnvironment(self.ns_vlans,
-                                '../build/network-environment.yaml')
+                                os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         assert_equal(ne['parameter_defaults']['NeutronExternalNetworkBridge'],
                      '""')
         assert_equal(ne['parameter_defaults']['ExternalNetworkVlanID'], 501)
@@ -68,7 +66,7 @@ class TestNetworkEnvironment(object):
     def test_netenv_settings_external_network_ipv6(self):
         # Test IPv6
         ne = NetworkEnvironment(self.ns_ipv6,
-                                '../build/network-environment.yaml')
+                                os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         regstr = ne['resource_registry']['OS::TripleO::Network::External']
         assert_equal(regstr.split('/')[-1], 'external_v6.yaml')
 
@@ -76,14 +74,14 @@ class TestNetworkEnvironment(object):
         ns = copy(self.ns)
         # Test removing EXTERNAL_NETWORK
         ns.enabled_network_list.remove(EXTERNAL_NETWORK)
-        ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+        ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         regstr = ne['resource_registry']['OS::TripleO::Network::External']
         assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
 
     def test_netenv_settings_tenant_network_vlans(self):
         # test vlans
         ne = NetworkEnvironment(self.ns_vlans,
-                                '../build/network-environment.yaml')
+                                os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         assert_equal(ne['parameter_defaults']['TenantNetworkVlanID'], 401)
 
 # Apex is does not support v6 tenant networks
@@ -101,20 +99,20 @@ class TestNetworkEnvironment(object):
         ns = copy(self.ns)
         # Test removing TENANT_NETWORK
         ns.enabled_network_list.remove(TENANT_NETWORK)
-        ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+        ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         regstr = ne['resource_registry']['OS::TripleO::Network::Tenant']
         assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
 
     def test_netenv_settings_storage_network_vlans(self):
         # test vlans
         ne = NetworkEnvironment(self.ns_vlans,
-                                '../build/network-environment.yaml')
+                                os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         assert_equal(ne['parameter_defaults']['StorageNetworkVlanID'], 201)
 
     def test_netenv_settings_storage_network_v6(self):
         # Test IPv6
         ne = NetworkEnvironment(self.ns_ipv6,
-                                '../build/network-environment.yaml')
+                                os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         regstr = ne['resource_registry']['OS::TripleO::Network::Storage']
         assert_equal(regstr.split('/')[-1], 'storage_v6.yaml')
 
@@ -122,7 +120,7 @@ class TestNetworkEnvironment(object):
         ns = copy(self.ns)
         # Test removing STORAGE_NETWORK
         ns.enabled_network_list.remove(STORAGE_NETWORK)
-        ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+        ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         regstr = ne['resource_registry']['OS::TripleO::Network::Storage']
         assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
 
@@ -132,7 +130,7 @@ class TestNetworkEnvironment(object):
         ns['networks'][API_NETWORK]['cidr'] = '10.11.12.0/24'
         ns = NetworkSettings(ns)
         # test vlans
-        ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+        ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
 
     def test_netenv_settings_api_network_vlans(self):
@@ -140,25 +138,26 @@ class TestNetworkEnvironment(object):
         ns['networks'][API_NETWORK]['enabled'] = True
         ns = NetworkSettings(ns)
         # test vlans
-        ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+        ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
 
     def test_netenv_settings_api_network_v6(self):
         # Test IPv6
         ne = NetworkEnvironment(self.ns_ipv6,
-                                '../build/network-environment.yaml')
+                                os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         regstr = ne['resource_registry']['OS::TripleO::Network::InternalApi']
         assert_equal(regstr.split('/')[-1], 'internal_api_v6.yaml')
 
     def test_netenv_settings_api_network_removed(self):
         ns = copy(self.ns)
         # API_NETWORK is not in the default network settings file
-        ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+        ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
         regstr = ne['resource_registry']['OS::TripleO::Network::InternalApi']
         assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
 
     def test_numa_configs(self):
-        ne = NetworkEnvironment(self.ns, '../build/network-environment.yaml',
+        ne = NetworkEnvironment(self.ns,
+                                os.path.join(TEST_BUILD_DIR, NET_ENV_FILE),
                                 compute_pre_config=True,
                                 controller_pre_config=True)
         assert_is_instance(ne, dict)
similarity index 81%
rename from tests/test_apex_network_settings.py
rename to apex/tests/test_apex_network_settings.py
index a1dbaf1..adff8cf 100644 (file)
@@ -7,16 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-from apex.common.constants import (
-    EXTERNAL_NETWORK,
-    STORAGE_NETWORK,
-    ADMIN_NETWORK,
-)
-
-from apex.network_settings import (
-    NetworkSettings,
-    NetworkSettingsException,
-)
+import os
 
 from nose.tools import (
     assert_equal,
@@ -24,7 +15,16 @@ from nose.tools import (
     assert_raises
 )
 
-files_dir = '../config/network/'
+from apex.common.constants import (
+    EXTERNAL_NETWORK,
+    STORAGE_NETWORK,
+    ADMIN_NETWORK,
+)
+from apex import NetworkSettings
+from apex.settings.network_settings import NetworkSettingsException
+from apex.tests.constants import TEST_CONFIG_DIR
+
+files_dir = os.path.join(TEST_CONFIG_DIR, 'network')
 
 
 class TestNetworkSettings(object):
@@ -44,12 +44,13 @@ class TestNetworkSettings(object):
 
     def test_init(self):
         assert_is_instance(
-            NetworkSettings(files_dir+'network_settings.yaml'),
+            NetworkSettings(os.path.join(files_dir, 'network_settings.yaml')),
             NetworkSettings)
 
     def test_init_vlans(self):
         assert_is_instance(
-            NetworkSettings(files_dir+'network_settings_vlans.yaml'),
+            NetworkSettings(os.path.join(files_dir,
+                                         'network_settings_vlans.yaml')),
             NetworkSettings)
 
 # TODO, v6 test is stuck
@@ -59,7 +60,7 @@ class TestNetworkSettings(object):
     #         NetworkSettings)
 
     def test_init_admin_disabled_or_missing(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         # remove admin, apex section will re-add it
         ns['networks'].pop('admin', None)
         assert_raises(NetworkSettingsException, NetworkSettings, ns)
@@ -69,24 +70,19 @@ class TestNetworkSettings(object):
         assert_raises(NetworkSettingsException, NetworkSettings, ns)
 
     def test_init_collapse_storage(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         # remove storage
         ns['networks'].pop('storage', None)
         assert_is_instance(NetworkSettings(ns), NetworkSettings)
 
     def test_init_missing_dns_domain(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         # remove storage
         ns.pop('dns-domain', None)
         assert_is_instance(NetworkSettings(ns), NetworkSettings)
 
-    def test_dump_bash(self):
-        ns = NetworkSettings('../config/network/network_settings.yaml')
-        assert_equal(ns.dump_bash(), None)
-        assert_equal(ns.dump_bash(path='/dev/null'), None)
-
     def test_get_network_settings(self):
-        ns = NetworkSettings('../config/network/network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         assert_is_instance(ns, NetworkSettings)
         for role in ['controller', 'compute']:
             nic_index = 0
@@ -97,11 +93,11 @@ class TestNetworkSettings(object):
                 nic_index += 1
 
     def test_get_enabled_networks(self):
-        ns = NetworkSettings('../config/network/network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         assert_is_instance(ns.enabled_network_list, list)
 
     def test_invalid_nic_members(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
         # set duplicate nic
         storage_net_nicmap['controller']['members'][0] = 'eth0'
@@ -111,7 +107,7 @@ class TestNetworkSettings(object):
         assert_raises(NetworkSettingsException, NetworkSettings, ns)
 
     def test_missing_vlan(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
         # remove vlan from storage net
         storage_net_nicmap['compute'].pop('vlan', None)
@@ -127,7 +123,7 @@ class TestNetworkSettings(object):
 #        assert_is_instance(NetworkSettings(ns), NetworkSettings)
 
     def test_admin_fail_auto_detect(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         # remove cidr and installer_vm to fail autodetect
         ns['networks'][ADMIN_NETWORK].pop('cidr', None)
         ns['networks'][ADMIN_NETWORK].pop('installer_vm', None)
@@ -139,7 +135,7 @@ class TestNetworkSettings(object):
         assert_is_instance(e, NetworkSettingsException)
 
     def test_config_ip(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         # set the provisioner ip to None to force _gen_ip to generate one
         ns['networks'][ADMIN_NETWORK]['installer_vm']['ip'] = None
         ns['networks'][EXTERNAL_NETWORK][0]['installer_vm']['ip'] = None
@@ -151,7 +147,7 @@ class TestNetworkSettings(object):
                      '192.168.37.1')
 
     def test_config_gateway(self):
-        ns = NetworkSettings(files_dir+'network_settings.yaml')
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
         # set the gateway ip to None to force _config_gateway to generate one
         ns['networks'][EXTERNAL_NETWORK][0]['gateway'] = None
         # Now rebuild network settings object and check for a repopulated value
diff --git a/apex/undercloud/__init__.py b/apex/undercloud/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
new file mode 100644 (file)
index 0000000..7efc2cb
--- /dev/null
@@ -0,0 +1,206 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import logging
+import os
+import shutil
+import time
+
+from apex.virtual import virtual_utils as virt_utils
+from apex.virtual import configure_vm as vm_lib
+from apex.common import constants
+from apex.common import utils
+
+
+class ApexUndercloudException(Exception):
+    pass
+
+
+class Undercloud:
+    """
+    This class represents an Apex Undercloud VM
+    """
+    def __init__(self, image_path, root_pw=None, external_network=False):
+        self.ip = None
+        self.root_pw = root_pw
+        self.external_net = external_network
+        self.volume = os.path.join(constants.LIBVIRT_VOLUME_PATH,
+                                   'undercloud.qcow2')
+        self.image_path = image_path
+        self.vm = None
+        if Undercloud._get_vm():
+            logging.error("Undercloud VM already exists.  Please clean "
+                          "before creating")
+            raise ApexUndercloudException("Undercloud VM already exists!")
+        self.create()
+
+    @staticmethod
+    def _get_vm():
+        conn = libvirt.open('qemu:///system')
+        try:
+            vm = conn.lookupByName('undercloud')
+            return vm
+        except libvirt.libvirtError:
+            logging.debug("No undercloud VM exists")
+
+    def create(self):
+        networks = ['admin']
+        if self.external_net:
+            networks.append('external')
+        self.vm = vm_lib.create_vm(name='undercloud',
+                                   image=self.volume,
+                                   baremetal_interfaces=networks,
+                                   direct_boot='overcloud-full',
+                                   kernel_args=['console=ttyS0',
+                                                'root=/dev/sda'],
+                                   default_network=True)
+        self.setup_volumes()
+        self.inject_auth()
+
+    def _set_ip(self):
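+        # look up the first IPv4 address from the VM's DHCP leases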
+        ip_out = self.vm.interfaceAddresses(
+            libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE, 0)
+        if ip_out:
+            for (name, val) in ip_out.items():
+                for ipaddr in val['addrs']:
+                    if ipaddr['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4:
+                        self.ip = ipaddr['addr']
+                        return True
+
+    def start(self):
+        """
+        Start Undercloud VM
+        :return: None
+        """
+        if self.vm.isActive():
+            logging.info("Undercloud already started")
+        else:
+            logging.info("Starting undercloud")
+            self.vm.create()
+            # give 10 seconds to come up
+            time.sleep(10)
+        # set IP
+        for x in range(5):
+            if self._set_ip():
+                logging.info("Undercloud started.  IP Address: {}".format(
+                    self.ip))
+                break
+            logging.debug("Did not find undercloud IP in {} "
+                          "attempts...".format(x))
+            time.sleep(10)
+        else:
+            logging.error("Cannot find IP for Undercloud")
+            raise ApexUndercloudException(
+                "Unable to find IP for undercloud.  Check if VM booted "
+                "correctly")
+
+    def configure(self, net_settings, playbook, apex_temp_dir):
+        """
+        Configures undercloud VM
+        :return:
+        """
+        # TODO(trozet): If undercloud install fails we can add a retry
+        logging.info("Configuring Undercloud...")
+        # run ansible
+        ansible_vars = Undercloud.generate_config(net_settings)
+        ansible_vars['apex_temp_dir'] = apex_temp_dir
+        utils.run_ansible(ansible_vars, playbook, host=self.ip, user='stack')
+        logging.info("Undercloud installed!")
+
+    def setup_volumes(self):
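+        # copy the undercloud/overcloud boot images from image_path into
+        # the libvirt volume path, replacing any existing copies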
+        for img_file in ('overcloud-full.vmlinuz', 'overcloud-full.initrd',
+                         'undercloud.qcow2'):
+            src_img = os.path.join(self.image_path, img_file)
+            dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
+            if not os.path.isfile(src_img):
+                raise ApexUndercloudException(
+                    "Required source file does not exist:{}".format(src_img))
+            if os.path.exists(dest_img):
+                os.remove(dest_img)
+            shutil.copyfile(src_img, dest_img)
+
+        # TODO(trozet): check if resize is needed; right now size is 50GB.
+        # there is a lib called vminspect which has some dependencies and is
+        # not yet available in pip.  Consider switching to this lib later.
+        # execute ansible playbook
+
+    def inject_auth(self):
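+        # virt-customize the undercloud image: set the root password (if
+        # given) and authorize the host's root SSH key for root and stack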
+        virt_ops = list()
+        # virt-customize keys/pws
+        if self.root_pw:
+            pw_op = "password:{}".format(self.root_pw)
+            virt_ops.append({constants.VIRT_PW: pw_op})
+        # ssh key setup
+        virt_ops.append({constants.VIRT_RUN_CMD:
+                        'mkdir -p /root/.ssh'})
+        virt_ops.append({constants.VIRT_UPLOAD:
+                         '/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'})
+        run_cmds = [
+            'chmod 600 /root/.ssh/authorized_keys',
+            'restorecon /root/.ssh/authorized_keys',
+            'cp /root/.ssh/authorized_keys /home/stack/.ssh/',
+            'chown stack:stack /home/stack/.ssh/authorized_keys',
+            'chmod 600 /home/stack/.ssh/authorized_keys'
+        ]
+        for cmd in run_cmds:
+            virt_ops.append({constants.VIRT_RUN_CMD: cmd})
+        virt_utils.virt_customize(virt_ops, self.volume)
+
+    @staticmethod
+    def generate_config(ns):
+        """
+        Generates a dictionary of settings for configuring undercloud
+        :param ns: network settings to derive undercloud settings
+        :return: dictionary of settings
+        """
+
+        ns_admin = ns['networks']['admin']
+        intro_range = ns['apex']['networks']['admin']['introspection_range']
+        config = dict()
+        config['undercloud_config'] = [
+            "enable_ui false",
+            "undercloud_update_packages false",
+            "undercloud_debug false",
+            "undercloud_hostname undercloud.{}".format(ns['dns-domain']),
+            "local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
+                                    str(ns_admin['cidr']).split('/')[1]),
+            "network_gateway {}".format(str(ns_admin['installer_vm']['ip'])),
+            "network_cidr {}".format(str(ns_admin['cidr'])),
+            "dhcp_start {}".format(str(ns_admin['dhcp_range'][0])),
+            "dhcp_end {}".format(str(ns_admin['dhcp_range'][1])),
+            "inspection_iprange {}".format(','.join(intro_range))
+        ]
+
+        config['ironic_config'] = [
+            "disk_utils iscsi_verify_attempts 30",
+            "disk_partitioner check_device_max_retries 40"
+        ]
+
+        config['nova_config'] = [
+            "dns_domain {}".format(ns['dns-domain']),
+            "dhcp_domain {}".format(ns['dns-domain'])
+        ]
+
+        config['neutron_config'] = [
+            "dns_domain {}".format(ns['dns-domain']),
+        ]
+        # FIXME(trozet): possible bug here with not using external network
+        ns_external = ns['networks']['external'][0]
+        config['external_network'] = {
+            "vlan": ns_external['installer_vm']['vlan'],
+            "ip": ns_external['installer_vm']['ip'],
+            "prefix": str(ns_external['cidr']).split('/')[1],
+            "enabled": ns_external['enabled']
+        }
+
+        # FIXME (trozet): for now hardcoding aarch64 to false
+        config['aarch64'] = False
+
+        return config
diff --git a/apex/virtual/__init__.py b/apex/virtual/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
new file mode 100755 (executable)
index 0000000..3af7d1e
--- /dev/null
@@ -0,0 +1,206 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import logging
+import math
+import os
+import random
+
+MAX_NUM_MACS = math.trunc(0xff / 2)
+
+
+def generate_baremetal_macs(count=1):
+    """Generate an Ethernet MAC address suitable for baremetal testing."""
+    # NOTE(dprince): We generate our own bare metal MAC address's here
+    # instead of relying on libvirt so that we can ensure the
+    # locally administered bit is set low. (The libvirt default is
+    # to set the 2nd MSB high.) This effectively allows our
+    # fake baremetal VMs to more accurately behave like real hardware
+    # and fixes issues with bridge/DHCP configurations which rely
+    # on the fact that bridges assume the MAC address of the lowest
+    # attached NIC.
+    # MACs generated for a given machine will also be in sequential
+    # order, which matches how most BM machines are laid out as well.
+    # Additionally we increment each MAC by two places.
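+    # e.g. generate_baremetal_macs(2) could return values like
+    # ['00:1a:2b:3c:4d:10', '00:1a:2b:3c:4d:12'] (illustrative): sequential,
+    # two apart, with the locally administered bit low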
+    macs = []
+
+    if count > MAX_NUM_MACS:
+        raise ValueError(
+            "The maximum number of MACs supported is %i." % MAX_NUM_MACS)
+
+    base_nums = [0x00,
+                 random.randint(0x00, 0xff),
+                 random.randint(0x00, 0xff),
+                 random.randint(0x00, 0xff),
+                 random.randint(0x00, 0xff)]
+    base_mac = ':'.join(map(lambda x: "%02x" % x, base_nums))
+
+    start = random.randint(0x00, 0xff)
+    if (start + (count * 2)) > 0xff:
+        # leave room to generate macs in sequence
+        start = 0xff - count * 2
+    for num in range(0, count * 2, 2):
+        mac = start + num
+        macs.append(base_mac + ":" + ("%02x" % mac))
+    return macs
+
+
+def create_vm_storage(domain, vol_path='/var/lib/libvirt/images'):
+    volume_name = domain + '.qcow2'
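+    # owner/group 107 below are the qemu user/group IDs on CentOS, and the
+    # 41G capacity matches the 'disk' size advertised by
+    # virtual_utils.generate_inventory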
+    stgvol_xml = """
+    <volume>
+      <name>{}</name>
+      <allocation>0</allocation>
+      <capacity unit="G">41</capacity>
+      <target>
+        <format type='qcow2'/>
+        <path>{}</path>
+        <permissions>
+          <owner>107</owner>
+          <group>107</group>
+          <mode>0744</mode>
+          <label>virt_image_t</label>
+        </permissions>
+      </target>
+    </volume>""".format(volume_name, os.path.join(vol_path, volume_name))
+
+    conn = libvirt.open('qemu:///system')
+    pool = conn.storagePoolLookupByName('default')
+    if pool is None:
+        # TODO(trozet) create default storage pool
+        raise Exception("Default libvirt storage pool missing")
+
+    if pool.isActive() == 0:
+        pool.create()
+    try:
+        vol = pool.storageVolLookupByName(volume_name)
+        vol.wipe(0)
+        vol.delete(0)
+    except libvirt.libvirtError as e:
+        if e.get_error_code() != libvirt.VIR_ERR_NO_STORAGE_VOL:
+            raise
+    new_vol = pool.createXML(stgvol_xml)
+    if new_vol is None:
+        raise Exception("Unable to create new volume")
+    logging.debug("Created new storage volume: {}".format(volume_name))
+
+
+def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
+              arch='x86_64', engine='kvm', memory=8192, bootdev='network',
+              cpus=4, nic_driver='virtio', macs=None, direct_boot=None,
+              kernel_args=None, default_network=False,
+              template_dir='/usr/share/opnfv-apex'):
+    # guard against a shared mutable default: a list default for macs would
+    # accumulate generated MACs across calls and reuse them for later VMs
+    if macs is None:
+        macs = []
+    # TODO(trozet): fix name here to be image since it is full path of qcow2
+    create_vm_storage(name)
+    with open(os.path.join(template_dir, 'domain.xml'), 'r') as f:
+        source_template = f.read()
+    imagefile = os.path.realpath(image)
+    memory = int(memory) * 1024
+    params = {
+        'name': name,
+        'imagefile': imagefile,
+        'engine': engine,
+        'arch': arch,
+        'memory': str(memory),
+        'cpus': str(cpus),
+        'bootdev': bootdev,
+        'network': '',
+        'enable_serial_console': '',
+        'direct_boot': '',
+        'kernel_args': '',
+        'user_interface': '',
+    }
+
+    # Configure the bus type for the target disk device
+    params['diskbus'] = diskbus
+    nicparams = {
+        'nicdriver': nic_driver,
+    }
+    if default_network:
+        params['network'] = """
+      <!-- regular natted network, for access to the vm -->
+      <interface type='network'>
+        <source network='default'/>
+        <model type='%(nicdriver)s'/>
+      </interface>""" % nicparams
+    else:
+        params['network'] = ''
+    while len(macs) < len(baremetal_interfaces):
+        macs += generate_baremetal_macs(1)
+
+    params['bm_network'] = ""
+    for bm_interface, mac in zip(baremetal_interfaces, macs):
+        bm_interface_params = {
+            'bminterface': bm_interface,
+            'bmmacaddress': mac,
+            'nicdriver': nic_driver,
+        }
+        params['bm_network'] += """
+          <!-- bridged 'bare metal' network on %(bminterface)s -->
+          <interface type='network'>
+            <mac address='%(bmmacaddress)s'/>
+            <source network='%(bminterface)s'/>
+            <model type='%(nicdriver)s'/>
+          </interface>""" % bm_interface_params
+
+    params['enable_serial_console'] = """
+        <serial type='pty'>
+          <target port='0'/>
+        </serial>
+        <console type='pty'>
+          <target type='serial' port='0'/>
+        </console>
+        """
+    if direct_boot:
+        params['direct_boot'] = """
+        <kernel>/var/lib/libvirt/images/%(direct_boot)s.vmlinuz</kernel>
+        <initrd>/var/lib/libvirt/images/%(direct_boot)s.initrd</initrd>
+        """ % {'direct_boot': direct_boot}
+    if kernel_args:
+        params['kernel_args'] = """
+        <cmdline>%s</cmdline>
+        """ % ' '.join(kernel_args)
+
+    if arch == 'aarch64':
+
+        params['direct_boot'] += """
+        <loader readonly='yes' \
+        type='pflash'>/usr/share/AAVMF/AAVMF_CODE.fd</loader>
+        <nvram>/var/lib/libvirt/qemu/nvram/centos7.0_VARS.fd</nvram>
+        """
+        params['user_interface'] = """
+        <controller type='virtio-serial' index='0'>
+          <address type='virtio-mmio'/>
+        </controller>
+        <serial type='pty'>
+          <target port='0'/>
+        </serial>
+        <console type='pty'>
+          <target type='serial' port='0'/>
+        </console>
+        <channel type='unix'>
+          <target type='virtio' name='org.qemu.guest_agent.0'/>
+          <address type='virtio-serial' controller='0' bus='0' port='1'/>
+        </channel>
+        """
+    else:
+        params['user_interface'] = """
+        <input type='mouse' bus='ps2'/>
+        <graphics type='vnc' port='-1' autoport='yes'/>
+        <video>
+          <model type='cirrus' vram='9216' heads='1'/>
+        </video>
+        """
+
+    libvirt_template = source_template % params
+    logging.debug("libvirt template is {}".format(libvirt_template))
+    conn = libvirt.open('qemu:///system')
+    vm = conn.defineXML(libvirt_template)
+    logging.info("Created machine %s with UUID %s" % (name, vm.UUIDString()))
+    return vm
diff --git a/apex/virtual/virtual_utils.py b/apex/virtual/virtual_utils.py
new file mode 100644 (file)
index 0000000..5ebb058
--- /dev/null
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import copy
+import iptc
+import logging
+import os
+import pprint
+import subprocess
+
+from apex.common import utils
+from apex.virtual import configure_vm as vm_lib
+from virtualbmc import manager as vbmc_lib
+
+DEFAULT_RAM = 8192
+DEFAULT_PM_PORT = 6230
+DEFAULT_USER = 'admin'
+DEFAULT_PASS = 'password'
+DEFAULT_VIRT_IP = '192.168.122.1'
+
+
+def generate_inventory(target_file, ha_enabled=False, num_computes=1,
+                       controller_ram=DEFAULT_RAM, arch='x86_64',
+                       compute_ram=DEFAULT_RAM, vcpus=4):
+    """
+    Generates inventory file for virtual deployments
+    :param target_file: path of the inventory YAML file to write
+    :param ha_enabled: when True, generates 3 controllers instead of 1
+    :param num_computes: number of compute nodes to generate
+    :param controller_ram: RAM (MB) per controller node
+    :param arch: CPU architecture of the nodes
+    :param compute_ram: RAM (MB) per compute node
+    :param vcpus: number of vCPUs per node
+    :return: None
+    """
+
+    node = {'mac_address': '',
+            'ipmi_ip': DEFAULT_VIRT_IP,
+            'ipmi_user': DEFAULT_USER,
+            'ipmi_pass': DEFAULT_PASS,
+            'pm_type': 'pxe_ipmitool',
+            'pm_port': '',
+            'cpu': vcpus,
+            'memory': DEFAULT_RAM,
+            'disk': 41,
+            'arch': arch,
+            'capabilities': ''
+            }
+
+    inv_output = {'nodes': {}}
+    if ha_enabled:
+        num_ctrlrs = 3
+    else:
+        num_ctrlrs = 1
+
+    for idx in range(num_ctrlrs + num_computes):
+        tmp_node = copy.deepcopy(node)
+        tmp_node['mac_address'] = vm_lib.generate_baremetal_macs(1)[0]
+        tmp_node['pm_port'] = DEFAULT_PM_PORT + idx
+        if idx < num_ctrlrs:
+            tmp_node['capabilities'] = 'profile:control'
+            tmp_node['memory'] = controller_ram
+        else:
+            tmp_node['capabilities'] = 'profile:compute'
+            tmp_node['memory'] = compute_ram
+        inv_output['nodes']['node{}'.format(idx)] = copy.deepcopy(tmp_node)
+
+    utils.dump_yaml(inv_output, target_file)
+
+    logging.info('Virtual environment file created: {}'.format(target_file))
+
+
+def host_setup(node):
+    """
+    Handles configuring vbmc and firewalld/iptables
+    :param node: dictionary of domain names and ports for ipmi
+    :return:
+    """
+    vbmc_manager = vbmc_lib.VirtualBMCManager()
+    for name, port in node.items():
+        vbmc_manager.add(username=DEFAULT_USER, password=DEFAULT_PASS,
+                         port=port, address=DEFAULT_VIRT_IP, domain_name=name,
+                         libvirt_uri='qemu:///system',
+                         libvirt_sasl_password=False,
+                         libvirt_sasl_username=False)
+
+        # TODO(trozet): add support for firewalld
+        subprocess.call(['systemctl', 'stop', 'firewalld'])
+
+        # add an iptables rule to open this domain's IPMI UDP port
+        rule = iptc.Rule()
+        rule.protocol = 'udp'
+        match = rule.create_match('udp')
+        match.dport = str(port)
+        rule.add_match(match)
+        rule.target = iptc.Target(rule, "ACCEPT")
+        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
+        chain.insert_rule(rule)
+        try:
+            subprocess.check_call(['vbmc', 'start', name])
+            logging.debug("Started vbmc for domain {}".format(name))
+        except subprocess.CalledProcessError:
+            logging.error("Failed to start vbmc for {}".format(name))
+            raise
+    logging.debug('vbmcs setup: {}'.format(vbmc_manager.list()))
+
+
+def virt_customize(ops, target):
+    """
+    Helper function to virt customize disks
+    :param ops: list of operations and arguments
+    :param target: target disk to modify
+    :return: None
+    """
+    logging.info("Virt customizing target disk: {}".format(target))
+    virt_cmd = ['virt-customize']
+    for op in ops:
+        for op_cmd, op_arg in op.items():
+            virt_cmd.append(op_cmd)
+            virt_cmd.append(op_arg)
+    virt_cmd.append('-a')
+    virt_cmd.append(target)
+    if not os.path.isfile(target):
+        raise FileNotFoundError(target)
+    my_env = os.environ.copy()
+    my_env['LIBGUESTFS_BACKEND'] = 'direct'
+    logging.debug("Virt-customizing with: \n{}".format(virt_cmd))
+    try:
+        logging.debug(subprocess.check_output(virt_cmd, env=my_env,
+                                              stderr=subprocess.STDOUT))
+    except subprocess.CalledProcessError as e:
+        logging.error("Error executing virt-customize: {}".format(
+                      pprint.pformat(e.output)))
+        raise
index 43eae37..74e7265 100644 (file)
@@ -79,7 +79,7 @@ $(RPMREL):
        rpmbuild --clean -ba rpm_specs/opnfv-apex-release.spec $(RPM_DIR_ARGS) -D "_release $(shell echo $(RELEASE) | tr -d '_-')"
 
 $(BUILD_DIR)/opnfv-apex-common.tar.gz:
-       pushd ../ && git archive --format=tar.gz --prefix=opnfv-apex-common-$(RPMVERS)/ HEAD > $(BUILD_DIR)/opnfv-apex-common.tar.gz
+       pushd ../ && git archive --format=tar.gz --prefix=opnfv-apex-$(RPMVERS)/ HEAD > $(BUILD_DIR)/opnfv-apex-common.tar.gz
 
 .PHONY: common-rpm-check
 common-rpm-check: $(BUILD_DIR)/opnfv-apex-common.tar.gz
@@ -99,14 +99,7 @@ $(RPMCOM):
 
 .PHONY: python-tests
 python-tests:
-       # clean previous coverage data
-       rm -rf ../tests/.coverage
-       rm -rf ../tests/htmlcov
-       # run nose tests
-       cd ../tests && PYTHONPATH=../lib/python/ nosetests-3.4 . --with-coverage --cover-package apex --cover-package apex_python_utils --cover-html --cover-min-percentage 90
-
-       # generate reports
-       cd ../tests && coverage3 report --include '*lib/python/*' -m
+       tox -e py35
 
 #######################
 #  PYTHON PEP8 CHECK  #
@@ -114,8 +107,7 @@ python-tests:
 
 .PHONY: python-pep8-check
 python-pep8-check:
-       pep8 ../lib/python
-       pep8 ../tests
+       tox -e pep8
 
 #############
 #  YAMLLINT #
similarity index 100%
rename from lib/installer/domain.xml
rename to build/domain.xml
index ccb100f..42bc42f 100644 (file)
@@ -1,7 +1,9 @@
-Name:          opnfv-apex-common
+%global srcname opnfv-apex
+
+Name:          python3-%{srcname}
 Version:       5.0
 Release:       %{_release}
-Summary:       Scripts for OPNFV deployment using RDO Manager
+Summary:       Scripts for OPNFV deployment using Apex
 
 Group:         System Environment
 License:       Apache 2.0
@@ -13,23 +15,25 @@ BuildRequires:  python-docutils python34-devel
 Requires:       opnfv-apex-sdn opnfv-apex-undercloud openvswitch qemu-kvm bridge-utils libguestfs-tools libvirt-python
 Requires:       initscripts net-tools iputils iproute iptables python34 python34-yaml python34-jinja2 python3-ipmi python2-virtualbmc
 Requires:       ipxe-roms-qemu >= 20160127-1
+Requires:       libvirt-devel
 
 %description
-Scripts for OPNFV deployment using RDO Manager
+Scripts for OPNFV deployment using Apex
 https://wiki.opnfv.org/apex
 
 %prep
-%setup -q
+%autosetup -n %{srcname}-%{version}
 
 %build
 rst2html docs/release/installation/index.rst docs/release/installation/installation-instructions.html
 rst2html docs/release/release-notes/release-notes.rst docs/release/release-notes/release-notes.html
+%py3_build
 
 %global __python %{__python3}
 
 %install
 mkdir -p %{buildroot}%{_bindir}/
-install ci/deploy.sh %{buildroot}%{_bindir}/opnfv-deploy
+%py3_install
 install ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
 install ci/util.sh %{buildroot}%{_bindir}/opnfv-util
 
@@ -37,67 +41,10 @@ mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d/
 install build/bash_completion_apex %{buildroot}%{_sysconfdir}/bash_completion.d/apex
 
 mkdir -p %{buildroot}%{_sysconfdir}/opnfv-apex/
-install config/deploy/os-nosdn-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
-install config/deploy/os-nosdn-bar-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-bar-noha.yaml
-install config/deploy/os-nosdn-bar-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-bar-ha.yaml
-install config/deploy/os-nosdn-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
-install config/deploy/os-nosdn-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-ha.yaml
-install config/deploy/os-nosdn-ovs_dpdk-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
-install config/deploy/os-nosdn-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
-install config/deploy/os-nosdn-performance-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
-install config/deploy/os-nosdn-ovs_dpdk-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
-install config/deploy/os-nosdn-kvm-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
-install config/deploy/os-nosdn-kvm-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
-install config/deploy/os-nosdn-kvm_ovs_dpdk-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm_ovs_dpdk-ha.yaml
-install config/deploy/os-nosdn-kvm_ovs_dpdk-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm_ovs_dpdk-noha.yaml
-install config/deploy/os-odl-bgpvpn-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-ha.yaml
-install config/deploy/os-odl-bgpvpn-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-noha.yaml
-install config/deploy/os-odl-sfc-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-sfc-ha.yaml
-install config/deploy/os-odl-sfc-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-sfc-noha.yaml
-install config/deploy/os-odl-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-noha.yaml
-install config/deploy/os-odl_netvirt-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_netvirt-fdio-noha.yaml
-install config/deploy/os-odl-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
-install config/deploy/os-odl-fdio-dvr-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-ha.yaml
-install config/deploy/os-odl-fdio-dvr-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-noha.yaml
-install config/deploy/os-odl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
-install config/deploy/os-odl-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-nofeature-noha.yaml
-install config/deploy/os-odl-ovs_dpdk-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
-install config/deploy/os-odl-ovs_dpdk-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-noha.yaml
-install config/deploy/os-odl-gluon-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-gluon-noha.yaml
-install config/deploy/os-ovn-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-ovn-nofeature-noha.yaml
-install config/deploy/os-onos-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml
-install config/deploy/os-onos-sfc-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-onos-sfc-ha.yaml
-install config/deploy/os-ocl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
-install config/network/network_settings.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings.yaml
-install config/network/network_settings_v6.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
-install config/network/network_settings_vpp.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings_vpp.yaml
-
-
-mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/python/apex
-install lib/common-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/configure-deps-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/parse-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/virtual-setup-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/undercloud-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/overcloud-deploy-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/post-install-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/utility-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/configure-vm %{buildroot}%{_var}/opt/opnfv/lib/
-install lib/python/apex_python_utils.py %{buildroot}%{_var}/opt/opnfv/lib/python/
-mkdir -p %{buildroot}%{python3_sitelib}/apex/
-install lib/python/apex/__init__.py %{buildroot}%{python3_sitelib}/apex/
-install lib/python/apex/deploy_settings.py %{buildroot}%{python3_sitelib}/apex/
-install lib/python/apex/ip_utils.py %{buildroot}%{python3_sitelib}/apex/
-install lib/python/apex/inventory.py %{buildroot}%{python3_sitelib}/apex/
-install lib/python/apex/network_environment.py %{buildroot}%{python3_sitelib}/apex/
-install lib/python/apex/network_settings.py %{buildroot}%{python3_sitelib}/apex/
-install lib/python/apex/clean.py %{buildroot}%{python3_sitelib}/apex/
-mkdir -p %{buildroot}%{python3_sitelib}/apex/common
-install lib/python/apex/common/__init__.py %{buildroot}%{python3_sitelib}/apex/common/
-install lib/python/apex/common/constants.py %{buildroot}%{python3_sitelib}/apex/common/
-install lib/python/apex/common/utils.py %{buildroot}%{python3_sitelib}/apex/common/
-mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/installer/onos/
-install lib/installer/domain.xml %{buildroot}%{_var}/opt/opnfv/lib/installer/
+cp -f %{buildroot}%{_datadir}/opnfv-apex/config/deploy/* %{buildroot}%{_sysconfdir}/opnfv-apex/
+cp -f %{buildroot}%{_datadir}/opnfv-apex/config/network/* %{buildroot}%{_sysconfdir}/opnfv-apex/
+rm -f %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-csit-noha.yaml
+rm -f %{buildroot}%{_sysconfdir}/opnfv-apex/deploy_settings.yaml
 
 mkdir -p %{buildroot}%{_docdir}/opnfv/
 install LICENSE.rst %{buildroot}%{_docdir}/opnfv/
@@ -111,22 +58,13 @@ install config/network/network_settings_vpp.yaml %{buildroot}%{_docdir}/opnfv/ne
 install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/inventory.yaml.example
 
 %files
-%defattr(644, root, root, -)
+%{python3_sitelib}/apex/
+%{python3_sitelib}/apex-*.egg-info
+%defattr(644, root, root, 644)
 %attr(755,root,root) %{_bindir}/opnfv-deploy
 %attr(755,root,root) %{_bindir}/opnfv-clean
 %attr(755,root,root) %{_bindir}/opnfv-util
-%{_var}/opt/opnfv/lib/common-functions.sh
-%{_var}/opt/opnfv/lib/configure-deps-functions.sh
-%{_var}/opt/opnfv/lib/parse-functions.sh
-%{_var}/opt/opnfv/lib/virtual-setup-functions.sh
-%{_var}/opt/opnfv/lib/undercloud-functions.sh
-%{_var}/opt/opnfv/lib/overcloud-deploy-functions.sh
-%{_var}/opt/opnfv/lib/post-install-functions.sh
-%{_var}/opt/opnfv/lib/utility-functions.sh
-%attr(755,root,root) %{_var}/opt/opnfv/lib/configure-vm
-%{_var}/opt/opnfv/lib/python/
-%{python3_sitelib}/apex/
-%{_var}/opt/opnfv/lib/installer/domain.xml
+%{_datadir}/opnfv-apex/
 %{_sysconfdir}/bash_completion.d/apex
 %{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-bar-noha.yaml
@@ -160,6 +98,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %{_sysconfdir}/opnfv-apex/os-onos-sfc-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
 %{_sysconfdir}/opnfv-apex/network_settings.yaml
+%{_sysconfdir}/opnfv-apex/network_settings_vlans.yaml
 %{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
 %{_sysconfdir}/opnfv-apex/network_settings_vpp.yaml
 %doc %{_docdir}/opnfv/LICENSE.rst
@@ -173,6 +112,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %doc %{_docdir}/opnfv/inventory.yaml.example
 
 %changelog
+* Mon Aug 14 2017 Tim Rozet <trozet@redhat.com> - 5.0-4
+- Updated for python refactoring
 * Mon May 08 2017 Dan Radez <dradez@redhat.com> - 5.0-3
 - adding configure-vm
 * Tue Apr 11 2017 Dan Radez <dradez@redhat.com> - 5.0-2
index d37be0d..6f8a669 100644 (file)
@@ -14,7 +14,7 @@ QUAGGA_RPMS_DIR=${BUILD_DIR}/quagga_build_dir
 CACHE_DIR="$(dirname ${BUILD_ROOT})/.cache"
 CACHE_HISTORY=".cache_history"
 PATCHES_DIR="${BUILD_ROOT}/patches"
-BUILD_UTILS="$(dirname ${BUILD_ROOT})/lib/python/build_utils.py"
+BUILD_UTILS="$(dirname ${BUILD_ROOT})/apex/build/build_utils.py"
 
 
 rdo_images_uri=${RDO_IMAGES_URI:-https://images.rdoproject.org/ocata/delorean/current-tripleo/stable/}
@@ -52,4 +52,4 @@ fdio_pkgs=(
 'http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/vpp-plugins-17.04.1-3~ge3b7ad7~b72.x86_64.rpm'
 )
 
-honeycomb_pkg='http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/honeycomb-1.17.04.1-2073.noarch.rpm'
\ No newline at end of file
+honeycomb_pkg='http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/honeycomb-1.17.04.1-2073.noarch.rpm'
index 5cd2c28..113f35d 100755 (executable)
@@ -13,4 +13,4 @@ set -e
 rpm -q ansible || sudo yum -y install ansible
 ansible-playbook --become -i "localhost," -c local $DIR/../lib/ansible/playbooks/build_dependencies.yml -vvv
 make -C $DIR/../build clean
-python3 $DIR/build.py $@
+python3 $DIR/../apex/build.py $@
index fba1f12..e35b95b 100755 (executable)
 #author: Dan Radez (dradez@redhat.com)
 #author: Tim Rozet (trozet@redhat.com)
 
-# Use default if no param passed
-BASE=${BASE:-'/var/opt/opnfv'}
-IMAGES=${IMAGES:-"$BASE/images"}
-LIB=${LIB:-"$BASE/lib"}
 reset=$(tput sgr0 || echo "")
 blue=$(tput setaf 4 || echo "")
 red=$(tput setaf 1 || echo "")
 green=$(tput setaf 2 || echo "")
 
-##LIBRARIES
-for lib in common-functions parse-functions; do
-  if ! source $LIB/${lib}.sh; then
-    echo "Failed to source $LIB/${lib}.sh"
-    exit 1
-  fi
-done
-
 vm_index=4
 ovs_bridges="br-admin br-tenant br-external br-storage"
 ovs_bridges+=" br-private br-public" # Legacy names, remove in E river
@@ -37,6 +25,102 @@ ovs_bridges+=" br-private br-public" # Legacy names, remove in E river
 OPNFV_NETWORK_TYPES+=" admin tenant external storage api"
 OPNFV_NETWORK_TYPES+=" admin_network private_network public_network storage_network api_network" # Legecy names, remove in E river
 
+##detach interface from OVS and set the network config correctly
+##params: bridge to detach from
+##assumes only 1 real interface attached to OVS
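+##usage: detach_interface_from_ovs br-admin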
+function detach_interface_from_ovs {
+  local bridge
+  local port_output ports_no_orig
+  local net_path
+  local if_ip if_mask if_gw if_prefix
+  local if_metric if_dns1 if_dns2
+
+  net_path=/etc/sysconfig/network-scripts/
+  if [[ -z "$1" ]]; then
+    return 1
+  else
+    bridge=$1
+  fi
+
+  # if no interfaces attached then return
+  if ! ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*"; then
+    return 0
+  fi
+
+  # look for .orig ifcfg files to use
+  port_output=$(ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*")
+  while read -r line; do
+    if [ -z "$line" ]; then
+      continue
+    elif [ -e ${net_path}/ifcfg-${line}.orig ]; then
+      mv -f ${net_path}/ifcfg-${line}.orig ${net_path}/ifcfg-${line}
+    elif [ -e ${net_path}/ifcfg-${bridge} ]; then
+      if_ip=$(sed -n 's/^IPADDR=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+      if_mask=$(sed -n 's/^NETMASK=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+      if_gw=$(sed -n 's/^GATEWAY=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+      if_metric=$(sed -n 's/^METRIC=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+      if_dns1=$(sed -n 's/^DNS1=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+      if_dns2=$(sed -n 's/^DNS2=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
+
+      if [ -z "$if_mask" ]; then
+        if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${net_path}/ifcfg-${bridge})
+        if_mask=$(prefix2mask ${if_prefix})
+      fi
+
+      if [[ -z "$if_ip" || -z "$if_mask" ]]; then
+        echo "ERROR: IPADDR or PREFIX/NETMASK missing for ${bridge} and no .orig file for interface ${line}"
+        return 1
+      fi
+
+      # create the ifcfg file
+      echo "DEVICE=${line}
+IPADDR=${if_ip}
+NETMASK=${if_mask}
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=Ethernet
+NM_CONTROLLED=no
+PEERDNS=no" > ${net_path}/ifcfg-${line}
+
+      if [ -n "$if_gw" ]; then
+        echo "GATEWAY=${if_gw}" >> ${net_path}/ifcfg-${line}
+      fi
+
+      if [ -n "$if_metric" ]; then
+        echo "METRIC=${if_metric}" >> ${net_path}/ifcfg-${line}
+      fi
+
+      if [[ -n "$if_dns1" || -n "$if_dns2" ]]; then
+        sed -i '/PEERDNS/c\PEERDNS=yes' ${net_path}/ifcfg-${line}
+
+        if [ -n "$if_dns1" ]; then
+          echo "DNS1=${if_dns1}" >> ${net_path}/ifcfg-${line}
+        fi
+
+        if [ -n "$if_dns2" ]; then
+          echo "DNS2=${if_dns2}" >> ${net_path}/ifcfg-${line}
+        fi
+      fi
+      break
+    else
+      echo "ERROR: Real interface ${line} attached to bridge, but no interface or ${bridge} ifcfg file exists"
+      return 1
+    fi
+
+  done <<< "$port_output"
+
+  # modify the bridge ifcfg file
+  # to remove IP params
+  sudo sed -i 's/IPADDR=.*//' ${net_path}/ifcfg-${bridge}
+  sudo sed -i 's/NETMASK=.*//' ${net_path}/ifcfg-${bridge}
+  sudo sed -i 's/GATEWAY=.*//' ${net_path}/ifcfg-${bridge}
+  sudo sed -i 's/DNS1=.*//' ${net_path}/ifcfg-${bridge}
+  sudo sed -i 's/DNS2=.*//' ${net_path}/ifcfg-${bridge}
+  sudo sed -i 's/METRIC=.*//' ${net_path}/ifcfg-${bridge}
+  sudo sed -i 's/PEERDNS=.*//' ${net_path}/ifcfg-${bridge}
+
+  sudo systemctl restart network
+}
 
 display_usage() {
   echo -e "Usage:\n$0 [arguments] \n"
@@ -47,7 +131,7 @@ display_usage() {
 ##params: $@ the entire command line is passed
 ##usage: parse_cmd_line() "$@"
 parse_cmdline() {
-  echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
+  echo -e "\n\n${blue}This script is used to clean an Apex environment${reset}\n\n"
   echo "Use -h to display help"
   sleep 2
 
@@ -79,7 +163,13 @@ parse_cmdline "$@"
 
 if [ -n "$INVENTORY_FILE" ]; then
   echo -e "${blue}INFO: Parsing inventory file...${reset}"
-  if ! python3 -B $LIB/python/apex_python_utils.py clean -f ${INVENTORY_FILE}; then
+  # hack for now (until we switch fully over to clean.py) to tell if
+  # we should install apex from python or if rpm is being used
+  if ! rpm -q opnfv-apex-common > /dev/null; then
+    pushd ../ && python3 setup.py install > /dev/null
+    popd
+  fi
+  if ! python3 -m apex.clean -f ${INVENTORY_FILE}; then
     echo -e "${red}WARN: Unable to shutdown all nodes! Please check /var/log/apex.log${reset}"
   else
     echo -e "${blue}INFO: Node shutdown complete...${reset}"
index f1a807f..0ba0c74 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 ##############################################################################
 # Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
 #
 # author: Dan Radez (dradez@redhat.com)
 # author: Tim Rozet (trozet@redhat.com)
 #
-# Based on RDO Manager http://www.rdoproject.org
 
 set -e
-
-##VARIABLES
-reset=$(tput sgr0 || echo "")
-blue=$(tput setaf 4 || echo "")
-red=$(tput setaf 1 || echo "")
-green=$(tput setaf 2 || echo "")
-
-interactive="FALSE"
-ping_site="8.8.8.8"
-dnslookup_site="www.google.com"
-post_config="TRUE"
-debug="FALSE"
-
-ovs_rpm_name=openvswitch-2.6.1-1.el7.centos.x86_64.rpm
-ovs_kmod_rpm_name=openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm
-
-declare -i CNT
-declare UNDERCLOUD
-declare -A deploy_options_array
-declare -a performance_options
-declare -A NET_MAP
-
-APEX_TMP_DIR=$(python3 -c "import tempfile; print(tempfile.mkdtemp())")
-SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
-DEPLOY_OPTIONS=""
-BASE=${BASE:-'/var/opt/opnfv'}
-IMAGES=${IMAGES:-"$BASE/images"}
-LIB=${LIB:-"$BASE/lib"}
-OPNFV_NETWORK_TYPES="admin tenant external storage api"
-ENV_FILE="opnfv-environment.yaml"
-
-VM_CPUS=4
-VM_RAM=8
-VM_COMPUTES=1
-
-# Netmap used to map networks to OVS bridge names
-NET_MAP['admin']="br-admin"
-NET_MAP['tenant']="br-tenant"
-NET_MAP['external']="br-external"
-NET_MAP['storage']="br-storage"
-NET_MAP['api']="br-api"
-ext_net_type="interface"
-ip_address_family=4
-
-# Libraries
-lib_files=(
-$LIB/common-functions.sh
-$LIB/configure-deps-functions.sh
-$LIB/parse-functions.sh
-$LIB/virtual-setup-functions.sh
-$LIB/undercloud-functions.sh
-$LIB/overcloud-deploy-functions.sh
-$LIB/post-install-functions.sh
-$LIB/utility-functions.sh
-)
-for lib_file in ${lib_files[@]}; do
-  if ! source $lib_file; then
-    echo -e "${red}ERROR: Failed to source $lib_file${reset}"
-    exit 1
-  fi
-done
-
-display_usage() {
-  echo -e "Usage:\n$0 [arguments] \n"
-  echo -e "   --deploy-settings | -d : Full path to deploy settings yaml file. Optional.  Defaults to null"
-  echo -e "   --inventory | -i : Full path to inventory yaml file. Required only for baremetal"
-  echo -e "   --net-settings | -n : Full path to network settings file. Optional."
-  echo -e "   --ping-site | -p : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
-  echo -e "   --dnslookup-site : site to use to verify DNS resolution. Optional. Defaults to www.google.com"
-  echo -e "   --virtual | -v : Virtualize overcloud nodes instead of using baremetal."
-  echo -e "   --no-post-config : disable Post Install configuration."
-  echo -e "   --debug : enable debug output."
-  echo -e "   --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
-  echo -e "   --virtual-cpus : Number of CPUs to use per Overcloud VM in a virtual deployment (defaults to 4)."
-  echo -e "   --virtual-computes : Number of Virtual Compute nodes to create and use during deployment (defaults to 1 for noha and 2 for ha)."
-  echo -e "   --virtual-default-ram : Amount of default RAM to use per Overcloud VM in GB (defaults to 8)."
-  echo -e "   --virtual-compute-ram : Amount of RAM to use per Overcloud Compute VM in GB (defaults to 8). Overrides --virtual-default-ram arg for computes"
-}
-
-##translates the command line parameters into variables
-##params: $@ the entire command line is passed
-##usage: parse_cmd_line() "$@"
-parse_cmdline() {
-  echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
-  echo "Use -h to display help"
-
-  while [ "${1:0:1}" = "-" ]
-  do
-    case "$1" in
-        -h|--help)
-                display_usage
-                exit 0
-            ;;
-        -d|--deploy-settings)
-                DEPLOY_SETTINGS_FILE=$2
-                echo "Deployment Configuration file: $2"
-                shift 2
-            ;;
-        -i|--inventory)
-                INVENTORY_FILE=$2
-                shift 2
-            ;;
-        -n|--net-settings)
-                NETSETS=$2
-                echo "Network Settings Configuration file: $2"
-                shift 2
-            ;;
-        -e|--environment-file)
-                ENV_FILE=$2
-                echo "Base OOO Environment file: $2"
-                shift 2
-            ;;
-        -p|--ping-site)
-                ping_site=$2
-                echo "Using $2 as the ping site"
-                shift 2
-            ;;
-        --dnslookup-site)
-                dnslookup_site=$2
-                echo "Using $2 as the dnslookup site"
-                shift 2
-            ;;
-        -v|--virtual)
-                virtual="TRUE"
-                echo "Executing a Virtual Deployment"
-                shift 1
-            ;;
-        --no-post-config )
-                post_config="FALSE"
-                echo "Post install configuration disabled"
-                shift 1
-            ;;
-        --debug )
-                debug="TRUE"
-                echo "Enable debug output"
-                shift 1
-            ;;
-        --interactive )
-                interactive="TRUE"
-                echo "Interactive mode enabled"
-                shift 1
-            ;;
-        --virtual-cpus )
-                VM_CPUS=$2
-                echo "Number of CPUs per VM set to $VM_CPUS"
-                shift 2
-            ;;
-        --virtual-default-ram )
-                VM_RAM=$2
-                echo "Amount of Default RAM per VM set to $VM_RAM"
-                shift 2
-            ;;
-        --virtual-computes )
-                VM_COMPUTES=$2
-                echo "Virtual Compute nodes set to $VM_COMPUTES"
-                shift 2
-            ;;
-        --virtual-compute-ram )
-                VM_COMPUTE_RAM=$2
-                echo "Virtual Compute RAM set to $VM_COMPUTE_RAM"
-                shift 2
-            ;;
-        *)
-                display_usage
-                exit 1
-            ;;
-    esac
-  done
-  sleep 2
-
-  if [[ -z "$NETSETS" ]]; then
-    echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
-    exit 1
-  fi
-
-  # inventory file usage validation
-  if [[ -n "$virtual" ]]; then
-      if [[ -n "$INVENTORY_FILE" ]]; then
-          echo -e "${red}ERROR: You should not specify an inventory file with virtual deployments${reset}"
-          exit 1
-      else
-          INVENTORY_FILE="$APEX_TMP_DIR/inventory-virt.yaml"
-      fi
-  elif [[ -z "$INVENTORY_FILE" ]]; then
-    echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
-    exit 1
-  elif [[ ! -f "$INVENTORY_FILE" ]]; then
-    echo -e "{$red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
-    exit 1
-  fi
-
-  if [[ -z "$DEPLOY_SETTINGS_FILE" || ! -f "$DEPLOY_SETTINGS_FILE" ]]; then
-    echo -e "${red}ERROR: Deploy Settings: ${DEPLOY_SETTINGS_FILE} does not exist! Exiting...${reset}"
-    exit 1
-  fi
-
-  if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
-    echo -e "${red}ERROR: Network Settings: ${NETSETS} does not exist! Exiting...${reset}"
-    exit 1
-  fi
-
-}
-
-main() {
-  parse_cmdline "$@"
-  if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
-    echo -e "${blue}INFO: Parsing deploy settings file...${reset}"
-    parse_deploy_settings
-  fi
-  echo -e "${blue}INFO: Parsing network settings file...${reset}"
-  parse_network_settings
-  if ! configure_deps; then
-    echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
-    exit 1
-  fi
-  #Correct the time on the server prior to launching any VMs
-  if ntpdate $ntp_server; then
-    hwclock --systohc
-  else
-    echo "${blue}WARNING: ntpdate failed to update the time on the server. ${reset}"
-  fi
-  setup_undercloud_vm
-  if [ "$virtual" == "TRUE" ]; then
-    setup_virtual_baremetal $VM_CPUS $VM_RAM
-  fi
-  parse_inventory_file
-  configure_undercloud
-  overcloud_deploy
-  if [ "$post_config" == "TRUE" ]; then
-    if ! configure_post_install; then
-      echo -e "${red}ERROR:Post Install Configuration Failed, Exiting.${reset}"
-      exit 1
-    else
-      echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
-    fi
-  fi
-}
-
-main "$@"
+yum -y install python34 python34-devel libvirt-devel python34-pip python-tox ansible
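+# stash the .build image cache outside the tree so 'pip3 install .' does not
+# copy it, then restore it before deploying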
+mkdir -p /home/jenkins-ci/tmp
+mv -f .build /home/jenkins-ci/tmp/
+pip3 install --upgrade --force-reinstall .
+mv -f /home/jenkins-ci/tmp/.build .
+opnfv-deploy $@
index 7cbd390..517822e 100755 (executable)
@@ -1,7 +1,5 @@
 #!/usr/bin/env bash
 
-source ../lib/utility-functions.sh
-
 export ANSIBLE_HOST_KEY_CHECKING=False
 
 ./dev_dep_check.sh
index 1a931d0..a9df021 100755 (executable)
@@ -2,12 +2,88 @@
 # Utility script used to interact with a deployment
 # @author Tim Rozet (trozet@redhat.com)
 
-BASE=${BASE:-'/var/opt/opnfv'}
-IMAGES=${IMAGES:-"$BASE/images"}
-LIB=${LIB:-"$BASE/lib"}
 VALID_CMDS="undercloud overcloud opendaylight debug-stack mock-detached -h --help"
+SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
 
-source $LIB/utility-functions.sh
+##connects to undercloud
+##params: user to login with, command to execute on undercloud (optional)
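+##usage: undercloud_connect stack "source stackrc; nova list"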
+function undercloud_connect {
+  local user=$1
+
+  if [ -z "$1" ]; then
+    echo "Missing required argument: user to login as to undercloud"
+    return 1
+  fi
+
+  if [ -z "$2" ]; then
+    ssh ${SSH_OPTIONS[@]} ${user}@$(get_undercloud_ip)
+  else
+    ssh ${SSH_OPTIONS[@]} -T ${user}@$(get_undercloud_ip) "$2"
+  fi
+}
+
+##outputs the Undercloud's IP address
+##params: none
+function get_undercloud_ip {
+  echo $(arp -an | grep $(virsh domiflist undercloud | grep default |\
+    awk '{print $5}') | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+}
+
+##connects to overcloud nodes
+##params: node to login to, command to execute on overcloud (optional)
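+##usage: overcloud_connect controller0 "sudo ovs-vsctl show"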
+function overcloud_connect {
+  local node
+  local node_output
+  local node_ip
+
+  if [ -z "$1" ]; then
+    echo "Missing required argument: overcloud node to login to"
+    return 1
+  elif ! echo "$1" | grep -E "(controller|compute)[0-9]+" > /dev/null; then
+    echo "Invalid argument: overcloud node to login to must be in the format: \
+controller<number> or compute<number>"
+    return 1
+  fi
+
+  node_output=$(undercloud_connect "stack" "source stackrc; nova list")
+  node=$(echo "$1" | sed -E 's/([a-zA-Z]+)([0-9]+)/\1-\2/')
+
+  node_ip=$(echo "$node_output" | grep "$node" | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+
+  if [ "$node_ip" == "" ]; then
+    echo -e "Unable to find IP for ${node} in \n${node_output}"
+    return 1
+  fi
+
+  if [ -z "$2" ]; then
+    ssh ${SSH_OPTIONS[@]} heat-admin@${node_ip}
+  else
+    ssh ${SSH_OPTIONS[@]} -T heat-admin@${node_ip} "$2"
+  fi
+}
+
+##connects to opendaylight karaf console
+##params: None
+function opendaylight_connect {
+  local opendaylight_ip
+  opendaylight_ip=$(undercloud_connect "stack" "cat overcloudrc | grep SDN_CONTROLLER_IP | grep -Eo [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+
+  if [ "$opendaylight_ip" == "" ]; then
+    echo -e "Unable to find IP for OpenDaylight in overcloudrc"
+    return 1
+  else
+    echo -e "Connecting to ODL Karaf console.  Default password is 'karaf'"
+  fi
+
+  ssh -p 8101 ${SSH_OPTIONS[@]} karaf@${opendaylight_ip}
+}
+
+##outputs heat stack deployment failures
+##params: none
+function debug_stack {
+  source ~/stackrc
+  openstack stack failures list overcloud --long
+}
 
 resolve_cmd() {
   local given=$1
index dec8ab7..afe12b7 100644 (file)
               libguestfs-tools,bsdtar,libvirt,yum-utils,
               python2-oslo-config,python2-debtcollector,
               make, python34-pip, python-virtualenv,libguestfs-tools-c,
-              supermin,supermin5,perl-Sys-Guestfs,python-libguestfs
+              supermin,supermin5,perl-Sys-Guestfs,python-libguestfs,
+              libvirt-devel,python34-docutils,python-docutils
     - name: Install Virtualization group
       yum:
         name: "@Virtualization Host"
-    - name: Install python ipmi from OPNFV artifacts
-      yum:
-        name: 'http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm'
+    - pip:
+        name: python-ipmi
+        executable: pip3.4
     - pip:
         name: tox
+        executable: pip3.4
     - pip:
         name: gitpython
         executable: pip3.4
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
new file mode 100644 (file)
index 0000000..7b23662
--- /dev/null
@@ -0,0 +1,116 @@
+---
+- hosts: all
+  tasks:
+    - name: Generate SSH key for stack if missing
+      shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+    - name: Fix ssh key for stack
+      shell: restorecon -r /home/stack
+      become: yes
+    - file:
+        path: /home/stack/nics
+        state: directory
+        owner: stack
+        group: stack
+        mode: 0775
+    - copy:
+        src: /root/.ssh/id_rsa.pub
+        dest: /home/stack/jumphost_id_rsa.pub
+        owner: stack
+        group: stack
+        mode: 0644
+    - copy:
+        src: "{{ apex_temp_dir }}/{{ item }}.yaml"
+        dest: "/home/stack/nics/{{ item }}.yaml"
+        owner: stack
+        group: stack
+        mode: 0644
+      with_items:
+        - controller
+        - compute
+    - lineinfile:
+        path: /etc/sudoers
+        regexp: 'Defaults\s*requiretty'
+        state: absent
+      become: yes
+    - name: openstack-configs undercloud
+      shell: openstack-config --set undercloud.conf DEFAULT {{ item }}
+      with_items: "{{ undercloud_config }}"
+    - name: openstack-configs ironic
+      shell: openstack-config --set /etc/ironic/ironic.conf {{ item }}
+      become: yes
+      with_items: "{{ ironic_config }}"
+    - name: openstack-configs undercloud aarch64
+      shell: openstack-config --set undercloud.conf DEFAULT ipxe_enabled false
+      when: "{{ aarch64 }}"
+    - lineinfile:
+        path: /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
+        regexp: '_link_ip_address_pxe_configs'
+        line: '_link_mac_pxe_configs(task)'
+      when: "{{ aarch64 }}"
+    - name: undercloud install
+      shell: openstack undercloud install &> apex-undercloud-install.log
+      become: yes
+      become_user: stack
+    - name: openstack-configs nova
+      shell: openstack-config --set /etc/nova/nova.conf DEFAULT {{ item }}
+      become: yes
+      with_items: "{{ nova_config }}"
+    - name: restart nova services
+      service:
+        name: "{{ item }}"
+        state: restarted
+        enabled: yes
+      with_items:
+        - openstack-nova-conductor
+        - openstack-nova-compute
+        - openstack-nova-api
+        - openstack-nova-scheduler
+    - name: openstack-configs neutron
+      shell: openstack-config --set /etc/neutron/neutron.conf DEFAULT {{ item }}
+      become: yes
+      with_items: "{{ neutron_config }}"
+    - name: restart neutron services
+      service:
+        name: "{{ item }}"
+        state: restarted
+        enabled: yes
+      with_items:
+        - neutron-server
+        - neutron-dhcp-agent
+    - name: configure external network vlan ifcfg
+      template:
+        src: external_vlan_ifcfg.yml.j2
+        dest: "/etc/sysconfig/network-scripts/ifcfg-vlan{{ external_network.vlan }}"
+        owner: root
+        group: root
+        mode: 0644
+      become: yes
+      when:
+        - external_network.vlan != "native"
+        - external_network.enabled
+    - name: bring up vlan ifcfg
+      shell: "ifup vlan{{ external_network.vlan }}"
+      become: yes
+      when:
+        - external_network.vlan != "native"
+        - external_network.enabled
+    - name: assign IP to native eth2
+      shell: ip a a {{ external_network.ip }}/{{ external_network.prefix }} dev eth2
+      become: yes
+      when:
+        - external_network.vlan == "native"
+        - external_network.enabled
+    - name: bring up eth2
+      shell: ip link set up dev eth2
+      when:
+        - external_network.vlan == "native"
+        - external_network.enabled
+      become: yes
+    - name: fetch storage environment file
+      fetch:
+        src: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+        dest: "{{ apex_temp_dir }}/"
+        flat: yes
+
+- include: undercloud_aarch64.yml
+  when: aarch64
diff --git a/lib/ansible/playbooks/deploy_dependencies.yml b/lib/ansible/playbooks/deploy_dependencies.yml
new file mode 100644 (file)
index 0000000..7723162
--- /dev/null
@@ -0,0 +1,66 @@
+---
+- hosts: localhost
+  tasks:
+    - sysctl:
+        name: net.ipv4.ip_forward
+        state: present
+        value: 1
+        sysctl_set: yes
+    - systemd:
+        name: dhcpd
+        state: stopped
+        enabled: no
+      ignore_errors: yes
+    - systemd:
+        name: libvirtd
+        state: started
+        enabled: yes
+    - systemd:
+        name: openvswitch
+        state: started
+        enabled: yes
+    - virt_net:
+        command: define
+        name: default
+        xml: '{{ lookup("template", "virsh_network_default.xml.j2") }}'
+        state: active
+        autostart: yes
+    - openvswitch_bridge:
+        bridge: 'br-{{ item }}'
+        state: present
+      with_items: '{{ virsh_enabled_networks }}'
+    - virt_net:
+        command: define
+        name: '{{ item }}'
+        xml: '{{ lookup("template", "virsh_network_ovs.xml.j2") }}'
+        autostart: yes
+      with_items: '{{ virsh_enabled_networks }}'
+    - virt_net:
+        command: create
+        name: '{{ item }}'
+      with_items: '{{ virsh_enabled_networks }}'
+    - virt_pool:
+        name: default
+        command: define
+        autostart: yes
+        state: active
+        xml: '{{ lookup("template", "virsh_pool.xml.j2") }}'
+    - lineinfile:
+        path: /etc/modprobe.d/kvm_intel.conf
+        line: 'options kvm-intel nested=1'
+        create: yes
+      when: ansible_architecture == "x86_64"
+    - modprobe:
+        name: "{{ item }}"
+        state: present
+      with_items:
+        - kvm
+        - kvm_intel
+      when: ansible_architecture == "x86_64"
+    - name: Generate SSH key for root if missing
+      shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
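+    # on Python 3, ET.tostring() returns bytes by default, which libvirt's
+    # defineXML() rejects; forcing encoding='unicode' makes vbmc pass a str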
+    - name: Manually patch vbmc to work with python3.x
+      lineinfile:
+        line: "                conn.defineXML(ET.tostring(tree, encoding='unicode'))"
+        regexp: "tostring"
+        path: /usr/lib/python3.4/site-packages/virtualbmc/vbmc.py
diff --git a/lib/ansible/playbooks/deploy_overcloud.yml b/lib/ansible/playbooks/deploy_overcloud.yml
new file mode 100644 (file)
index 0000000..76bbbc6
--- /dev/null
@@ -0,0 +1,68 @@
+---
+- hosts: all
+  tasks:
+    - name: Copy all files to undercloud
+      copy:
+        src: "{{ apex_temp_dir }}/{{ item }}"
+        dest: "/home/stack/{{ item }}"
+        owner: stack
+        group: stack
+        mode: 0644
+      with_items:
+        - network-environment.yaml
+        - instackenv.json
+        - opnfv-environment.yaml
+        - overcloud-full.qcow2
+        - deploy_command
+        - virtual-environment.yaml
+        - baremetal-environment.yaml
+    - copy:
+        src: "{{ apex_temp_dir }}/storage-environment.yaml"
+        dest: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+        owner: root
+        group: root
+        mode: 0664
+    - systemd:
+        name: openstack-swift-proxy
+        state: restarted
+        enabled: yes
+      become: yes
+    - name: Upload glance images
+      shell: "{{ stackrc }} && openstack overcloud image upload"
+      become: yes
+      become_user: stack
+    - name: Import inventory (baremetal)
+      shell: "{{ stackrc }} && {{ item }}"
+      with_items:
+        - openstack overcloud node import instackenv.json
+        - openstack overcloud node introspect --all-manageable --provide
+      when: not virtual
+    - name: Import inventory (virtual)
+      shell: "{{ stackrc }} && openstack overcloud node import --provide instackenv.json"
+      when: virtual
+    - name: Set flavors
+      shell: '{{ stackrc }} && openstack flavor set --property "cpu_arch"="x86_64" {{ item }}'
+      with_items:
+        - baremetal
+        - control
+        - compute
+    - name: Configure DNS server for ctlplane network
+      shell: "{{ stackrc }} && openstack subnet set ctlplane-subnet {{ dns_server_args }}"
+    - name: Execute Overcloud Deployment
+      shell: "{{ stackrc }} && bash deploy_command"
+    - name: Show Keystone output
+      shell: "{{ overcloudrc }} && {{ item }}"
+      when: debug
+      with_items:
+        - openstack endpoint list
+        - openstack service list
+    - name: Get overcloud nodes and IPs
+      shell: "{{ stackrc }} && openstack server list -f json"
+      register: nova_list
+    - name: Write nova list output to file
+      local_action: copy content="{{ nova_list.stdout }}" dest="{{ apex_temp_dir }}/nova_output"
+    - name: Fetch overcloudrc
+      fetch:
+        src: /home/stack/overcloudrc
+        dest: "{{ apex_temp_dir }}/"
+        flat: yes
diff --git a/lib/ansible/playbooks/post_deploy_overcloud.yml b/lib/ansible/playbooks/post_deploy_overcloud.yml
new file mode 100644 (file)
index 0000000..fdf7024
--- /dev/null
@@ -0,0 +1,45 @@
+---
+- hosts: all
+  tasks:
+    - name: Bring up br-phy for OVS DPDK
+      shell: ifup br-phy
+      when:
+        - dataplane == 'ovs_dpdk'
+        - "'compute' in ansible_hostname"
+      become: yes
+    - name: Restart OVS Agent for DPDK
+      shell: systemctl restart neutron-openvswitch-agent
+      when:
+        - dataplane == 'ovs_dpdk'
+        - "'compute' in ansible_hostname"
+        - sdn == false
+    - name: SFC config workaround
+      file:
+        src: /etc/neutron/networking_sfc.conf
+        dest: /etc/neutron/conf.d/neutron-server/networking_sfc.conf
+        state: link
+      become: yes
+      when:
+        - sfc
+        - "'controller' in ansible_hostname"
+    - name: Ensure ZRPCD is up
+      systemd:
+        name: zrpcd
+        state: started
+        enabled: yes
+      become: yes
+      when:
+        - vpn
+        - "'controller-0' in ansible_hostname"
+    - name: VSPERF build base machine
+      shell: /build_base_machine.sh
+      args:
+        chdir: /var/opt/vsperf/systems/
+      become: yes
+      when:
+        - vsperf
+        - "'compute-0' in ansible_hostname"
+    - name: Fetch logs from node
+      fetch:
+        src: /var/log/messages
+        dest: "{{ apex_temp_dir }}"
diff --git a/lib/ansible/playbooks/post_deploy_undercloud.yml b/lib/ansible/playbooks/post_deploy_undercloud.yml
new file mode 100644 (file)
index 0000000..ba0746b
--- /dev/null
@@ -0,0 +1,118 @@
+---
+- hosts: all
+  tasks:
+    - name: Enable ssh to overcloud nodes from jumphost
+      shell: "cat /home/stack/jumphost_id_rsa.pub | ssh -T {{ SSH_OPTIONS }} heat-admin@{{ item.value }} 'cat >> ~/.ssh/authorized_keys'"
+      with_dict: "{{ overcloud_nodes }}"
+      become: yes
+      become_user: stack
+    - name: Configure external network
+      shell: "{{ overcloudrc }} && {{ item }}"
+      with_items: "{{ external_network_cmds }}"
+    - name: Configure gluon networks
+      shell: "{{ overcloudrc }} && {{ item }}"
+      when: gluon
+      with_items:
+        - openstack network create gluon-network --share --provider-network-type vxlan
+        - openstack subnet create gluon-subnet --no-gateway --no-dhcp --network gluon-network --subnet-range 0.0.0.0/1
+    - name: Find admin project id
+      shell: "{{ overcloudrc }} && openstack project list | grep admin | awk '{print $2}'"
+      register: os_project_id
+    - name: Inject OS_PROJECT_ID and OS_TENANT_NAME into overcloudrc
+      lineinfile:
+        line: "{{ item }}"
+        path: /home/stack/overcloudrc
+      with_items:
+        - "export OS_PROJECT_ID={{ os_project_id.stdout }}"
+        - "export OS_TENANT_NAME=admin"
+    - name: Install Docker
+      yum:
+        name: docker
+        state: present
+      when: yardstick or dovetail
+      become: yes
+    - systemd:
+        name: docker
+        state: started
+        enabled: yes
+      when: yardstick or dovetail
+      become: yes
+    - name: Pull yardstick docker image
+      docker_image:
+        name: opnfv/yardstick
+      when: yardstick
+      become: yes
+    - name: Pull dovetail docker image
+      docker_image:
+        name: opnfv/dovetail
+      when: dovetail
+      become: yes
+    - name: Register SDN VIP
+      shell: "{{ stackrc }} && neutron port-list | grep control_virtual_ip | grep -Eo '([0-9]+\\.){3}[0-9]+'"
+      register: sdn_vip
+      become: yes
+      become_user: stack
+      when: sdn != false
+    - name: Write SDN controller VIP to overcloudrc
+      lineinfile:
+        line: "export SDN_CONTROLLER_IP={{ sdn_vip.stdout }}"
+        regexp: 'SDN_CONTROLLER_IP'
+        path: "/home/stack/{{ item }}"
+      when: sdn != false
+      with_items:
+        - overcloudrc
+        - overcloudrc.v3
+    - name: Undercloud NAT - MASQUERADE interface
+      iptables:
+        table: nat
+        chain: POSTROUTING
+        out_interface: eth0
+        jump: MASQUERADE
+      when:
+        - virtual
+        - not external_network_ipv6
+      become: yes
+    - name: Undercloud NAT - MASQUERADE interface with subnet
+      iptables:
+        table: nat
+        chain: POSTROUTING
+        out_interface: eth0
+        jump: MASQUERADE
+        source: "{{ external_cidr }}"
+      when:
+        - virtual
+        - not external_network_ipv6
+      become: yes
+    - name: Undercloud NAT - Allow Forwarding
+      iptables:
+        chain: FORWARD
+        in_interface: eth2
+        jump: ACCEPT
+      when:
+        - virtual
+        - not external_network_ipv6
+      become: yes
+    - name: Undercloud NAT - Allow Stateful Forwarding
+      iptables:
+        chain: FORWARD
+        in_interface: eth2
+        jump: ACCEPT
+        source: "{{ external_cidr }}"
+        ctstate: ESTABLISHED,RELATED
+      when:
+        - virtual
+        - not external_network_ipv6
+      become: yes
+    - name: Undercloud NAT - Save iptables
+      shell: service iptables save
+      become: yes
+      when:
+        - virtual
+        - not external_network_ipv6
+    - name: Create congress datasources
+      shell: "{{ overcloudrc }} && openstack congress datasource create {{ item }}"
+      become: yes
+      become_user: stack
+      when: congress
+      with_items: "{{ congress_datasources }}"
+      ignore_errors: yes
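
The "Register SDN VIP" task above pipes "neutron port-list" through grep to pull the first IPv4 address off the control_virtual_ip port. The same extraction is a short regex in Python; this is only an illustrative sketch (the sample input string is made up), not code from this patch:

    import re

    def find_ipv4(text):
        # Return the first dotted-quad IPv4 address found in text, or None.
        match = re.search(r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}', text)
        return match.group(0) if match else None

    find_ipv4("| control_virtual_ip | ip_address='192.0.2.10' |")  # -> '192.0.2.10'
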
diff --git a/lib/ansible/playbooks/templates/external_vlan_ifcfg.yml.j2 b/lib/ansible/playbooks/templates/external_vlan_ifcfg.yml.j2
new file mode 100644 (file)
index 0000000..c478a7d
--- /dev/null
@@ -0,0 +1,9 @@
+DEVICE=vlan{{ external_network.vlan }}
+ONBOOT=yes
+DEVICETYPE=ovs
+TYPE=OVSIntPort
+BOOTPROTO=static
+IPADDR={{ external_network.ip }}
+PREFIX={{ external_network.prefix }}
+OVS_BRIDGE=br-ctlplane
+OVS_OPTIONS="tag={{ external_network.vlan }}"
diff --git a/lib/ansible/playbooks/templates/virsh_network_default.xml.j2 b/lib/ansible/playbooks/templates/virsh_network_default.xml.j2
new file mode 100644 (file)
index 0000000..d7241d0
--- /dev/null
@@ -0,0 +1,10 @@
+<network>
+  <name>default</name>
+  <bridge name="virbr0"/>
+  <forward/>
+  <ip address="192.168.122.1" netmask="255.255.255.0">
+    <dhcp>
+      <range start="192.168.122.2" end="192.168.122.254"/>
+    </dhcp>
+  </ip>
+</network>
diff --git a/lib/ansible/playbooks/templates/virsh_network_ovs.xml.j2 b/lib/ansible/playbooks/templates/virsh_network_ovs.xml.j2
new file mode 100644 (file)
index 0000000..75a06ee
--- /dev/null
@@ -0,0 +1,6 @@
+<network ipv6='yes'>
+  <name>{{ item }}</name>
+  <forward mode='bridge'/>
+  <bridge name='br-{{ item }}'/>
+  <virtualport type='openvswitch'/>
+</network>
diff --git a/lib/ansible/playbooks/templates/virsh_pool.xml.j2 b/lib/ansible/playbooks/templates/virsh_pool.xml.j2
new file mode 100644 (file)
index 0000000..f6ea498
--- /dev/null
@@ -0,0 +1,6 @@
+<pool type='dir'>
+  <name>default</name>
+  <target>
+    <path>/var/lib/libvirt/images</path>
+  </target>
+</pool>
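
These XML templates are handed to libvirt. For reference, the libvirt Python bindings can define and autostart the same default storage pool; this is a hedged sketch that assumes the template has already been rendered to virsh_pool.xml and that qemu:///system is the correct URI:

    import libvirt

    conn = libvirt.open('qemu:///system')
    with open('virsh_pool.xml') as f:
        pool_xml = f.read()
    pool = conn.storagePoolDefineXML(pool_xml, 0)  # like 'virsh pool-define'
    pool.setAutostart(1)                           # like 'virsh pool-autostart default'
    if not pool.isActive():
        pool.create()                              # like 'virsh pool-start default'
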
diff --git a/lib/ansible/playbooks/undercloud_aarch64.yml b/lib/ansible/playbooks/undercloud_aarch64.yml
new file mode 100644 (file)
index 0000000..5b607c3
--- /dev/null
@@ -0,0 +1,49 @@
+---
+- hosts: all
+  tasks:
+    - name: aarch64 configuration
+      block:
+        - shell: yum -y reinstall grub2-efi shim
+        - copy:
+            src: /boot/efi/EFI/centos/grubaa64.efi
+            dest: /tftpboot/grubaa64.efi
+            remote_src: yes
+        - file:
+            path: /tftpboot/EFI/centos
+            state: directory
+            mode: 0755
+        - copy:
+            content: |
+                     set default=master
+                     set timeout=5
+                     set hidden_timeout_quiet=false
+                     menuentry "master"  {
+                     configfile /tftpboot/\\\$net_default_ip.conf
+                     }
+            dest: /tftpboot/EFI/centos/grub.cfg
+            mode: 0644
+        - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template $pybasedir/drivers/modules/pxe_grub_config.template'
+        - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi'
+        - systemd:
+            name: openstack-ironic-conductor
+            state: restarted
+            enabled: yes
+        - replace:
+            path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
+            regexp: 'linuxefi'
+            replace: 'linux'
+        - replace:
+            path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
+            regexp: 'initrdefi'
+            replace: 'initrd'
+        - lineinfile:
+            path: /tftpboot/map-file
+            insertafter: EOF
+            state: present
+            line: ''
+        - shell: "echo 'r ^/EFI/centos/grub.cfg-(.*) /tftpboot/pxelinux.cfg/\\1' | sudo tee --append /tftpboot/map-file"
+        - systemd:
+            name: xinetd
+            state: restarted
+            enabled: yes
+      become: yes
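
Playbooks such as this one are driven from the Python deploy code. As a rough, non-authoritative illustration, running it by hand against the undercloud only needs the ansible-playbook CLI; the host address, user and key path below are placeholders:

    import subprocess

    subprocess.check_call([
        'ansible-playbook',
        '-i', '192.0.2.1,',                 # trailing comma = inline inventory
        '-u', 'root',
        '--private-key', '/root/.ssh/id_rsa',
        'lib/ansible/playbooks/undercloud_aarch64.yml',
    ])
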
diff --git a/lib/common-functions.sh b/lib/common-functions.sh
deleted file mode 100644 (file)
index 709dbf9..0000000
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Common Functions used by  OPNFV Apex
-# author: Tim Rozet (trozet@redhat.com)
-
-##converts prefix to subnet mask
-##params: prefix
-function prefix2mask {
-  # Number of args to shift, 255..255, first non-255 byte, zeroes
-   set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0
-   [ $1 -gt 1 ] && shift $1 || shift
-   echo ${1-0}.${2-0}.${3-0}.${4-0}
-}
-
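
For comparison with the Python code this patch moves toward, the prefix-to-netmask conversion above is a one-liner with the standard ipaddress module (a minimal sketch, stdlib only):

    import ipaddress

    def prefix2mask(prefix):
        # e.g. 24 -> '255.255.255.0'
        return str(ipaddress.ip_network('0.0.0.0/{}'.format(prefix)).netmask)
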
-##find ip of interface
-##params: interface name, address family
-function find_ip {
-  local af
-  if [[ -z "$1" ]]; then
-    return 1
-  fi
-  if [[ -z "$2" ]]; then
-    af=4
-  else
-    af=$2
-  fi
-
-  python3 -B $LIB/python/apex_python_utils.py find-ip -i $1 -af $af
-}
-
-##attach interface to OVS and set the network config correctly
-##params: bridge to attach to, interface to attach, network type (optional)
-##external indicates attaching to an external interface
-function attach_interface_to_ovs {
-  local bridge interface
-  local if_ip if_mask if_gw if_file ovs_file if_prefix
-  local if_metric if_dns1 if_dns2
-
-  if [[ -z "$1" || -z "$2" ]]; then
-    return 1
-  else
-    bridge=$1
-    interface=$2
-  fi
-
-  if ovs-vsctl list-ports ${bridge} | grep ${interface}; then
-    return 0
-  fi
-
-  if_file=/etc/sysconfig/network-scripts/ifcfg-${interface}
-  ovs_file=/etc/sysconfig/network-scripts/ifcfg-${bridge}
-
-  if [ -e "$if_file" ]; then
-    if_ip=$(sed -n 's/^IPADDR=\(.*\)$/\1/p' ${if_file})
-    if_mask=$(sed -n 's/^NETMASK=\(.*\)$/\1/p' ${if_file})
-    if_gw=$(sed -n 's/^GATEWAY=\(.*\)$/\1/p' ${if_file})
-    if_metric=$(sed -n 's/^METRIC=\(.*\)$/\1/p' ${if_file})
-    if_dns1=$(sed -n 's/^DNS1=\(.*\)$/\1/p' ${if_file})
-    if_dns2=$(sed -n 's/^DNS2=\(.*\)$/\1/p' ${if_file})
-  else
-    echo "ERROR: ifcfg file missing for ${interface}"
-    return 1
-  fi
-
-  if [ -z "$if_mask" ]; then
-    # we can look for PREFIX here, then convert it to NETMASK
-    if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${if_file})
-    if_mask=$(prefix2mask ${if_prefix})
-  fi
-
-  if [[ -z "$if_ip" || -z "$if_mask" ]]; then
-    echo "ERROR: IPADDR or NETMASK/PREFIX missing for ${interface}"
-    return 1
-  elif [[ -z "$if_gw" && "$3" == "external" ]]; then
-    echo "ERROR: GATEWAY missing for ${interface}, which is external"
-    return 1
-  fi
-
-  # move old config file to .orig
-  mv -f ${if_file} ${if_file}.orig
-  echo "DEVICE=${interface}
-DEVICETYPE=ovs
-TYPE=OVSPort
-PEERDNS=no
-BOOTPROTO=static
-NM_CONTROLLED=no
-ONBOOT=yes
-OVS_BRIDGE=${bridge}
-PROMISC=yes" > ${if_file}
-
-
-  # create bridge cfg
-  echo "DEVICE=${bridge}
-DEVICETYPE=ovs
-IPADDR=${if_ip}
-NETMASK=${if_mask}
-BOOTPROTO=static
-ONBOOT=yes
-TYPE=OVSBridge
-PROMISC=yes
-PEERDNS=no" > ${ovs_file}
-
-  if [ -n "$if_gw" ]; then
-    echo "GATEWAY=${if_gw}" >> ${ovs_file}
-  fi
-
-  if [ -n "$if_metric" ]; then
-    echo "METRIC=${if_metric}" >> ${ovs_file}
-  fi
-
-  if [[ -n "$if_dns1" || -n "$if_dns2" ]]; then
-    sed -i '/PEERDNS/c\PEERDNS=yes' ${ovs_file}
-
-    if [ -n "$if_dns1" ]; then
-      echo "DNS1=${if_dns1}" >> ${ovs_file}
-    fi
-
-    if [ -n "$if_dns2" ]; then
-      echo "DNS2=${if_dns2}" >> ${ovs_file}
-    fi
-  fi
-
-  sudo systemctl restart network
-}
-
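
The repeated "sed -n" calls in attach_interface_to_ovs just read KEY=VALUE pairs out of an ifcfg file. In Python the whole file collapses into a dict; this sketch assumes simple unquoted values, and the path shown is only an example:

    def read_ifcfg(path):
        # Parse an ifcfg-style file into a dict of KEY -> value.
        settings = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#') and '=' in line:
                    key, _, value = line.partition('=')
                    settings[key] = value.strip('"')
        return settings

    read_ifcfg('/etc/sysconfig/network-scripts/ifcfg-em1').get('IPADDR')
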
-##detach interface from OVS and set the network config correctly
-##params: bridge to detach from
-##assumes only 1 real interface attached to OVS
-function detach_interface_from_ovs {
-  local bridge
-  local port_output ports_no_orig
-  local net_path
-  local if_ip if_mask if_gw if_prefix
-  local if_metric if_dns1 if_dns2
-
-  net_path=/etc/sysconfig/network-scripts/
-  if [[ -z "$1" ]]; then
-    return 1
-  else
-    bridge=$1
-  fi
-
-  # if no interfaces attached then return
-  if ! ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*"; then
-    return 0
-  fi
-
-  # look for .orig ifcfg files  to use
-  port_output=$(ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*")
-  while read -r line; do
-    if [ -z "$line" ]; then
-      continue
-    elif [ -e ${net_path}/ifcfg-${line}.orig ]; then
-      mv -f ${net_path}/ifcfg-${line}.orig ${net_path}/ifcfg-${line}
-    elif [ -e ${net_path}/ifcfg-${bridge} ]; then
-      if_ip=$(sed -n 's/^IPADDR=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-      if_mask=$(sed -n 's/^NETMASK=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-      if_gw=$(sed -n 's/^GATEWAY=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-      if_metric=$(sed -n 's/^METRIC=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-      if_dns1=$(sed -n 's/^DNS1=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-      if_dns2=$(sed -n 's/^DNS2=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-
-      if [ -z "$if_mask" ]; then
-        if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${net_path}/ifcfg-${bridge})
-        if_mask=$(prefix2mask ${if_prefix})
-      fi
-
-      if [[ -z "$if_ip" || -z "$if_mask" ]]; then
-        echo "ERROR: IPADDR or PREFIX/NETMASK missing for ${bridge} and no .orig file for interface ${line}"
-        return 1
-      fi
-
-      # create if cfg
-      echo "DEVICE=${line}
-IPADDR=${if_ip}
-NETMASK=${if_mask}
-BOOTPROTO=static
-ONBOOT=yes
-TYPE=Ethernet
-NM_CONTROLLED=no
-PEERDNS=no" > ${net_path}/ifcfg-${line}
-
-      if [ -n "$if_gw" ]; then
-        echo "GATEWAY=${if_gw}" >> ${net_path}/ifcfg-${line}
-      fi
-
-      if [ -n "$if_metric" ]; then
-        echo "METRIC=${if_metric}" >> ${net_path}/ifcfg-${line}
-      fi
-
-      if [[ -n "$if_dns1" || -n "$if_dns2" ]]; then
-        sed -i '/PEERDNS/c\PEERDNS=yes' ${net_path}/ifcfg-${line}
-
-        if [ -n "$if_dns1" ]; then
-          echo "DNS1=${if_dns1}" >> ${net_path}/ifcfg-${line}
-        fi
-
-        if [ -n "$if_dns2" ]; then
-          echo "DNS2=${if_dns2}" >> ${net_path}/ifcfg-${line}
-        fi
-      fi
-      break
-    else
-      echo "ERROR: Real interface ${line} attached to bridge, but no interface or ${bridge} ifcfg file exists"
-      return 1
-    fi
-
-  done <<< "$port_output"
-
-  # modify the bridge ifcfg file
-  # to remove IP params
-  sudo sed -i 's/IPADDR=.*//' ${net_path}/ifcfg-${bridge}
-  sudo sed -i 's/NETMASK=.*//' ${net_path}/ifcfg-${bridge}
-  sudo sed -i 's/GATEWAY=.*//' ${net_path}/ifcfg-${bridge}
-  sudo sed -i 's/DNS1=.*//' ${net_path}/ifcfg-${bridge}
-  sudo sed -i 's/DNS2=.*//' ${net_path}/ifcfg-${bridge}
-  sudo sed -i 's/METRIC=.*//' ${net_path}/ifcfg-${bridge}
-  sudo sed -i 's/PEERDNS=.*//' ${net_path}/ifcfg-${bridge}
-
-  sudo systemctl restart network
-}
-
-# Update iptables rule for external network reach internet
-# for virtual deployments
-# params: external_cidr
-function configure_undercloud_nat {
-  local external_cidr
-  if [[ -z "$1" ]]; then
-    return 1
-  else
-    external_cidr=$1
-  fi
-
-  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
-iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
-iptables -t nat -A POSTROUTING -s ${external_cidr} -o eth0 -j MASQUERADE
-iptables -A FORWARD -i eth2 -j ACCEPT
-iptables -A FORWARD -s ${external_cidr} -m state --state ESTABLISHED,RELATED -j ACCEPT
-service iptables save
-EOI
-}
-
-# Interactive prompt handler
-# params: step stage, ex. deploy, undercloud install, etc
-function prompt_user {
-  while [ 1 ]; do
-    echo -n "Would you like to proceed with ${1}? (y/n) "
-    read response
-    if [ "$response" == 'y' ]; then
-      return 0
-    elif [ "$response" == 'n' ]; then
-      return 1
-    else
-      continue
-    fi
-  done
-}
-
-##checks if prefix exists in string
-##params: string, prefix
-##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
-contains_prefix() {
-  local mystr=$1
-  local prefix=$2
-  if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
-    return 0
-  else
-    return 1
-  fi
-}
-
-##verify internet connectivity
-#params: none
-function verify_internet {
-  if ping -c 2 $ping_site > /dev/null; then
-    if ping -c 2 $dnslookup_site > /dev/null; then
-      echo "${blue}Internet connectivity detected${reset}"
-      return 0
-    else
-      echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
-      return 1
-    fi
-  else
-    echo "${red}No internet connectivity detected${reset}"
-    return 1
-  fi
-}
-
-##tests if overcloud nodes have external connectivity
-#params:none
-function test_overcloud_connectivity {
-  for node in $(undercloud_connect stack ". stackrc && nova list" | grep -Eo "controller-[0-9]+|compute-[0-9]+" | tr -d -) ; do
-    if ! overcloud_connect $node "ping -c 2 $ping_site > /dev/null"; then
-      echo "${blue}Node ${node} was unable to ping site ${ping_site}${reset}"
-      return 1
-    fi
-  done
-  echo "${blue}Overcloud external connectivity OK${reset}"
-}
-
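
verify_internet and test_overcloud_connectivity above shell out to ping. A jumphost-side equivalent in Python can be sketched with the standard socket module; the two target sites are illustrative defaults, not the project's configured ping_site/dnslookup_site values:

    import socket

    def verify_internet(ping_site='8.8.8.8', dnslookup_site='www.google.com'):
        # Reachability check (TCP to a well-known resolver) plus a DNS lookup.
        try:
            socket.create_connection((ping_site, 53), timeout=5).close()
            socket.gethostbyname(dnslookup_site)
            return True
        except OSError:
            return False
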
diff --git a/lib/configure-deps-functions.sh b/lib/configure-deps-functions.sh
deleted file mode 100755 (executable)
index 4c00fbf..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##download dependencies if missing and configure host
-#params: none
-function configure_deps {
-  if ! verify_internet; then
-    echo "${red}Will not download dependencies${reset}"
-    internet=false
-  fi
-
-  # verify ip forwarding
-  if sysctl net.ipv4.ip_forward | grep 0; then
-    sudo sysctl -w net.ipv4.ip_forward=1
-    sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
-  fi
-
-  # ensure no dhcp server is running on jumphost
-  if ! sudo systemctl status dhcpd | grep dead; then
-    echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
-    sudo systemctl stop dhcpd
-    sudo systemctl disable dhcpd
-  fi
-
-  # ensure networks are configured
-  systemctl status libvirtd || systemctl start libvirtd
-  systemctl status openvswitch || systemctl start openvswitch
-
-  # For baremetal we only need to create/attach Undercloud to admin and external
-  if [ "$virtual" == "FALSE" ]; then
-    virsh_enabled_networks="admin external"
-  else
-    virsh_enabled_networks=$enabled_network_list
-  fi
-
-  # ensure default network is configured correctly
-  libvirt_dir="/usr/share/libvirt/networks"
-  virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
-  virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
-  virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
-
-  if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
-    for network in ${enabled_network_list}; do
-      echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
-      ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
-      virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
-<network ipv6='yes'>
-  <name>$network</name>
-  <forward mode='bridge'/>
-  <bridge name='${NET_MAP[$network]}'/>
-  <virtualport type='openvswitch'/>
-</network>
-EOF
-      if ! (virsh net-list --all | grep " $network " > /dev/null); then
-          echo "${red}ERROR: unable to create network: ${network}${reset}"
-          exit 1;
-      fi
-      rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
-      virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
-      virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
-    done
-
-    echo -e "${blue}INFO: Bridges set: ${reset}"
-    ovs-vsctl list-br
-
-    # bridge interfaces to correct OVS instances for baremetal deployment
-    for network in ${enabled_network_list}; do
-      if [[ "$network" != "admin" && "$network" != "external" ]]; then
-        continue
-      fi
-      this_interface=$(eval echo \${${network}_installer_vm_members})
-      # check if this is a bridged interface for this network
-      if [[ -n "$this_interface" && "$this_interface" != "none" ]]; then
-        if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
-          echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
-          exit 1
-        else
-          echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
-        fi
-      else
-        echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
-        exit 1
-      fi
-    done
-  else
-    # verify virtualbmc is installed for a virtual install
-    if ! rpm -q python2-virtualbmc; then
-       echo -e "${red}ERROR: Package python2-virtualbmc is required to do a virtual install.$reset"
-       exit 1
-    fi
-    for network in ${OPNFV_NETWORK_TYPES}; do
-      if ! ovs-vsctl --may-exist add-br ${NET_MAP[$network]}; then
-       echo -e "${red}ERROR: Failed to create ovs bridge ${NET_MAP[$network]}${reset}"
-       exit 1
-      fi
-      echo "${blue}INFO: Creating Virsh Network: $network${reset}"
-      virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
-<network ipv6='yes'>
-<name>$network</name>
-<forward mode='bridge'/>
-<bridge name='${NET_MAP[$network]}'/>
-<virtualport type='openvswitch'/>
-</network>
-EOF
-      if ! (virsh net-list --all | grep $network > /dev/null); then
-          echo "${red}ERROR: unable to create network: ${network}${reset}"
-          exit 1;
-      fi
-      rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
-      virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
-      virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
-    done
-
-    echo -e "${blue}INFO: Bridges set: ${reset}"
-    ovs-vsctl list-br
-  fi
-
-  echo -e "${blue}INFO: virsh networks set: ${reset}"
-  virsh net-list
-
-  # ensure storage pool exists and is started
-  virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
-  virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)
-
-  # Virt flag check is Arch dependent on x86
-  if [ "$(uname -i)" == 'x86_64' ]; then
-      if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
-        echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n  \
-Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
-      fi
-
-      if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
-      if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi
-
-      if ! lsmod | grep kvm > /dev/null; then
-        echo "${red}kvm kernel modules not loaded!${reset}"
-        return 1
-      fi
-
-      # try to enable nested kvm
-      if [ "$virtual" == "TRUE" ]; then
-        nested_kvm=`cat /sys/module/kvm_intel/parameters/nested`
-        if [ "$nested_kvm" != "Y" ]; then
-          # try to enable nested kvm
-          echo 'options kvm-intel nested=1' > /etc/modprobe.d/kvm_intel.conf
-          if rmmod kvm_intel; then
-            modprobe kvm_intel
-          fi
-          nested_kvm=`cat /sys/module/kvm_intel/parameters/nested`
-        fi
-        if [ "$nested_kvm" != "Y" ]; then
-          echo "${red}Cannot enable nested kvm, falling back to qemu for deployment${reset}"
-          DEPLOY_OPTIONS+=" --libvirt-type qemu"
-        else
-          echo "${blue}Nested kvm enabled, deploying with kvm acceleration${reset}"
-        fi
-      fi
-  fi
-
-  ##sshkeygen for root
-  if [ ! -e ~/.ssh/id_rsa.pub ]; then
-    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
-  fi
-
-  echo "${blue}All dependencies installed and running${reset}"
-}
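
The virtualization checks near the end of configure_deps read /proc/cpuinfo and the kvm_intel nested parameter. Those probes translate directly to Python; a sketch that is x86-only like the original:

    import re

    def virt_support():
        # Return (vmx_or_svm_present, nested_kvm_enabled).
        with open('/proc/cpuinfo') as f:
            has_virt = bool(re.search(r'^flags\s*:.*\b(vmx|svm)\b',
                                      f.read(), re.MULTILINE))
        try:
            with open('/sys/module/kvm_intel/parameters/nested') as f:
                nested = f.read().strip() in ('Y', '1')
        except FileNotFoundError:
            nested = False
        return has_virt, nested
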
diff --git a/lib/configure-vm b/lib/configure-vm
deleted file mode 100755 (executable)
index 5cb4521..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import math
-import os
-import random
-
-import libvirt
-
-templatedir = os.getenv('LIB', '/var/opt/opnfv/lib') + '/installer/'
-
-MAX_NUM_MACS = math.trunc(0xff/2)
-
-
-def generate_baremetal_macs(count=1):
-    """Generate an Ethernet MAC address suitable for baremetal testing."""
-    # NOTE(dprince): We generate our own bare metal MAC address's here
-    # instead of relying on libvirt so that we can ensure the
-    # locally administered bit is set low. (The libvirt default is
-    # to set the 2nd MSB high.) This effectively allows our
-    # fake baremetal VMs to more accurately behave like real hardware
-    # and fixes issues with bridge/DHCP configurations which rely
-    # on the fact that bridges assume the MAC address of the lowest
-    # attached NIC.
-    # MACs generated for a given machine will also be in sequential
-    # order, which matches how most BM machines are laid out as well.
-    # Additionally we increment each MAC by two places.
-    macs = []
-
-    if count > MAX_NUM_MACS:
-        raise ValueError("The MAX num of MACS supported is %i." % MAX_NUM_MACS)
-
-    base_nums = [0x00,
-               random.randint(0x00, 0xff),
-               random.randint(0x00, 0xff),
-               random.randint(0x00, 0xff),
-               random.randint(0x00, 0xff)]
-    base_mac = ':'.join(map(lambda x: "%02x" % x, base_nums))
-
-    start = random.randint(0x00, 0xff)
-    if (start + (count * 2)) > 0xff:
-        # leave room to generate macs in sequence
-        start = 0xff - count * 2
-    for num in range(0, count*2, 2):
-        mac = start + num
-        macs.append(base_mac + ":" + ("%02x" % mac))
-    return macs
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Configure a kvm virtual machine for the seed image.")
-    parser.add_argument('--name', default='seed',
-        help='the name to give the machine in libvirt.')
-    parser.add_argument('--image',
-        help='Use a custom image file (must be qcow2).')
-    parser.add_argument('--diskbus', default='sata',
-        help='Choose an alternate bus type for the disk')
-    parser.add_argument('--baremetal-interface', nargs='+', default=['brbm'],
-        help='The interface which bare metal nodes will be connected to.')
-    parser.add_argument('--engine', default='kvm',
-        help='The virtualization engine to use')
-    parser.add_argument('--arch', default='i686',
-        help='The architecture to use')
-    parser.add_argument('--memory', default='2097152',
-        help="Maximum memory for the VM in KB.")
-    parser.add_argument('--cpus', default='1',
-        help="CPU count for the VM.")
-    parser.add_argument('--bootdev', default='hd',
-        help="What boot device to use (hd/network).")
-    parser.add_argument('--seed', default=False, action='store_true',
-        help='Create a seed vm with two interfaces.')
-    parser.add_argument('--ovsbridge', default="",
-        help='Place the seed public interface on this ovs bridge.')
-    parser.add_argument('--libvirt-nic-driver', default='virtio',
-        help='The libvirt network driver to use')
-    parser.add_argument('--enable-serial-console', action="store_true",
-            help='Enable a serial console')
-    parser.add_argument('--direct-boot',
-            help='Enable directboot to <value>.{vmlinux & initrd}')
-    parser.add_argument('--kernel-arg', action="append", dest='kernel_args',
-            help='Kernel arguments, use multiple time for multiple args.')
-    parser.add_argument('--uri', default='qemu:///system',
-        help='The server uri with which to connect.')
-    args = parser.parse_args()
-    with file(templatedir + '/domain.xml', 'rb') as f:
-        source_template = f.read()
-    imagefile = '/var/lib/libvirt/images/seed.qcow2'
-    if args.image:
-        imagefile = args.image
-    imagefile = os.path.realpath(imagefile)
-    params = {
-        'name': args.name,
-        'imagefile': imagefile,
-        'engine': args.engine,
-        'arch': args.arch,
-        'memory': args.memory,
-        'cpus': args.cpus,
-        'bootdev': args.bootdev,
-        'network': '',
-        'enable_serial_console': '',
-        'direct_boot': '',
-        'kernel_args': '',
-        'user_interface': '',
-        }
-    if args.image is not None:
-        params['imagefile'] = args.image
-
-    # Configure the bus type for the target disk device
-    params['diskbus'] = args.diskbus
-    nicparams = {
-        'nicdriver': args.libvirt_nic_driver,
-        'ovsbridge': args.ovsbridge,
-        }
-    if args.seed:
-        if args.ovsbridge:
-            params['network'] = """
-      <interface type='bridge'>
-        <source bridge='%(ovsbridge)s'/>
-        <virtualport type='openvswitch'/>
-        <model type='%(nicdriver)s'/>
-      </interface>""" % nicparams
-        else:
-            params['network'] = """
-      <!-- regular natted network, for access to the vm -->
-      <interface type='network'>
-        <source network='default'/>
-        <model type='%(nicdriver)s'/>
-      </interface>""" % nicparams
-
-    macs = generate_baremetal_macs(len(args.baremetal_interface))
-
-    params['bm_network'] = ""
-    for bm_interface, mac in zip(args.baremetal_interface, macs):
-        bm_interface_params = {
-            'bminterface': bm_interface,
-            'bmmacaddress': mac,
-            'nicdriver': args.libvirt_nic_driver,
-            }
-        params['bm_network'] += """
-          <!-- bridged 'bare metal' network on %(bminterface)s -->
-          <interface type='network'>
-            <mac address='%(bmmacaddress)s'/>
-            <source network='%(bminterface)s'/>
-            <model type='%(nicdriver)s'/>
-          </interface>""" % bm_interface_params
-
-    if args.enable_serial_console:
-        params['enable_serial_console'] = """
-        <serial type='pty'>
-          <target port='0'/>
-        </serial>
-        <console type='pty'>
-          <target type='serial' port='0'/>
-        </console>
-        """
-    if args.direct_boot:
-        params['direct_boot'] = """
-        <kernel>/var/lib/libvirt/images/%(direct_boot)s.vmlinuz</kernel>
-        <initrd>/var/lib/libvirt/images/%(direct_boot)s.initrd</initrd>
-        """ % { 'direct_boot': args.direct_boot }
-    if args.kernel_args:
-        params['kernel_args'] = """
-        <cmdline>%s</cmdline>
-        """ % ' '.join(args.kernel_args)
-
-    if args.arch == 'aarch64':
-
-        params['direct_boot'] += """
-        <loader readonly='yes' type='pflash'>/usr/share/AAVMF/AAVMF_CODE.fd</loader>
-        <nvram>/var/lib/libvirt/qemu/nvram/centos7.0_VARS.fd</nvram>
-        """
-        params['user_interface'] = """
-        <controller type='virtio-serial' index='0'>
-          <address type='virtio-mmio'/>
-        </controller>
-        <serial type='pty'>
-          <target port='0'/>
-        </serial>
-        <console type='pty'>
-          <target type='serial' port='0'/>
-        </console>
-        <channel type='unix'>
-          <target type='virtio' name='org.qemu.guest_agent.0'/>
-          <address type='virtio-serial' controller='0' bus='0' port='1'/>
-        </channel>
-        """
-    else:
-        params['user_interface'] = """
-        <input type='mouse' bus='ps2'/>
-        <graphics type='vnc' port='-1' autoport='yes'/>
-        <video>
-          <model type='cirrus' vram='9216' heads='1'/>
-        </video>
-        """
-
-
-    libvirt_template = source_template % params
-    conn=libvirt.open(args.uri)
-    a = conn.defineXML(libvirt_template)
-    print ("Created machine %s with UUID %s" % (args.name, a.UUIDString()))
-
-if __name__ == '__main__':
-    main()
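
For reference, generate_baremetal_macs above keeps the locally-administered bit low and spaces the MACs two apart so nodes stay in sequential order. A call for a three-node virtual deployment returns something of this shape (the random middle octets differ on every run):

    generate_baremetal_macs(count=3)
    # -> ['00:5e:17:c3:88:10', '00:5e:17:c3:88:12', '00:5e:17:c3:88:14']
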
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
deleted file mode 100755 (executable)
index b52d0c2..0000000
+++ /dev/null
@@ -1,503 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##preps the overcloud for deployment and launches the deploy
-##params: none
-function overcloud_deploy {
-  local num_compute_nodes
-  local num_control_nodes
-  local dpdk_cores pmd_cores socket_mem ovs_dpdk_perf_flag ovs_option_heat_arr
-  declare -A ovs_option_heat_arr
-
-  ovs_option_heat_arr['dpdk_cores']=HostCpusList
-  ovs_option_heat_arr['pmd_cores']=NeutronDpdkCoreList
-  ovs_option_heat_arr['socket_memory']=NeutronDpdkSocketMemory
-  ovs_option_heat_arr['memory_channels']=NeutronDpdkMemoryChannels
-
-  # OPNFV Default Environment and Network settings
-  DEPLOY_OPTIONS+=" -e ${ENV_FILE}"
-  DEPLOY_OPTIONS+=" -e network-environment.yaml"
-
-  # get number of nodes available in inventory
-  num_control_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:control /home/stack/instackenv.json")
-  num_compute_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:compute /home/stack/instackenv.json")
-
-  # Custom Deploy Environment Templates
-  if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
-    if [ "${deploy_options_array['sfc']}" == 'True' ]; then
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-sfc-opendaylight.yaml"
-    elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-bgpvpn-opendaylight.yaml"
-      if [ "${deploy_options_array['gluon']}" == 'True' ]; then
-        DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/services/gluon.yaml"
-      fi
-    elif [ "${deploy_options_array['vpp']}" == 'True' ]; then
-      if [ "${deploy_options_array['odl_vpp_netvirt']}" == "True" ]; then
-        DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-netvirt-vpp.yaml"
-      elif [ "${deploy_options_array['odl_vpp_routing_node']}" == "dvr" ]; then
-        DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-fdio-dvr.yaml"
-      else
-        DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-honeycomb.yaml"
-      fi
-    else
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight.yaml"
-    fi
-    SDN_IMAGE=opendaylight
-  elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
-    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
-    SDN_IMAGE=opendaylight
-  elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
-    if [ "${deploy_options_array['sfc']}" == 'True' ]; then
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-onos-sfc.yaml"
-    else
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-onos.yaml"
-    fi
-    SDN_IMAGE=onos
-  elif [ "${deploy_options_array['sdn_controller']}" == 'ovn' ]; then
-    if [[ "$ha_enabled" == "True" ]]; then
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-ovn-ha.yaml"
-      echo "${red}OVN HA support is not not supported... exiting.${reset}"
-      exit 1
-    else
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-ovn.yaml"
-    fi
-    SDN_IMAGE=opendaylight
-  elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
-    echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
-    exit 1
-  elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
-    echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
-    if [ "${deploy_options_array['vpp']}" == 'True' ]; then
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-vpp.yaml"
-    elif [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
-      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ovs-dpdk.yaml"
-    fi
-    SDN_IMAGE=opendaylight
-  else
-    echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
-    echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}"
-    exit 1
-  fi
-
-  # Enable Tacker
-  if [ "${deploy_options_array['tacker']}" == 'True' ]; then
-    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/enable_tacker.yaml"
-  fi
-
-  # Enable Congress
-  if [ "${deploy_options_array['congress']}" == 'True' ]; then
-    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/enable_congress.yaml"
-  fi
-
-  # Enable Real Time Kernel (kvm4nfv)
-  if [ "${deploy_options_array['rt_kvm']}" == 'True' ]; then
-    DEPLOY_OPTIONS+=" -e /home/stack/enable_rt_kvm.yaml"
-  fi
-
-  # Enable Barometer service
-  if [ "${deploy_options_array['barometer']}" == 'True' ]; then
-    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/enable_barometer.yaml"
-  fi
-
-# Make sure the correct overcloud image is available
-  if [ ! -f $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
-      echo "${red} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
-      echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
-      exit 1
-  fi
-
-  echo "Copying overcloud image to Undercloud"
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
-  scp ${SSH_OPTIONS[@]} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
-
-  # disable neutron openvswitch agent from starting
-  if [[ -n "${deploy_options_array['sdn_controller']}" && "${deploy_options_array['sdn_controller']}" != 'False' ]]; then
-      echo -e "${blue}INFO: Disabling neutron-openvswitch-agent from systemd${reset}"
-      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-      LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -f /etc/systemd/system/multi-user.target.wants/neutron-openvswitch-agent.service" \
-                                               --run-command "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent.service" \
-                                               -a overcloud-full.qcow2
-EOI
-  fi
-
-  if [ "${deploy_options_array['vpn']}" == 'True' ]; then
-      echo -e "${blue}INFO: Enabling ZRPC and Quagga${reset}"
-      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-      LIBGUESTFS_BACKEND=direct virt-customize \
-         --run-command "systemctl enable zrpcd" \
-         -a overcloud-full.qcow2
-EOI
-  fi
-
-  # Install ovs-dpdk inside the overcloud image if it is enabled.
-  if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' || "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
-    # install dpdk packages before ovs
-    echo -e "${blue}INFO: Enabling kernel modules for dpdk inside overcloud image${reset}"
-
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-      cat << EOF > vfio_pci.modules
-#!/bin/bash
-exec /sbin/modprobe vfio_pci >/dev/null 2>&1
-EOF
-
-      cat << EOF > uio_pci_generic.modules
-#!/bin/bash
-exec /sbin/modprobe uio_pci_generic >/dev/null 2>&1
-EOF
-
-      LIBGUESTFS_BACKEND=direct virt-customize --upload vfio_pci.modules:/etc/sysconfig/modules/ \
-                                               --upload uio_pci_generic.modules:/etc/sysconfig/modules/ \
-                                               --run-command "chmod 0755 /etc/sysconfig/modules/vfio_pci.modules" \
-                                               --run-command "chmod 0755 /etc/sysconfig/modules/uio_pci_generic.modules" \
-                                               -a overcloud-full.qcow2
-
-      if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
-        sed -i "/OS::TripleO::ComputeExtraConfigPre:/c\  OS::TripleO::ComputeExtraConfigPre: ./ovs-dpdk-preconfig.yaml" network-environment.yaml
-      fi
-
-EOI
-
-  elif [ "${deploy_options_array['dataplane']}" != 'ovs' ]; then
-    echo "${red}${deploy_options_array['dataplane']} not supported${reset}"
-    exit 1
-  fi
-
-  if [ "$debug" == 'TRUE' ]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex"
-  fi
-
-  # upgrade ovs to ovs 2.6.1 with NSH support if SFC is enabled
-  if [[ "${deploy_options_array['sfc']}" == 'True' && "${deploy_options_array['dataplane']}" == 'ovs' ]]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-         LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_kmod_rpm_name}" \
-                                                  --run-command "yum upgrade -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_rpm_name}" \
-                                                  -a overcloud-full.qcow2
-EOI
-  fi
-
-  # Patch neutron with using OVS external interface for router and add generic linux NS interface driver
-  if [[ "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-      LIBGUESTFS_BACKEND=direct virt-customize --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron-patch-NSDriver.patch" \
-                                               -a overcloud-full.qcow2
-EOI
-
-    # Configure routing node for odl-fdio
-    if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
-      if [[ "${deploy_options_array['odl_vpp_routing_node']}" == 'dvr' ]]; then
-        ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-          sed -i "/OS::TripleO::Services::NeutronDhcpAgent/d" ${ENV_FILE}
-          sed -i "/NeutronDhcpAgentsPerNetwork:/ c\  NeutronDhcpAgentsPerNetwork: $num_compute_nodes" ${ENV_FILE}
-          sed -i "$ a\    - OS::TripleO::Services::NeutronDhcpAgent" ${ENV_FILE}
-# TODO: Update VPP version to 17.10 when specific version is known
-#          LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum remove -y vpp-lib" \
-#                                                   --run-command "yum install -y /root/fdio_dvr/*.rpm" \
-#                                                   --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
-#                                                   -a overcloud-full.qcow2
-EOI
-      else
-        ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-          sed -i "/opendaylight::vpp_routing_node:/c\    opendaylight::vpp_routing_node: ${deploy_options_array['odl_vpp_routing_node']}.${domain_name}" ${ENV_FILE}
-EOI
-      fi
-      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-        sed -i "/ControllerExtraConfig:/ c\  ControllerExtraConfig:\n    tripleo::profile::base::neutron::agents::honeycomb::interface_role_mapping:  ['${tenant_nic_mapping_controller_members}:tenant-interface']" ${ENV_FILE}
-        sed -i "/NovaComputeExtraConfig:/ c\  NovaComputeExtraConfig:\n    tripleo::profile::base::neutron::agents::honeycomb::interface_role_mapping:  ['${tenant_nic_mapping_compute_members}:tenant-interface','${external_nic_mapping_compute_members}:public-interface']" ${ENV_FILE}
-EOI
-
-    fi
-  fi
-
-  if [ -n "${deploy_options_array['performance']}" ]; then
-    ovs_dpdk_perf_flag="False"
-    for option in "${performance_options[@]}" ; do
-      if [ "${arr[1]}" == "vpp" ]; then
-        if [ "${arr[0]}" == "Compute" ]; then
-          role='NovaCompute'
-        else
-          role=${arr[0]}
-        fi
-        if [ "${arr[2]}" == "main-core" ]; then
-          ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-            sed -i "/${role}ExtraConfig:/ c\  ${role}ExtraConfig:\n    fdio::vpp_cpu_main_core: '${arr[3]}'" ${ENV_FILE}
-EOI
-        elif [ "${arr[2]}" == "corelist-workers" ]; then
-          ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-            sed -i "/${role}ExtraConfig:/ c\  ${role}ExtraConfig:\n    fdio::vpp_cpu_corelist_workers: '${arr[3]}'" ${ENV_FILE}
-EOI
-        fi
-      fi
-      arr=($option)
-      # use compute's kernel settings for all nodes for now.
-      if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "kernel" ]; then
-        kernel_args+=" ${arr[2]}=${arr[3]}"
-      fi
-      if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "ovs" ]; then
-         eval "${arr[2]}=${arr[3]}"
-         ovs_dpdk_perf_flag="True"
-      fi
-    done
-
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-      sed -i "/ComputeKernelArgs:/c\  ComputeKernelArgs: '$kernel_args'" ${ENV_FILE}
-      sed -i "$ a\resource_registry:\n  OS::TripleO::NodeUserData: first-boot.yaml" ${ENV_FILE}
-      sed -i "/NovaSchedulerDefaultFilters:/c\  NovaSchedulerDefaultFilters: 'RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter'" ${ENV_FILE}
-EOI
-
-    if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' && "$ovs_dpdk_perf_flag" == "True" ]]; then
-      for ovs_option in ${!ovs_option_heat_arr[@]}; do
-        if [ -n "${!ovs_option}" ]; then
-          ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-            sed -i "/${ovs_option_heat_arr[$ovs_option]}:/c\  ${ovs_option_heat_arr[$ovs_option]}: '${!ovs_option}'" ${ENV_FILE}
-EOI
-        fi
-      done
-    fi
-  fi
-
-  if [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
-    if [ "${deploy_options_array['dataplane']}" == "fdio" ]; then
-      if [ "$tenant_nic_mapping_controller_members" == "$tenant_nic_mapping_compute_members" ]; then
-        echo -e "${blue}INFO: nosdn fdio deployment...installing correct vpp packages...${reset}"
-        ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-          sed -i "/NeutronVPPAgentPhysnets:/c\  NeutronVPPAgentPhysnets: 'datacentre:${tenant_nic_mapping_controller_members}'" ${ENV_FILE}
-EOI
-      else
-        echo -e "${red}Compute and Controller must use the same tenant nic name, please modify network setting file.${reset}"
-        exit 1
-      fi
-    fi
-  fi
-
-  # Set ODL version accordingly
-  if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then
-    case "${deploy_options_array['odl_version']}" in
-      carbon) odl_version=''
-              ;;
-      nitrogen) odl_version='nitrogen'
-              ;;
-      *) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}.  Please use 'carbon' or 'nitrogen'.${reset}"
-         exit 1
-         ;;
-    esac
-
-    if [[  -n "$odl_version" ]]; then
-        ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-          LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
-                                                   --run-command "yum -y install /root/${odl_version}/*" \
-                                                   --run-command "rm -rf /etc/puppet/modules/opendaylight" \
-                                                   --run-command "cd /etc/puppet/modules/ && tar xzf /root/puppet-opendaylight-master.tar.gz" \
-                                                   -a overcloud-full.qcow2
-EOI
-    fi
-  fi
-
-  # Override ODL for fdio scenarios
-  if [[ "${deploy_options_array['odl_vpp_netvirt']}" == 'True' && "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-      LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
-                                               --run-command "yum -y install /root/opendaylight-7.0.0-0.1.20170531snap665.el7.noarch.rpm" \
-                                               -a overcloud-full.qcow2
-EOI
-#  elif [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
-#    if [[ "${deploy_options_array['odl_vpp_routing_node']}" != 'dvr' ]]; then
-#      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-#        LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -rf /opt/opendaylight/*" \
-#                                                 --run-command "tar zxvf /root/fdio_odl_carbon.tar.gz -C /opt/opendaylight/ --strip-components=1" \
-#                                                 --run-command "chown odl:odl -R /opt/opendaylight" \
-#                                                 -a overcloud-full.qcow2
-#EOI
-#    fi
-  fi
-
-  # Override ODL if we enable dvr for fdio
-# TODO: Update ODL version when specific version is known.
-#  if [[ "${deploy_options_array['odl_vpp_routing_node']}" == 'dvr' ]]; then
-#    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-#      LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -rf /opt/opendaylight/*" \
-#                                               --run-command "tar zxvf /root/fdio_odl_carbon.tar.gz -C /opt/opendaylight/ --strip-components=1" \
-#                                               --run-command "chown odl:odl -R /opt/opendaylight" \
-#                                               -a overcloud-full.qcow2
-#EOI
-#  fi
-
-
-
-  # check if ceph should be enabled
-  if [ "${deploy_options_array['ceph']}" == 'True' ]; then
-    DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
-  fi
-
-  if [ "${deploy_options_array['sdn_controller']}" == 'ovn' ]; then
-    # The epoch in deloran's ovs is 1: and in leif's is 0:
-    # so we have to execute a downgrade instead of an update
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-      LIBGUESTFS_BACKEND=direct virt-customize \
-        --run-command "cd /root/ovs28 && yum update -y *openvswitch*" \
-        --run-command "cd /root/ovs28 && yum downgrade -y *openvswitch*" \
-        -a overcloud-full.qcow2
-EOI
-  fi
-
-  # check if HA is enabled
-  if [[ "$ha_enabled" == "True" ]]; then
-    if [ "$num_control_nodes" -lt 3 ]; then
-      echo -e "${red}ERROR: Number of control nodes in inventory is less than 3 and HA is enabled: ${num_control_nodes}. Check your inventory file.${reset}"
-      exit 1
-    else
-     DEPLOY_OPTIONS+=" --control-scale ${num_control_nodes}"
-     DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
-     echo -e "${blue}INFO: Number of control nodes set for deployment: ${num_control_nodes}${reset}"
-    fi
-  else
-    if [ "$num_control_nodes" -lt 1 ]; then
-      echo -e "${red}ERROR: Number of control nodes in inventory is less than 1: ${num_control_nodes}. Check your inventory file.${reset}"
-      exit 1
-    fi
-  fi
-
-  if [ "$num_compute_nodes" -le 0 ]; then
-    echo -e "${red}ERROR: Invalid number of compute nodes: ${num_compute_nodes}. Check your inventory file.${reset}"
-    exit 1
-  else
-    echo -e "${blue}INFO: Number of compute nodes set for deployment: ${num_compute_nodes}${reset}"
-    DEPLOY_OPTIONS+=" --compute-scale ${num_compute_nodes}"
-  fi
-
-  DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
-
-  DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
-  if [[ "$virtual" == "TRUE" ]]; then
-     DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
-     echo 'Ensuring Virtual BMC device status'
-     for i in $(vbmc list | grep down | awk '{ print $2}'); do
-         vbmc start $i
-         sleep 5
-     done
-     vbmc list
-  else
-    DEPLOY_OPTIONS+=" -e baremetal-environment.yaml"
-  fi
-
-  echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
-
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-# Create a key for use by nova for live migration
-echo "Creating nova SSH key for nova resize support"
-ssh-keygen -f nova_id_rsa -b 1024 -P ""
-public_key=\'\$(cat nova_id_rsa.pub | cut -d ' ' -f 2)\'
-sed -i "s#replace_public_key:#key: \$public_key#g" ${ENV_FILE}
-python -c 'open("opnfv-environment-new.yaml", "w").write((open("${ENV_FILE}").read().replace("replace_private_key:", "key: \"" + "".join(open("nova_id_rsa").readlines()).replace("\\n","\\\n") + "\"")))'
-mv -f opnfv-environment-new.yaml ${ENV_FILE}
-
-source stackrc
-set -o errexit
-# Workaround for APEX-207 where sometimes swift proxy is down
-if ! sudo systemctl status openstack-swift-proxy > /dev/null; then
-  sudo systemctl restart openstack-swift-proxy
-fi
-echo "Uploading overcloud glance images"
-openstack overcloud image upload
-
-echo "Configuring undercloud and discovering nodes"
-
-
-if [[ -z "$virtual" ]]; then
-  openstack overcloud node import instackenv.json
-  openstack overcloud node introspect --all-manageable --provide
-  #if [[ -n "$root_disk_list" ]]; then
-    # TODO: replace node configure boot with ironic node-update
-    # TODO: configure boot is not used in ocata here anymore
-    #openstack overcloud node configure boot --root-device=${root_disk_list}
-    #https://github.com/openstack/tripleo-quickstart-extras/blob/master/roles/overcloud-prep-images/templates/overcloud-prep-images.sh.j2#L73-L130
-    #ironic node-update $ironic_node add properties/root_device='{"{{ node['key'] }}": "{{ node['value'] }}"}'
-  #fi
-else
-  openstack overcloud node import --provide instackenv.json
-fi
-
-openstack flavor set --property "cpu_arch"="x86_64" baremetal
-openstack flavor set --property "cpu_arch"="x86_64" control
-openstack flavor set --property "cpu_arch"="x86_64" compute
-echo "Configuring nameserver on ctlplane network"
-dns_server_ext=''
-for dns_server in ${dns_servers}; do
-  dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
-done
-openstack subnet set ctlplane-subnet \${dns_server_ext}
-sed -i '/CloudDomain:/c\  CloudDomain: '${domain_name} ${ENV_FILE}
-echo "Executing overcloud deployment, this could run for an extended period without output."
-sleep 60 #wait for Hypervisor stats to check-in to nova
-# save deploy command so it can be used for debugging
-cat > deploy_command << EOF
-openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
-EOF
-EOI
-
-  if [ "$interactive" == "TRUE" ]; then
-    if ! prompt_user "Overcloud Deployment"; then
-      echo -e "${blue}INFO: User requests exit${reset}"
-      exit 0
-    fi
-  fi
-
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source stackrc
-openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
-if ! openstack stack list | grep CREATE_COMPLETE 1>/dev/null; then
-  $(typeset -f debug_stack)
-  debug_stack
-  exit 1
-fi
-EOI
-
-  # Configure DPDK and restart ovs agent after bringing up br-phy
-  if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "DPDK config failed, exiting..."; exit 1)
-source stackrc
-set -o errexit
-for node in \$(nova list | grep novacompute | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-echo "Checking DPDK status and bringing up br-phy on \$node"
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-set -o errexit
-sudo dpdk-devbind -s
-sudo ifup br-phy
-if [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
-  echo "Restarting openvswitch agent to pick up VXLAN tunneling"
-  sudo systemctl restart neutron-openvswitch-agent
-fi
-EOF
-done
-EOI
-  elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "SFC config failed, exiting..."; exit 1)
-source stackrc
-set -o errexit
-for node in \$(nova list | grep controller | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-echo "Configuring networking_sfc.conf on \$node"
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-set -o errexit
-sudo ln -s /etc/neutron/networking_sfc.conf /etc/neutron/conf.d/neutron-server/networking_sfc.conf
-sudo systemctl restart neutron-server
-EOF
-done
-EOI
-  fi
-
-  if [ "$debug" == 'TRUE' ]; then
-      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source overcloudrc
-echo "Keystone Endpoint List:"
-openstack endpoint list
-echo "Keystone Service List"
-openstack service list
-EOI
-  fi
-}
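
Most of this file appends "-e <env file>" fragments and scale flags to DEPLOY_OPTIONS before running "openstack overcloud deploy". In Python the same command is more naturally assembled as an argument list; the helper name and parameters below are hypothetical, while the flags are the ones used above:

    def build_deploy_command(env_files, control_scale, compute_scale, ntp_server):
        cmd = ['openstack', 'overcloud', 'deploy', '--templates', '--timeout', '90']
        for env in env_files:          # e.g. network-environment.yaml, ...
            cmd += ['-e', env]
        cmd += ['--control-scale', str(control_scale),
                '--compute-scale', str(compute_scale),
                '--control-flavor', 'control', '--compute-flavor', 'compute',
                '--ntp-server', ntp_server]
        return cmd
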
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
deleted file mode 100755 (executable)
index 2114c0b..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Parser functions used by OPNFV Apex
-
-##parses network settings yaml into globals
-parse_network_settings() {
-  local output
-
-  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml); then
-      echo -e "${blue}${output}${reset}"
-      eval "$output"
-  else
-      echo -e "${red}ERROR: Failed to parse network settings file $NETSETS ${reset}"
-      exit 1
-  fi
-
-  if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
-    if [[ ! $enabled_network_list =~ "tenant" ]]; then
-      echo -e "${red}ERROR: tenant network is not enabled for ovs-dpdk ${reset}"
-      exit 1
-    fi
-  fi
-}
-
-##parses deploy settings yaml into globals
-parse_deploy_settings() {
-  local output
-  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
-      echo -e "${blue}${output}${reset}"
-      eval "$output"
-  else
-      echo -e "${red}ERROR: Failed to parse deploy settings file $DEPLOY_SETTINGS_FILE ${reset}"
-      exit 1
-  fi
-
-}
-
-##parses baremetal yaml settings into compatible json
-##writes the json to undercloud:instackenv.json
-##params: none
-##usage: parse_inventory_file
-parse_inventory_file() {
-  local output
-  if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
-  if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
-  instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
-  #Copy instackenv.json to undercloud
-  echo -e "${blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-cat > instackenv.json << EOF
-$instackenv_output
-EOF
-EOI
-  if output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha --export-bash); then
-    echo -e "${blue}${output}${reset}"
-    eval "$output"
-  else
-    echo -e "${red}ERROR: Failed to parse inventory bash settings file ${INVENTORY_FILE}${reset}"
-    exit 1
-  fi
-
-}
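
These wrappers only shuttled YAML through apex_python_utils.py and eval'd the output into bash globals. With a Python caller the settings can be loaded directly; a minimal sketch with PyYAML, where the path is illustrative and the keys mirror the deploy_options usage seen elsewhere in this patch:

    import yaml

    with open('/etc/opnfv-apex/deploy_settings.yaml') as f:
        deploy_settings = yaml.safe_load(f)

    deploy_options = deploy_settings.get('deploy_options', {})
    sdn_controller = deploy_options.get('sdn_controller')  # e.g. 'opendaylight' or False
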
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
deleted file mode 100755 (executable)
index 7678b0d..0000000
+++ /dev/null
@@ -1,281 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##Post configuration after install
-##params: none
-function configure_post_install {
-  local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af external_network_ipv6
-  external_network_ipv6=False
-  opnfv_attach_networks="admin"
-  if [[ $enabled_network_list =~ "external" ]]; then
-    opnfv_attach_networks+=' external'
-  fi
-
-  echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
-
-  echo -e "${blue}INFO: Configuring ssh for root to overcloud nodes...${reset}"
-  # copy jumphost public key to the undercloud
-  scp ${SSH_OPTIONS[@]} /root/.ssh/id_rsa.pub "stack@$UNDERCLOUD":jumphost_id_rsa.pub
-
-  # add host key to overcloud nodes authorized keys
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-source stackrc
-nodes=\$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-for node in \$nodes; do
-cat ~/jumphost_id_rsa.pub | ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" 'cat >> ~/.ssh/authorized_keys'
-done
-EOI
-
-  echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
-  for network in ${opnfv_attach_networks}; do
-    ovs_ip=$(find_ip ${NET_MAP[$network]})
-    tmp_ip=''
-    if [ -n "$ovs_ip" ]; then
-      echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
-    else
-      echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
-      # use last IP of allocation pool
-      eval "ip_range=\${${network}_overcloud_ip_range}"
-      ovs_ip=${ip_range##*,}
-      eval "net_cidr=\${${network}_cidr}"
-      if [[ $ovs_ip =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-        af=4
-      else
-        af=6
-        if [ "$network" == "external" ]; then
-          external_network_ipv6=True
-        fi
-        #enable ipv6 on bridge interface
-        echo 0 > /proc/sys/net/ipv6/conf/${NET_MAP[$network]}/disable_ipv6
-      fi
-      sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
-      sudo ip link set up ${NET_MAP[$network]}
-      tmp_ip=$(find_ip ${NET_MAP[$network]} $af)
-      if [ -n "$tmp_ip" ]; then
-        echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
-        continue
-      else
-        echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
-        return 1
-      fi
-    fi
-  done
-
-  if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
-    echo -e "${blue}INFO: Bringing up br-phy and ovs-agent for dpdk compute nodes...${reset}"
-    compute_nodes=$(undercloud_connect stack "source stackrc; nova list | grep compute | wc -l")
-    i=0
-    while [ "$i" -lt "$compute_nodes" ]; do
-      overcloud_connect compute${i} "sudo ifup br-phy; sudo systemctl restart neutron-openvswitch-agent"
-      i=$((i + 1))
-    done
-  fi
-
-  # TODO fix this when HA SDN controllers are supported
-  if [ "${deploy_options_array['sdn_controller']}" != 'False' ]; then
-    echo -e "${blue}INFO: Finding SDN Controller IP for overcloudrc...${reset}"
-    sdn_controller_ip=$(undercloud_connect stack "source stackrc;nova list | grep controller-0 | cut -d '|' -f 7 | grep -Eo [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-    echo -e "${blue}INFO: SDN Controller IP is ${sdn_controller_ip} ${reset}"
-    undercloud_connect stack "echo 'export SDN_CONTROLLER_IP=${sdn_controller_ip}' >> /home/stack/overcloudrc"
-  fi
-
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source overcloudrc
-set -o errexit
-echo "Configuring Neutron external network"
-if [[ -n "$external_nic_mapping_compute_vlan" && "$external_nic_mapping_compute_vlan" != 'native' ]]; then
-  openstack network create external --project service --external --provider-network-type vlan --provider-segment $external_nic_mapping_compute_vlan --provider-physical-network datacentre
-else
-  openstack network create external --project service --external --provider-network-type flat --provider-physical-network datacentre
-fi
-if [ "$external_network_ipv6" == "True" ]; then
-  openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $external_gateway --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} --subnet-range $external_cidr --ip-version 6 --ipv6-ra-mode slaac --ipv6-address-mode slaac
-elif [[ "$enabled_network_list" =~ "external" ]]; then
-  openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $external_gateway --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} --subnet-range $external_cidr
-else
-  # we re-use the introspection range for floating ips with single admin network
-  openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $admin_gateway --allocation-pool start=${admin_introspection_range%%,*},end=${admin_introspection_range##*,} --subnet-range $admin_cidr
-fi
-
-if [ "${deploy_options_array['gluon']}" == 'True' ]; then
-  echo "Creating Gluon dummy network and subnet"
-  openstack network create gluon-network --share --provider-network-type vxlan
-  openstack subnet create gluon-subnet --no-gateway --no-dhcp --network gluon-network --subnet-range 0.0.0.0/1
-fi
-
-# Add OS_PROJECT_ID and OS_TENANT_NAME to overcloudrc if they are missing.
-# The deprecated openstack client no longer needs project_id and
-# os_tenant_name, but the glance client and Rally in general
-# still do.
-# REMOVE when no longer needed by Rally/glance-client.
-if ! grep -q  "OS_PROJECT_ID" ./overcloudrc;then
-    project_id=\$(openstack project list |grep admin|awk '{print \$2}')
-    echo "export OS_PROJECT_ID=\$project_id" >> ./overcloudrc
-fi
-if ! grep -q  "OS_TENANT_NAME" ./overcloudrc;then
-    echo "export OS_TENANT_NAME=admin" >> ./overcloudrc
-fi
-
-if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
-    for flavor in \$(openstack flavor list -c Name -f value); do
-        echo "INFO: Configuring \$flavor to use hugepage"
-        nova flavor-key \$flavor set hw:mem_page_size=large
-    done
-fi
-
-if [ "${deploy_options_array['congress']}" == 'True' ]; then
-    ds_configs="--config username=\$OS_USERNAME
-                --config tenant_name=\$OS_PROJECT_NAME
-                --config password=\$OS_PASSWORD
-                --config auth_url=\$OS_AUTH_URL"
-    for s in nova neutronv2 cinder glancev2 keystone; do
-        ds_extra_configs=""
-        if [ "\$s" == "nova" ]; then
-            # nova's latest version is 2.38 but congress relies on nova to do
-            # floating ip operations instead of neutron. FIP support in nova
-            # was deprecated as of 2.35. Hard coding 2.34 for danube.
-            # Carlos.Goncalves working on fixes for upstream congress that
-            # should be ready for ocata.
-            nova_micro_version="2.34"
-            #nova_micro_version=\$(nova version-list | grep CURRENT | awk '{print \$10}')
-            ds_extra_configs+="--config api_version=\$nova_micro_version"
-        fi
-        if openstack congress datasource create \$s "\$s" \$ds_configs \$ds_extra_configs; then
-          echo "INFO: Datasource: \$s created"
-        else
-          echo "WARN: Datasource: \$s could NOT be created"
-        fi
-    done
-    if openstack congress datasource create doctor "doctor"; then
-      echo "INFO: Datasource: doctor created"
-    else
-      echo "WARN: Datasource: doctor could NOT be created"
-    fi
-fi
-
-
-EOI
-
-  # we need to restart neutron-server in Gluon deployments to allow the Gluon core
-  # plugin to correctly register itself with Neutron
-  if [ "${deploy_options_array['gluon']}" == 'True' ]; then
-    echo "Restarting neutron-server to finalize Gluon installation"
-    overcloud_connect "controller0" "sudo systemctl restart neutron-server"
-  fi
-
-  # for virtual, we NAT external network through Undercloud
-  # same goes for baremetal if only jumphost has external connectivity
-  if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$external_network_ipv6" != "True" ]; then
-    if [[ "$enabled_network_list" =~ "external" ]]; then
-      nat_cidr=${external_cidr}
-    else
-      nat_cidr=${admin_cidr}
-    fi
-    if ! configure_undercloud_nat ${nat_cidr}; then
-      echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${nat_cidr}${reset}"
-      exit 1
-    else
-      echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud external network${reset}"
-    fi
-  fi
-
-  # for sfc deployments we need the vxlan workaround
-  if [ "${deploy_options_array['sfc']}" == 'True' ]; then
-      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source stackrc
-set -o errexit
-for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-sudo ifconfig br-int up
-sudo ip route add 123.123.123.0/24 dev br-int
-EOF
-done
-EOI
-  fi
-
-  ### VSPERF ###
-  if [[ "${deploy_options_array['vsperf']}" == 'True' ]]; then
-    echo -e "${blue}\nVSPERF enabled, running build_base_machine.sh\n${reset}"
-    overcloud_connect "compute0" "sudo sh -c 'cd /var/opt/vsperf/systems/ && ./build_base_machine.sh 2>&1 > /var/log/vsperf.log'"
-  fi
-
-  # install docker
-  if [ "${deploy_options_array['yardstick']}" == 'True' ] || [ "${deploy_options_array['dovetail']}" == 'True' ]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sudo yum install docker -y
-sudo systemctl start docker
-sudo systemctl enable docker
-EOI
-  fi
-
-  # pull yardstick image
-  if [ "${deploy_options_array['yardstick']}" == 'True' ]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sudo docker pull opnfv/yardstick
-EOI
-  fi
-
-  # pull dovetail image
-  if [ "${deploy_options_array['dovetail']}" == 'True' ]; then
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sudo docker pull opnfv/dovetail
-EOI
-  fi
-
-  # Collect deployment logs
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-mkdir -p ~/deploy_logs
-rm -rf deploy_logs/*
-source stackrc
-set -o errexit
-for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
- ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
- sudo cp /var/log/messages /home/heat-admin/messages.log
- sudo chown heat-admin /home/heat-admin/messages.log
-EOF
-scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
-if [ "$debug" == "TRUE" ]; then
-    nova list --ip \$node
-    echo "---------------------------"
-    echo "-----/var/log/messages-----"
-    echo "---------------------------"
-    cat ~/deploy_logs/\$node.messages.log
-    echo "---------------------------"
-    echo "----------END LOG----------"
-    echo "---------------------------"
-
-    ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-echo "$node"
-sudo openstack-status
-EOF
-fi
- ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
- sudo rm -f /home/heat-admin/messages.log
-EOF
-done
-
-# Print out the undercloud IP and dashboard URL
-source stackrc
-echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
-echo "Overcloud dashboard available at http://\$(openstack stack output show overcloud PublicVip -f json | jq -r .output_value)/dashboard"
-EOI
-
-if [[ "$ha_enabled" == 'True' ]]; then
-  if [ "$debug" == "TRUE" ]; then
-    echo -e "${blue}\nChecking pacemaker service status\n${reset}"
-  fi
-  overcloud_connect "controller0" "for i in \$(sudo pcs status | grep '^* ' | cut -d ' ' -f 2 | cut -d '_' -f 1 | uniq); do echo \"WARNING: Service: \$i not running\"; done"
-fi
-
-if [ "${deploy_options_array['vpn']}" == 'True' ]; then
-   # Check zrpcd is started
-   overcloud_connect "controller0" "sudo systemctl status zrpcd > /dev/null || echo 'WARNING: zrpcd is not running on controller0'"
-fi
-}
diff --git a/lib/python/apex/common/utils.py b/lib/python/apex/common/utils.py
deleted file mode 100644 (file)
index 8e6896f..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-
-
-def str2bool(var):
-    if isinstance(var, bool):
-        return var
-    else:
-        return var.lower() in ("true", "yes")
-
-
-def parse_yaml(yaml_file):
-    with open(yaml_file) as f:
-        parsed_dict = yaml.safe_load(f)
-        return parsed_dict
-
-
-def write_str(bash_str, path=None):
-    if path:
-        with open(path, 'w') as file:
-            file.write(bash_str)
-    else:
-        print(bash_str)
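The three helpers in this removed module are small enough to show end to end. A short usage sketch (file paths are illustrative only, and the import path assumes the old lib/python layout):

    from apex.common import utils   # old location, removed by this change

    utils.str2bool('Yes')        # True  ("true"/"yes" in any case)
    utils.str2bool(False)        # False (bools pass straight through)
    settings = utils.parse_yaml('/tmp/deploy_settings.yaml')   # dict from YAML
    utils.write_str('export FOO=bar\n')                        # no path: print to stdout
    utils.write_str('export FOO=bar\n', path='/tmp/foo.sh')    # path given: write file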
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
deleted file mode 100755 (executable)
index 70fc592..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com), Dan Radez (dradez@redhat.com)
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import apex
-import argparse
-import sys
-import logging
-import os
-import yaml
-
-from jinja2 import Environment
-from jinja2 import FileSystemLoader
-
-from apex import NetworkSettings
-from apex import NetworkEnvironment
-from apex import DeploySettings
-from apex import Inventory
-from apex import ip_utils
-
-
-def parse_net_settings(args):
-    """
-    Parse OPNFV Apex network_settings.yaml config file
-    and dump bash syntax to set environment variables
-
-    Args:
-    - file: string
-      path to network_settings.yaml file
-    """
-    settings = NetworkSettings(args.net_settings_file)
-    net_env = NetworkEnvironment(settings, args.net_env_file,
-                                 args.compute_pre_config,
-                                 args.controller_pre_config)
-    target = args.target_dir.split('/')
-    target.append('network-environment.yaml')
-    dump_yaml(dict(net_env), '/'.join(target))
-    settings.dump_bash()
-
-
-def dump_yaml(data, file):
-    """
-    Dumps data to a file as yaml
-    :param data: yaml to be written to file
-    :param file: filename to write to
-    :return:
-    """
-    with open(file, "w") as fh:
-        yaml.dump(data, fh, default_flow_style=False)
-
-
-def parse_deploy_settings(args):
-    settings = DeploySettings(args.file)
-    settings.dump_bash()
-
-
-def run_clean(args):
-    apex.clean_nodes(args.file)
-
-
-def parse_inventory(args):
-    inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
-    if args.export_bash is True:
-        inventory.dump_bash()
-    else:
-        inventory.dump_instackenv_json()
-
-
-def find_ip(args):
-    """
-    Get and print the IP from a specific interface
-
-    Args:
-    - interface: string
-      network interface name
-    - address_family: int
-      4 or 6, for IPv4 or IPv6 respectively
-    """
-    interface = ip_utils.get_interface(args.interface,
-                                       args.address_family)
-    if interface:
-        print(interface.ip)
-
-
-def build_nic_template(args):
-    """
-    Build and print a TripleO NIC template from a jinja template
-
-    Args:
-    - template: string
-      path to jinja template to load
-    - enabled_networks: comma delimited list
-      list of networks defined in net_env.py
-    - ext_net_type: string
-      interface or br-ex, defines the external network configuration
-    - address_family: string
-      4 or 6, for IPv4 or IPv6 respectively
-    - ovs_dpdk_bridge: string
-      bridge name to use as ovs_dpdk
-    """
-    template_dir, template = args.template.rsplit('/', 1)
-
-    netsets = NetworkSettings(args.net_settings_file)
-    nets = netsets.get('networks')
-    ds = DeploySettings(args.deploy_settings_file).get('deploy_options')
-    env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
-    template = env.get_template(template)
-
-    if ds['dataplane'] == 'fdio':
-        nets['tenant']['nic_mapping'][args.role]['phys_type'] = 'vpp_interface'
-        if ds['sdn_controller'] == 'opendaylight':
-            nets['external'][0]['nic_mapping'][args.role]['phys_type'] =\
-                'vpp_interface'
-            if ds.get('odl_vpp_routing_node') == 'dvr':
-                nets['admin']['nic_mapping'][args.role]['phys_type'] =\
-                    'linux_bridge'
-    if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\
-            .get('uio-driver'):
-        nets['tenant']['nic_mapping'][args.role]['uio-driver'] =\
-            ds['performance'][args.role.title()]['vpp']['uio-driver']
-        if ds['sdn_controller'] == 'opendaylight':
-            nets['external'][0]['nic_mapping'][args.role]['uio-driver'] =\
-                ds['performance'][args.role.title()]['vpp']['uio-driver']
-    if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\
-            .get('interface-options'):
-        nets['tenant']['nic_mapping'][args.role]['interface-options'] =\
-            ds['performance'][args.role.title()]['vpp']['interface-options']
-
-    print(template.render(nets=nets,
-                          role=args.role,
-                          external_net_af=netsets.get_ip_addr_family(),
-                          external_net_type=args.ext_net_type,
-                          ovs_dpdk_bridge=args.ovs_dpdk_bridge))
-
-
-def get_parser():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--debug', action='store_true', default=False,
-                        help="Turn on debug messages")
-    parser.add_argument('-l', '--log-file', default='/var/log/apex/apex.log',
-                        dest='log_file', help="Log file to log to")
-    subparsers = parser.add_subparsers()
-    # parse-net-settings
-    net_settings = subparsers.add_parser('parse-net-settings',
-                                         help='Parse network settings file')
-    net_settings.add_argument('-s', '--net-settings-file',
-                              default='network-settings.yaml',
-                              dest='net_settings_file',
-                              help='path to network settings file')
-    net_settings.add_argument('-e', '--net-env-file',
-                              default="network-environment.yaml",
-                              dest='net_env_file',
-                              help='path to network environment file')
-    net_settings.add_argument('-td', '--target-dir',
-                              default="/tmp",
-                              dest='target_dir',
-                              help='directory to write the '
-                                   'network-environment.yaml file')
-    net_settings.add_argument('--compute-pre-config',
-                              default=False,
-                              action='store_true',
-                              dest='compute_pre_config',
-                              help='Boolean to enable Compute Pre Config')
-    net_settings.add_argument('--controller-pre-config',
-                              action='store_true',
-                              default=False,
-                              dest='controller_pre_config',
-                              help='Boolean to enable Controller Pre Config')
-
-    net_settings.set_defaults(func=parse_net_settings)
-    # find-ip
-    get_int_ip = subparsers.add_parser('find-ip',
-                                       help='Find interface ip')
-    get_int_ip.add_argument('-i', '--interface', required=True,
-                            help='Interface name')
-    get_int_ip.add_argument('-af', '--address-family', default=4, type=int,
-                            choices=[4, 6], dest='address_family',
-                            help='IP Address family')
-    get_int_ip.set_defaults(func=find_ip)
-    # nic-template
-    nic_template = subparsers.add_parser('nic-template',
-                                         help='Build NIC templates')
-    nic_template.add_argument('-r', '--role', required=True,
-                              choices=['controller', 'compute'],
-                              help='Role template generated for')
-    nic_template.add_argument('-t', '--template', required=True,
-                              dest='template',
-                              help='Template file to process')
-    nic_template.add_argument('-s', '--net-settings-file',
-                              default='network-settings.yaml',
-                              dest='net_settings_file',
-                              help='path to network settings file')
-    nic_template.add_argument('-e', '--ext-net-type', default='interface',
-                              dest='ext_net_type',
-                              choices=['interface', 'vpp_interface', 'br-ex'],
-                              help='External network type')
-    nic_template.add_argument('-d', '--ovs-dpdk-bridge',
-                              default=None, dest='ovs_dpdk_bridge',
-                              help='OVS DPDK Bridge Name')
-    nic_template.add_argument('--deploy-settings-file',
-                              help='path to deploy settings file')
-
-    nic_template.set_defaults(func=build_nic_template)
-    # parse-deploy-settings
-    deploy_settings = subparsers.add_parser('parse-deploy-settings',
-                                            help='Parse deploy settings file')
-    deploy_settings.add_argument('-f', '--file',
-                                 default='deploy_settings.yaml',
-                                 help='path to deploy settings file')
-    deploy_settings.set_defaults(func=parse_deploy_settings)
-    # parse-inventory
-    inventory = subparsers.add_parser('parse-inventory',
-                                      help='Parse inventory file')
-    inventory.add_argument('-f', '--file',
-                           default='deploy_settings.yaml',
-                           help='path to inventory file')
-    inventory.add_argument('--ha',
-                           default=False,
-                           action='store_true',
-                           help='Indicate if deployment is HA or not')
-    inventory.add_argument('--virtual',
-                           default=False,
-                           action='store_true',
-                           help='Indicate if deployment inventory is virtual')
-    inventory.add_argument('--export-bash',
-                           default=False,
-                           dest='export_bash',
-                           action='store_true',
-                           help='Export bash variables from inventory')
-    inventory.set_defaults(func=parse_inventory)
-
-    clean = subparsers.add_parser('clean',
-                                  help='Clean overcloud nodes')
-    clean.add_argument('-f', '--file',
-                       help='path to inventory file')
-    clean.set_defaults(func=run_clean)
-
-    return parser
-
-
-def main():
-    parser = get_parser()
-    args = parser.parse_args(sys.argv[1:])
-    if args.debug:
-        logging.basicConfig(level=logging.DEBUG)
-    else:
-        apex_log_filename = args.log_file
-        os.makedirs(os.path.dirname(apex_log_filename), exist_ok=True)
-        logging.basicConfig(filename=apex_log_filename,
-                            format='%(asctime)s %(levelname)s: %(message)s',
-                            datefmt='%m/%d/%Y %I:%M:%S %p',
-                            level=logging.DEBUG)
-    if hasattr(args, 'func'):
-        args.func(args)
-    else:
-        parser.print_help()
-        exit(1)
-
-if __name__ == "__main__":
-    main()
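Every subcommand in this removed CLI registers its handler with set_defaults(func=...), and main() simply dispatches to args.func after configuring logging. A minimal sketch of that dispatch pattern using the find-ip subcommand defined above (the interface name is illustrative):

    from apex_python_utils import get_parser   # module removed by this change

    parser = get_parser()
    # each subparser registered its handler via set_defaults(func=...)
    args = parser.parse_args(['find-ip', '-i', 'eth0', '-af', '4'])
    args.func(args)   # dispatches to find_ip(args), which prints the interface IP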
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
deleted file mode 100755 (executable)
index 08e1b7c..0000000
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##verify vm exists, and has a dhcp lease assigned to it
-##params: none
-function setup_undercloud_vm {
-  local libvirt_imgs=/var/lib/libvirt/images
-  if ! virsh list --all | grep undercloud > /dev/null; then
-      undercloud_nets="default admin"
-      if [[ $enabled_network_list =~ "external" ]]; then
-        undercloud_nets+=" external"
-      fi
-      define_vm undercloud hd 30 "$undercloud_nets" 4 12288
-
-      ### virsh vol-upload doesn't work for some reason (hangup events), so use cp instead
-      #virsh vol-upload --pool default --vol undercloud.qcow2 --file $BASE/stack/undercloud.qcow2
-      #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
-      #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
-      #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
-      #error: cannot close volume undercloud.qcow2
-      #error: internal error: received hangup / error event on socket
-      #error: Reconnected to the hypervisor
-
-      cp -f $IMAGES/undercloud.qcow2 $libvirt_imgs/undercloud.qcow2
-      cp -f $IMAGES/overcloud-full.vmlinuz $libvirt_imgs/overcloud-full.vmlinuz
-      cp -f $IMAGES/overcloud-full.initrd $libvirt_imgs/overcloud-full.initrd
-
-      # resize Undercloud machine
-      echo "Checking if Undercloud needs to be resized..."
-      undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $libvirt_imgs/undercloud.qcow2 |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
-      if [ "$undercloud_size" -lt 30 ]; then
-        qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
-        LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $libvirt_imgs/undercloud.qcow2
-        LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --run-command 'xfs_growfs -d /dev/sda1 || true'
-        new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $libvirt_imgs/undercloud.qcow2 |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
-        if [ "$new_size" -lt 30 ]; then
-          echo "Error resizing Undercloud machine, disk size is ${new_size}"
-          exit 1
-        else
-          echo "Undercloud successfully resized"
-        fi
-      else
-        echo "Skipped Undercloud resize, upstream is large enough"
-      fi
-
-  else
-      echo "Found existing Undercloud VM, exiting."
-      exit 1
-  fi
-
-  # if the VM is not running update the authkeys and start it
-  if ! virsh list | grep undercloud > /dev/null; then
-    if [ "$debug" == 'TRUE' ]; then
-      LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --root-password password:opnfvapex
-    fi
-
-    echo "Injecting ssh key to Undercloud VM"
-    LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --run-command "mkdir -p /root/.ssh/" \
-        --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
-        --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
-        --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
-        --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
-    virsh start undercloud
-    virsh autostart undercloud
-  fi
-
-  sleep 10 # let undercloud get started up
-
-  # get the undercloud VM IP
-  CNT=10
-  echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
-  undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
-  while ! $(arp -en | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
-      echo -n "."
-      sleep 10
-      CNT=$((CNT-1))
-  done
-  UNDERCLOUD=$(arp -en | grep ${undercloud_mac} | awk {'print $1'})
-
-  if [ -z "$UNDERCLOUD" ]; then
-    echo -e "\n\nCan't get IP for Undercloud. Cannot continue."
-    exit 1
-  else
-     echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
-  fi
-
-  CNT=10
-  echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
-  while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
-      echo -n "."
-      sleep 3
-      CNT=$((CNT-1))
-  done
-  if [ "$CNT" -eq 0 ]; then
-      echo "Failed to contact Undercloud. Cannot continue."
-      exit 1
-  fi
-  CNT=10
-  while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" > /dev/null 2>&1 && [ $CNT -gt 0 ]; do
-      echo -n "."
-      sleep 3
-      CNT=$((CNT-1))
-  done
-  if [ "$CNT" -eq 0 ]; then
-      echo "Failed to connect to Undercloud. Cannot continue."
-      exit 1
-  fi
-
-  # extra space to overwrite the previous connectivity output
-  echo -e "${blue}\r                                                                 ${reset}"
-  sleep 1
-
-  # ensure stack user on Undercloud machine has an ssh key
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
-
-  # ssh key fix for stack user
-  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
-}
-
-##Copy over the glance images and instackenv json file
-##params: none
-function configure_undercloud {
-  local controller_nic_template compute_nic_template
-  echo
-  echo "Copying configuration files to Undercloud"
-  echo -e "${blue}Network Environment set for Deployment: ${reset}"
-  cat $APEX_TMP_DIR/network-environment.yaml
-  scp ${SSH_OPTIONS[@]} $APEX_TMP_DIR/network-environment.yaml "stack@$UNDERCLOUD":
-
-  # set the external network type based on the dataplane
-  if [ "${deploy_options_array['dataplane']}" == 'fdio' ]; then
-    ext_net_type=vpp_interface
-  else
-    ext_net_type=br-ex
-  fi
-
-  if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
-    ovs_dpdk_bridge='br-phy'
-  else
-    ovs_dpdk_bridge=''
-  fi
-
-  # for some reason putting an IP on the bridge fails ping validation in OOO, so use an interface for SFC
-  if [ "${deploy_options_array['sfc']}" == 'True' ]; then
-    controller_external='interface'
-  else
-    controller_external='br-ex'
-  fi
-
-  if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $controller_external --deploy-settings-file $DEPLOY_SETTINGS_FILE); then
-    echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
-    exit 1
-  fi
-
-  if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge" --deploy-settings-file $DEPLOY_SETTINGS_FILE); then
-    echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
-    exit 1
-  fi
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-mkdir nics/
-cat > nics/controller.yaml << EOF
-$controller_nic_template
-EOF
-cat > nics/compute.yaml << EOF
-$compute_nic_template
-EOF
-EOI
-
-  # disable requiretty for sudo
-  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers
-
-  # configure undercloud on Undercloud VM
-  echo "Running undercloud installation and configuration."
-  echo "Logging undercloud installation to stack@undercloud:/home/stack/apex-undercloud-install.log"
-  ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-set -e
-openstack-config --set undercloud.conf DEFAULT local_ip ${admin_installer_vm_ip}/${admin_cidr##*/}
-openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_installer_vm_ip}
-openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_cidr}
-openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_dhcp_range%%,*}
-openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_dhcp_range##*,}
-openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_introspection_range}
-openstack-config --set undercloud.conf DEFAULT undercloud_debug false
-openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
-openstack-config --set undercloud.conf DEFAULT enable_ui false
-openstack-config --set undercloud.conf DEFAULT undercloud_update_packages false
-sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
-sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
-
-if [[ -n "${deploy_options_array['ceph_device']}" ]]; then
-    sed -i '/ExtraConfig/a\\    ceph::profile::params::osds: {\\x27${deploy_options_array['ceph_device']}\\x27: {}}' ${ENV_FILE}
-fi
-
-sudo sed -i '/CephClusterFSID:/c\\  CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-sudo sed -i '/CephMonKey:/c\\  CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-sudo sed -i '/CephAdminKey:/c\\  CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-
-if [ "\$(uname -i)" == 'aarch64' ]; then
-
-# These two fixes are done in the base OOO image build right now
-# keeping them here to know that they are done and in case we need
-# to take care of them in the future.
-#    # remove syslinux references for aarch64
-#    sudo sh -xc 'cd /etc/puppet/modules/ironic/manifests && patch -p0 < puppet-ironic-manifests-pxe-pp-aarch64.patch'
-#    sudo sed -i '/syslinux-extlinux/d' /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp
-#
-#    # disable use_linkat in swift
-#    sudo sed -i 's/o_tmpfile_supported()/False/' /usr/lib/python2.7/site-packages/swift/obj/diskfile.py
-
-    openstack-config --set undercloud.conf DEFAULT ipxe_enabled false
-    sudo sed -i '/    _link_ip_address_pxe_configs/a\\        _link_mac_pxe_configs(task)' /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
-fi
-
-openstack undercloud install &> apex-undercloud-install.log || {
-    # cat the undercloud install log in case it fails
-    echo "ERROR: openstack undercloud install has failed. Dumping Log:"
-    cat apex-undercloud-install.log
-    exit 1
-}
-
-if [ "\$(uname -i)" == 'aarch64' ]; then
-sudo yum -y reinstall grub2-efi shim
-sudo cp /boot/efi/EFI/centos/grubaa64.efi /tftpboot/grubaa64.efi
-sudo mkdir -p /tftpboot/EFI/centos
-sudo tee /tftpboot/EFI/centos/grub.cfg > /dev/null << EOF
-set default=master
-set timeout=5
-set hidden_timeout_quiet=false
-
-menuentry "master"  {
-configfile /tftpboot/\\\$net_default_ip.conf
-}
-EOF
-sudo chmod 644 /tftpboot/EFI/centos/grub.cfg
-sudo openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template \\\$pybasedir/drivers/modules/pxe_grub_config.template
-sudo openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi
-sudo service openstack-ironic-conductor restart
-sudo sed -i 's/linuxefi/linux/g' /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
-sudo sed -i 's/initrdefi/initrd/g' /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
-echo '' | sudo tee --append /tftpboot/map-file > /dev/null
-echo 'r ^/EFI/centos/grub.cfg-(.*) /tftpboot/pxelinux.cfg/\\1' | sudo tee --append /tftpboot/map-file > /dev/null
-sudo service xinetd restart
-fi
-
-# Set nova domain name
-sudo openstack-config --set /etc/nova/nova.conf DEFAULT dns_domain ${domain_name}
-sudo openstack-config --set /etc/nova/nova.conf DEFAULT dhcp_domain ${domain_name}
-sudo systemctl restart openstack-nova-conductor
-sudo systemctl restart openstack-nova-compute
-sudo systemctl restart openstack-nova-api
-sudo systemctl restart openstack-nova-scheduler
-
-# Set neutron domain name
-sudo openstack-config --set /etc/neutron/neutron.conf DEFAULT dns_domain ${domain_name}
-sudo systemctl restart neutron-server
-sudo systemctl restart neutron-dhcp-agent
-EOI
-
-# configure external network
-if [[ "$enabled_network_list" =~ "external" ]]; then
-  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
-if [[ "$external_installer_vm_vlan" != "native" ]]; then
-  cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${external_installer_vm_vlan}
-DEVICE=vlan${external_installer_vm_vlan}
-ONBOOT=yes
-DEVICETYPE=ovs
-TYPE=OVSIntPort
-BOOTPROTO=static
-IPADDR=${external_installer_vm_ip}
-PREFIX=${external_cidr##*/}
-OVS_BRIDGE=br-ctlplane
-OVS_OPTIONS="tag=${external_installer_vm_vlan}"
-EOF
-  ifup vlan${external_installer_vm_vlan}
-else
-  if ! ip a s eth2 | grep ${external_installer_vm_ip} > /dev/null; then
-      ip a a ${external_installer_vm_ip}/${external_cidr##*/} dev eth2
-      ip link set up dev eth2
-  fi
-fi
-EOI
-fi
-
-}
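Much of configure_undercloud is openstack-config --set calls, which just write key = value pairs into a section of an INI file. A rough Python equivalent of a couple of the undercloud.conf edits above, for readers unfamiliar with that tool (the values stand in for the shell variables used in the script, and unlike openstack-config this sketch does not preserve comments):

    import configparser

    # approximate `openstack-config --set undercloud.conf DEFAULT <key> <value>`
    conf = configparser.ConfigParser()
    conf.read('undercloud.conf')
    conf['DEFAULT']['local_ip'] = '192.0.2.1/24'      # ${admin_installer_vm_ip}/${admin_cidr##*/}
    conf['DEFAULT']['dhcp_start'] = '192.0.2.50'      # ${admin_dhcp_range%%,*}
    conf['DEFAULT']['undercloud_debug'] = 'false'
    with open('undercloud.conf', 'w') as fh:
        conf.write(fh)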
diff --git a/lib/utility-functions.sh b/lib/utility-functions.sh
deleted file mode 100644 (file)
index c12619a..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env bash
-# Utility Functions used by OPNFV Apex
-# author: Tim Rozet (trozet@redhat.com)
-
-SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
-
-##connects to undercloud
-##params: user to login with, command to execute on undercloud (optional)
-function undercloud_connect {
-  local user=$1
-
-  if [ -z "$1" ]; then
-    echo "Missing required argument: user to log in to the undercloud as"
-    return 1
-  fi
-
-  if [ -z "$2" ]; then
-    ssh ${SSH_OPTIONS[@]} ${user}@$(get_undercloud_ip)
-  else
-    ssh ${SSH_OPTIONS[@]} -T ${user}@$(get_undercloud_ip) "$2"
-  fi
-}
-
-##outputs the Undercloud's IP address
-##params: none
-function get_undercloud_ip {
-  echo $(arp -an | grep $(virsh domiflist undercloud | grep default |\
-    awk '{print $5}') | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-}
-
-##connects to overcloud nodes
-##params: node to login to, command to execute on overcloud (optional)
-function overcloud_connect {
-  local node
-  local node_output
-  local node_ip
-
-  if [ -z "$1" ]; then
-    echo "Missing required argument: overcloud node to login to"
-    return 1
-  elif ! echo "$1" | grep -E "(controller|compute)[0-9]+" > /dev/null; then
-    echo "Invalid argument: overcloud node to login to must be in the format: \
-controller<number> or compute<number>"
-    return 1
-  fi
-
-  node_output=$(undercloud_connect "stack" "source stackrc; nova list")
-  node=$(echo "$1" | sed -E 's/([a-zA-Z]+)([0-9]+)/\1-\2/')
-
-  node_ip=$(echo "$node_output" | grep "$node" | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-
-  if [ "$node_ip" == "" ]; then
-    echo -e "Unable to find IP for ${node} in \n${node_output}"
-    return 1
-  fi
-
-  if [ -z "$2" ]; then
-    ssh ${SSH_OPTIONS[@]} heat-admin@${node_ip}
-  else
-    ssh ${SSH_OPTIONS[@]} -T heat-admin@${node_ip} "$2"
-  fi
-}
-
-##connects to opendaylight karaf console
-##params: None
-function opendaylight_connect {
-  local opendaylight_ip
-  opendaylight_ip=$(undercloud_connect "stack" "cat overcloudrc | grep SDN_CONTROLLER_IP | grep -Eo [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-
-  if [ "$opendaylight_ip" == "" ]; then
-    echo -e "Unable to find IP for OpenDaylight in overcloudrc"
-    return 1
-  else
-    echo -e "Connecting to ODL Karaf console.  Default password is 'karaf'"
-  fi
-
-  ssh -p 8101 ${SSH_OPTIONS[@]} karaf@${opendaylight_ip}
-}
-
-##outputs heat stack deployment failures
-##params: none
-function debug_stack {
-  source ~/stackrc
-  openstack stack failures list overcloud --long
-}
diff --git a/lib/virtual-setup-functions.sh b/lib/virtual-setup-functions.sh
deleted file mode 100755 (executable)
index 5f9e6ba..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##Create virtual nodes in virsh
-##params: vcpus, ramsize
-function setup_virtual_baremetal {
-  local vcpus ramsize held_ramsize
-  if [ -z "$1" ]; then
-    vcpus=4
-    ramsize=8192
-  elif [ -z "$2" ]; then
-    vcpus=$1
-    ramsize=8192
-  else
-    vcpus=$1
-    ramsize=$(($2*1024))
-  fi
-  #start by generating the opening yaml for the inventory-virt.yaml file
-  cat > $APEX_TMP_DIR/inventory-virt.yaml << EOF
-nodes:
-EOF
-
-  # next create the virtual machines and add their definitions to the file
-  if [ "$ha_enabled" == "False" ]; then
-      controller_index=0
-  else
-      controller_index=2
-      # 3 controllers + computes
-      # zero based so add 2 to compute count
-      if [ $VM_COMPUTES -lt 2 ]; then
-          VM_COMPUTES=2
-      fi
-  fi
-
-  # tmp var to hold ramsize in case modified during detection
-  held_ramsize=${ramsize}
-  for i in $(seq 0 $(($controller_index+$VM_COMPUTES))); do
-    ramsize=${held_ramsize}
-    if [ $i -gt $controller_index ]; then
-      capability="profile:compute"
-      if [ -n "$VM_COMPUTE_RAM" ]; then
-        ramsize=$((${VM_COMPUTE_RAM}*1024))
-      fi
-    else
-      capability="profile:control"
-      if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "$ramsize" -lt 12288 ]]; then
-         echo "WARN: RAM per controller too low.  OpenDaylight specified in deployment requires at least 12GB"
-         echo "INFO: Increasing RAM per controller to 12GB"
-         ramsize=12288
-      elif [[ "$ramsize" -lt 10240 ]]; then
-         echo "WARN: RAM per controller too low.  Deployment requires at least 10GB"
-         echo "INFO: Increasing RAM per controller to 10GB"
-         ramsize=10240
-      fi
-    fi
-    if ! virsh list --all | grep baremetal${i} > /dev/null; then
-      define_vm baremetal${i} network 41 'admin' $vcpus $ramsize
-      for n in tenant external storage api; do
-        if [[ $enabled_network_list =~ $n ]]; then
-          echo -n "$n "
-          virsh attach-interface --domain baremetal${i} --type network --source $n --model virtio --config
-        fi
-      done
-    else
-      echo "Found baremetal${i} VM, using existing VM"
-    fi
-    #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
-    mac=$(virsh domiflist baremetal${i} | grep admin | awk '{ print $5 }')
-
-    cat >> $APEX_TMP_DIR/inventory-virt.yaml << EOF
-  node${i}:
-    mac_address: "$mac"
-    ipmi_ip: 192.168.122.1
-    ipmi_user: admin
-    ipmi_pass: "password"
-    pm_type: "pxe_ipmitool"
-    pm_port: "623$i"
-    cpu: $vcpus
-    memory: $ramsize
-    disk: 41
-    arch: "$(uname -i)"
-    capabilities: "$capability"
-EOF
-    vbmc add baremetal$i --port 623$i
-    if service firewalld status > /dev/null; then
-        firewall-cmd --permanent --zone=public --add-port=623$i/udp
-    fi
-    # TODO: add iptables check and commands too
-    vbmc start baremetal$i
-  done
-  if service firewalld status > /dev/null; then
-    firewall-cmd --reload
-  fi
-}
-
-##Create a single virtual node in virsh
-##params: name - String: libvirt name for VM
-##        bootdev - String: boot device for the VM
-##        disksize - Number: size of the disk in GB
-##        ovs_bridges: - List: list of ovs bridges
-##        vcpus - Number of VCPUs to use (defaults to 4)
-##        ramsize - Size of RAM for VM in MB (defaults to 8192)
-function define_vm () {
-  local vcpus ramsize volume_path direct_boot kernel_args
-
-  if [ -z "$5" ]; then
-    vcpus=4
-    ramsize=8388608
-  elif [ -z "$6" ]; then
-    vcpus=$5
-    ramsize=8388608
-  else
-    vcpus=$5
-    ramsize=$(($6*1024))
-  fi
-
-  # Create the libvirt storage volume
-  if virsh vol-list default | grep ${1}.qcow2 > /dev/null 2>&1; then
-    volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
-    echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
-    virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
-    touch $volume_path
-    virsh vol-delete ${1}.qcow2 --pool default
-  fi
-  virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
-  volume_path=$(virsh vol-path --pool default ${1}.qcow2)
-  if [ ! -f $volume_path ]; then
-      echo "$volume_path Not created successfully... Aborting"
-      exit 1
-  fi
-
-  # the undercloud needs to be direct booted.
-  # the upstream image no longer includes the kernel and initrd
-  if [ "$1" == 'undercloud' ]; then
-      direct_boot='--direct-boot overcloud-full'
-      kernel_args='--kernel-arg console=ttyS0 --kernel-arg root=/dev/sda'
-  fi
-
-  if [ "$(uname -i)" == 'aarch64' ]; then
-      diskbus='scsi'
-  else
-      diskbus='sata'
-  fi
-
-  # create the VM
-  $LIB/configure-vm --name $1 \
-                    --bootdev $2 \
-                    --image "$volume_path" \
-                    --diskbus $diskbus \
-                    --arch $(uname -i) \
-                    --cpus $vcpus \
-                    --memory $ramsize \
-                    --libvirt-nic-driver virtio \
-                    $direct_boot \
-                    $kernel_args \
-                    --baremetal-interface $4
-}
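setup_virtual_baremetal builds inventory-virt.yaml by appending one heredoc stanza per VM. The same node entry expressed as a Python dict and dumped with PyYAML (MAC address and sizes are illustrative), which is roughly the structure the inventory parser consumes:

    import yaml

    node = {
        'node0': {
            'mac_address': '52:54:00:aa:bb:cc',   # from `virsh domiflist baremetal0`
            'ipmi_ip': '192.168.122.1',
            'ipmi_user': 'admin',
            'ipmi_pass': 'password',
            'pm_type': 'pxe_ipmitool',
            'pm_port': '6230',
            'cpu': 4,
            'memory': 8192,
            'disk': 41,
            'arch': 'x86_64',
            'capabilities': 'profile:control',
        }
    }
    print(yaml.dump({'nodes': node}, default_flow_style=False))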
diff --git a/requirements.txt b/requirements.txt
new file mode 100644 (file)
index 0000000..af2a106
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,13 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+pbr!=2.1.0,>=2.0.0 # Apache-2.0
+
+libvirt-python
+python-iptables
+virtualbmc
+PyYAML
+cryptography
+python-ipmi
+PyYAML
+Jinja2>=2.8
diff --git a/setup.cfg b/setup.cfg
new file mode 100644 (file)
index 0000000..ee3105a
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,48 @@
+[metadata]
+name = apex
+summary = Ansible roles and tools for deploying OPNFV
+description-file =
+    INFO
+author = Apex Team
+author-email = michapma@redhat.com trozet@redhat.com dradez@redhat.com
+home-page = https://github.com/opnfv/apex
+classifier =
+  License :: OSI Approved :: Apache Software License
+  Development Status :: 4 - Beta
+  Intended Audience :: Developers
+  Intended Audience :: System Administrators
+  Intended Audience :: Information Technology
+  Topic :: Utilities
+  Programming Language :: Python :: 3
+  Programming Language :: Python :: 3.5
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
+
+[entry_points]
+console_scripts =
+    opnfv-deploy = apex.deploy:main
+
+[files]
+packages =
+    apex
+data_files =
+    share/opnfv-apex/ =
+        build/network-environment.yaml
+        build/opnfv-environment.yaml
+        build/nics-template.yaml.jinja2
+        build/csit-environment.yaml
+        build/virtual-environment.yaml
+        build/baremetal-environment.yaml
+        build/domain.xml
+    share/opnfv-apex/ansible = lib/ansible/*
+    share/opnfv-apex/config = config/*
+    share/opnfv-apex/docs = docs/*
+
+[wheel]
+universal = 1
+
+[pbr]
+skip_authors = True
+skip_changelog = True
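The [entry_points] section is what turns the package into a command: once installed, setuptools generates an opnfv-deploy console script that imports apex.deploy and calls main(). One way to check the wiring from Python (a sketch using pkg_resources, which setuptools/pbr installs provide):

    import pkg_resources

    # resolve the 'opnfv-deploy' console script back to apex.deploy:main
    main = pkg_resources.load_entry_point('apex', 'console_scripts', 'opnfv-deploy')
    print(main)   # <function main at 0x...>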
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..6a931a6
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,19 @@
+#   Copyright Red Hat, Inc. All Rights Reserved.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License"); you may
+#   not use this file except in compliance with the License. You may obtain
+#   a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#   License for the specific language governing permissions and limitations
+#   under the License.
+
+import setuptools
+
+setuptools.setup(
+    setup_requires=['pbr'],
+    pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644 (file)
index 0000000..f22863c
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,6 @@
+coverage>=4.0 # Apache-2.0
+mock>=2.0 # BSD
+nose # LGPL
+flake8<2.6.0,>=2.5.4 # MIT
+pylint==1.4.5 # GPLv2
+sphinx!=1.3b1,<1.4,>=1.2.1 # BSD
diff --git a/tests/test_apex_python_utils_py.py b/tests/test_apex_python_utils_py.py
deleted file mode 100644 (file)
index 550042f..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Dan Radez (Red Hat)
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import shutil
-import sys
-import tempfile
-
-from test_apex_ip_utils import get_default_gateway_linux
-from apex_python_utils import main
-from apex_python_utils import get_parser
-from apex_python_utils import parse_net_settings
-from apex_python_utils import parse_deploy_settings
-from apex_python_utils import find_ip
-from apex_python_utils import build_nic_template
-from apex_python_utils import parse_inventory
-
-from nose.tools import assert_equal
-from nose.tools import assert_raises
-
-
-net_sets = '../config/network/network_settings.yaml'
-net_env = '../build/network-environment.yaml'
-deploy_sets = '../config/deploy/deploy_settings.yaml'
-nic_template = '../build/nics-template.yaml.jinja2'
-inventory = '../config/inventory/pod_example_settings.yaml'
-
-
-class TestCommonUtils(object):
-    @classmethod
-    def setup_class(klass):
-        """This method is run once for each class before any tests are run"""
-        klass.parser = get_parser()
-        klass.iface_name = get_default_gateway_linux()
-
-    @classmethod
-    def teardown_class(klass):
-        """This method is run once for each class _after_ all tests are run"""
-
-    def setUp(self):
-        """This method is run once before _each_ test method is executed"""
-
-    def teardown(self):
-        """This method is run once after _each_ test method is executed"""
-
-    def test_main(self):
-        sys.argv = ['apex_python_utils', '-l', '/dev/null']
-        assert_raises(SystemExit, main)
-        sys.argv = ['apex_python_utils', '--debug', '-l', '/dev/null']
-        assert_raises(SystemExit, main)
-        sys.argv = ['apex_python_utils', '-l', '/dev/null',
-                                         'parse-deploy-settings',
-                                         '-f', deploy_sets]
-        assert_equal(main(), None)
-
-    def test_parse_net_settings(self):
-        tmp_dir = tempfile.mkdtemp()
-        args = self.parser.parse_args(['parse-net-settings',
-                                       '-s', net_sets,
-                                       '-td', tmp_dir,
-                                       '-e', net_env])
-        assert_equal(parse_net_settings(args), None)
-        shutil.rmtree(tmp_dir, ignore_errors=True)
-
-    def test_parse_deploy_settings(self):
-        args = self.parser.parse_args(['parse-deploy-settings',
-                                       '-f', deploy_sets])
-        assert_equal(parse_deploy_settings(args), None)
-
-    def test_find_ip(self):
-        args = self.parser.parse_args(['find-ip',
-                                       '-i', self.iface_name])
-        assert_equal(find_ip(args), None)
-
-    def test_build_nic_template(self):
-        args = self.parser.parse_args(['nic-template',
-                                       '-s', net_sets,
-                                       '-r', 'compute',
-                                       '-t', nic_template,
-                                       '--deploy-settings-file', deploy_sets])
-        assert_equal(build_nic_template(args), None)
-
-    def test_parse_inventory(self):
-        args = self.parser.parse_args(['parse-inventory',
-                                       '-f', inventory])
-        assert_equal(parse_inventory(args), None)
diff --git a/tox.ini b/tox.ini
new file mode 100644 (file)
index 0000000..87b6c03
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,26 @@
@@ -0,0 +1,26 @@
+[tox]
+envlist = docs,pep8,pylint,py35
+
+[testenv]
+usedevelop = True
+deps =
+  -r{toxinidir}/requirements.txt
+  -r{toxinidir}/test-requirements.txt
+commands =
+  coverage erase
+  nosetests-3.4 --with-xunit \
+  --with-coverage \
+  --cover-tests \
+  --cover-package=apex \
+  --cover-xml \
+  --cover-min-percentage 90 \
+  apex/tests
+  coverage report
+
+[testenv:pep8]
+basepython = python3
+commands = flake8 --exclude .build,build --ignore=F401
+
+[testenv:py35]
+basepython = python3
+