mv networking-vpp/dist/*.rpm networking-vpp.noarch.rpm
networking-vpp:
- git clone -b stable https://github.com/naveenjoy/networking-vpp.git
+ git clone -b stable_vlan_rewrite https://github.com/fepan/networking-vpp.git
###############
# UNDERCLOUD #
if [ "$CATEGORY" == "kernel" ]; then
echo "${KEY}=${VALUE}" >> $ROLE-kernel_params.txt
+ if [[ "$dataplane" == 'fdio' && "$KEY" == 'hugepages' ]]; then
+ # set kernel hugepages params for fdio
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf" \
+ --run-command "echo vm.max_map_count=$(printf "%.0f" $(echo 2.2*$VALUE | bc)) >> /usr/lib/sysctl.d/00-system.conf" \
+ --run-command "echo kernel.shmmax=$((VALUE * 2 * 1024 * 1024)) >> /usr/lib/sysctl.d/00-system.conf" \
+ -a ${IMAGE}
+ fi
fi
$dpdk_pkg_str \
$fdio_pkg_str \
--upload ../networking-vpp.noarch.rpm:/root/fdio \
+ --run-command "pip install distro flask_restful" \
--run-command "yum install -y etcd" \
--run-command "pip install python-etcd" \
--install "centos-release-qemu-ev" \
--run-command "cd /usr/lib/python2.7/site-packages/congress/datasources && curl -O $doctor_driver" \
--run-command "sed -i \"s/'--detailed-exitcodes',/'--detailed-exitcodes','-l','syslog','-l','console',/g\" /var/lib/heat-config/hooks/puppet" \
--run-command "yum install -y /root/fdio/*.rpm" \
+ --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
--run-command "tar zxvf /root/fdio/vpp_papi*.tar.gz -C /" \
--install unzip \
--upload puppet-fdio.tar.gz:/etc/puppet/modules \
--run-command "mkdir /root/fdio_neutron_l3" \
--upload ../neutron/agent/l3/namespaces.py:/root/fdio_neutron_l3/ \
--upload ../neutron/agent/l3/router_info.py:/root/fdio_neutron_l3/ \
+ --upload ../puppet-neutron/manifests/agents/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/agents/ml2/ \
+ --upload ../puppet-neutron/manifests/plugins/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/plugins/ml2/ \
-a overcloud-full_build.qcow2
mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
--- /dev/null
+# == Class: neutron::agents::ml2::networking-vpp
+#
+# Sets up the networking-vpp Neutron agent for the ML2 plugin.
+#
+# === Parameters
+#
+# [*package_ensure*]
+# (optional) Package ensure state.
+# Defaults to 'present'.
+#
+# [*enabled*]
+# (optional) Whether or not to enable the agent.
+# Defaults to true.
+#
+# [*manage_service*]
+# (optional) Whether to start/stop the service
+# Defaults to true
+#
+# [*physnets*]
+# List of <physical_network>:<physical_interface>
+# tuples mapping physical network names to agent's node-specific physical
+# network interfaces. Defaults to empty list.
+#
+# [*flat_network_if*]
+# VPP interface used for flat network
+# Defaults to ''.
+#
+class neutron::agents::ml2::networking-vpp (
+ $package_ensure = 'present',
+ $enabled = true,
+ $manage_service = true,
+ $physnets = '',
+ $flat_network_if = '',
+) {
+
+ include ::neutron::params
+
+ if $manage_service {
+ if $enabled {
+ $service_ensure = 'running'
+ } else {
+ $service_ensure = 'stopped'
+ }
+ }
+
+ neutron_plugin_ml2 {
+ 'ml2_vpp/physnets': value => $physnets;
+ 'ml2_vpp/flat_network_if': value => $flat_network_if;
+ }->
+ service { 'networking-vpp-agent':
+ ensure => $service_ensure,
+ name => 'networking-vpp-agent',
+ enable => $enabled,
+ tag => 'neutron-service',
+ }
+}
\ No newline at end of file
--- /dev/null
+#
+# Install the networking-vpp ML2 mechanism driver and generate config file
+# from parameters in the other classes.
+#
+# === Parameters
+#
+# [*package_ensure*]
+# (optional) The intended state of the networking-vpp
+# package, i.e. any of the possible values of the 'ensure'
+# property for a package resource type.
+# Defaults to 'present'
+#
+# [*agents*]
+# Networking-vpp agents' addresses
+# Defaults to $::os_service_default
+#
+class neutron::plugins::ml2::networking-vpp (
+ $package_ensure = 'present',
+ $agents = $::os_service_default,
+) {
+ require ::neutron::plugins::ml2
+
+ ensure_resource('package', 'networking-vpp',
+ {
+ ensure => $package_ensure,
+ tag => 'openstack',
+ }
+ )
+
+ neutron_plugin_ml2 {
+ 'ml2_vpp/agents': value => $agents;
+ }
+}
Source0: openstack-congress.tar.gz
BuildArch: noarch
-BuildRequires: python-setuptools python2-oslo-config python2-debtcollector
+BuildRequires: python-setuptools python2-oslo-config python2-debtcollector libffi-devel python-devel openssl-devel
#Requires: pbr>=0.8 Paste PasteDeploy>=1.5.0 Routes>=1.12.3!=2.0 anyjson>=0.3.3 argparse
#Requires: Babel>=1.3 eventlet>=0.16.1!=0.17.0 greenlet>=0.3.2 httplib2>=0.7.5 requests>=2.2.0!=2.4.0
#Requires: iso8601>=0.1.9 kombu>=2.5.0 netaddr>=0.7.12 SQLAlchemy<1.1.0>=0.9.7
--run-command "sed -i '/SERVICE_LIST/a\\ \x27tacker\x27: {\x27password_field\x27: \x27OVERCLOUD_TACKER_PASSWORD\x27},' /usr/lib/python2.7/site-packages/tripleoclient/constants.py" \
--run-command "sed -i '/PASSWORD_NAMES =/a\\ \"OVERCLOUD_TACKER_PASSWORD\",' /usr/lib/python2.7/site-packages/tripleoclient/utils.py" \
--run-command "sed -i '/AodhPassword/a\\ parameters\[\x27TackerPassword\x27\] = passwords\[\x27OVERCLOUD_TACKER_PASSWORD\x27\]' /usr/lib/python2.7/site-packages/tripleoclient/v1/overcloud_deploy.py" \
- --run-command "sed -i '/^SERVICES/a\ \x27tacker\x27: {\x27description\x27: \x27Tacker Service\x27, \x27type\x27: \x27servicevm\x27, \x27path\x27: \x27/\x27, \x27port\x27: 1789 },' /usr/lib/python2.7/site-packages/os_cloud_config/keystone.py" \
+ --run-command "sed -i '/^SERVICES/a\ \x27tacker\x27: {\x27description\x27: \x27Tacker Service\x27, \x27type\x27: \x27servicevm\x27, \x27path\x27: \x27/\x27, \x27port\x27: 8888 },' /usr/lib/python2.7/site-packages/os_cloud_config/keystone.py" \
--upload ../noarch/python-tackerclient-2015.2-1.trozet.noarch.rpm:/root/ \
--install /root/python-tackerclient-2015.2-1.trozet.noarch.rpm \
--install "python2-aodhclient" \
'vpp-lib-16.09-rc1~7_gea60221~b1030.x86_64.rpm'
'vpp_papi-1.0.linux-x86_64.tar.gz'
)
-honeycomb_pkg='honeycomb-1.0.0-1066.noarch.rpm'
+honeycomb_pkg='honeycomb-1.0.0-1609.noarch.rpm'
ovs_rpm_name=openvswitch-2.5.90-1.el7.centos.x86_64.rpm
68,Fix neutron host name
69,Fix vpp mount
72,Fix compute node preconfig DPDK
+75,Add AggregateInstanceExtraSpecFilter to Scheduler
+76,Add networking-vpp ML2 mechanism driver
+77,Update FDIO to use opendaylight_v2 mechanism driver
+78,Fix spelling mistake in specs filter
+79,Fix controller and compute ip array
+80,Change TenantNIC and PublicNIC to be role specific
+81,Fix duplicate NeutronServicePlugins
done
vm_index=4
-ovs_bridges="br-admin br-private br-public br-storage"
-OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network api_network"
+ovs_bridges="br-admin br-tenant br-public br-storage"
+ovs_bridges+=" br-private br-external" # Legacy names, remove in E river
+
+#OPNFV_NETWORK_TYPES=$(python3 -c 'from apex.common.constants import OPNFV_NETWORK_TYPES; print(" ".join(OPNFV_NETWORK_TYPES))')
+OPNFV_NETWORK_TYPES+=" admin tenant external storage api"
+OPNFV_NETWORK_TYPES+=" admin_network private_network public_network storage_network api_network" # Legacy names, remove in E river
display_usage() {
if [ -n "$INVENTORY_FILE" ]; then
echo -e "${blue}INFO: Parsing inventory file...${reset}"
- if ! python3.4 -B $LIB/python/apex_python_utils.py clean -f ${INVENTORY_FILE}; then
+ if ! python3 -B $LIB/python/apex_python_utils.py clean -f ${INVENTORY_FILE}; then
echo -e "${red}WARN: Unable to shutdown all nodes! Please check /var/log/apex.log${reset}"
else
echo -e "${blue}INFO: Node shutdown complete...${reset}"
exit 1
fi
- if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
- echo -e "${red}ERROR: You should not specify an inventory with virtual deployments${reset}"
+ # inventory file usage validation
+ if [[ -n "$virtual" ]]; then
+ if [[ -n "$INVENTORY_FILE" ]]; then
+ echo -e "${red}ERROR: You should not specify an inventory file with virtual deployments${reset}"
+ exit 1
+ else
+ INVENTORY_FILE='/tmp/inventory-virt.yaml'
+ fi
+ elif [[ -z "$INVENTORY_FILE" ]]; then
+ echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
+ exit 1
+ elif [[ ! -f "$INVENTORY_FILE" ]]; then
+ echo -e "${red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
exit 1
fi
exit 1
fi
- if [[ ! -z "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
- echo -e "{$red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
- exit 1
- fi
-
- if [[ -z "$virtual" && -z "$INVENTORY_FILE" ]]; then
- echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
- exit 1
- fi
-
if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
post_config="FALSE"
echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
exit 1
fi
+ #Correct the time on the server prior to launching any VMs
+ if ntpdate $ntp_server; then
+ hwclock --systohc
+ else
+ echo "${blue}WARNING: ntpdate failed to update the time on the server. ${reset}"
+ fi
setup_undercloud_vm
if [ "$virtual" == "TRUE" ]; then
setup_virtual_baremetal $VM_CPUS $VM_RAM
- elif [ -n "$INVENTORY_FILE" ]; then
- parse_inventory_file
fi
+ parse_inventory_file
configure_undercloud
overcloud_deploy
if [ "$post_config" == "TRUE" ]; then
hugepages: 1024
hugepagesz: 2M
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
hugepages: 1024
hugepagesz: 2M
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
hugepagesz: 2M
hugepages: 1024
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
hugepages: 1024
hugepagesz: 2M
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
intel_iommu: 'on'
iommu: pt
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
no longer carry them and they will not need special handling for
installation.
+ Python 3.4 is also required and it needs to be installed if you are using
+ the Centos 7 base image:
+
+ ``sudo yum install epel-release``
+ ``sudo yum install python34``
+
To install these RPMs download them to the local disk on your CentOS 7
install and pass the file names directly to yum:
``sudo yum install python34-markupsafe-<version>.rpm
`OPNFV Apex project page <https://wiki.opnfv.org/apex>`_
+`OPNFV Apex release notes <http://artifacts.opnfv.org/apex/colorado/docs/release-notes/release-notes.html#references>`_
+
OpenStack
---------
-----------
**-**
+Scenario specific release notes
+===============================
+
+Scenario os-odl_l2-fdio-noha known issues
+-----------------------------------------
+
+* `FDS-16 <https://jira.opnfv.org/browse/FDS-16>`_:
+ Security group configuration through nova leads
+ to vhostuser port connection issues
+* `FDS-62 <https://jira.opnfv.org/browse/FDS-62>`_:
+ APEX - Increase number of files MariaDB can open
+* `FDS-79 <https://jira.opnfv.org/browse/FDS-79>`_:
+ Sometimes (especially in bulk create/delete operations
+ when multiple networks/ports are created within short time)
+ OpenDaylight doesn't accept creation requests
+* `FDS-80 <https://jira.opnfv.org/browse/FDS-80>`_:
+ After launching a VM it stayed forever in BUILD status.
+ Also further operation related to this VM (volume attachment etc.)
+ caused problems
+* `FDS-81 <https://jira.opnfv.org/browse/FDS-81>`_:
+ After functest finishes there are two bds on computes and
+ none on controller
+* `FDS-82 <https://jira.opnfv.org/browse/FDS-82>`_:
+ Nova list shows no vms but there are some on computes in paused state
+
+
+General HA scenario known issues
+--------------------------------
+
+* `COPPER-22 <https://jira.opnfv.org/browse/COPPER-22>`_:
+ Congress service HA deployment is not yet supported/verified.
Test Result
===========
done
else
for network in ${OPNFV_NETWORK_TYPES}; do
+ if ! ovs-vsctl --may-exist add-br ${NET_MAP[$network]}; then
+ echo -e "${red}ERROR: Failed to create ovs bridge ${NET_MAP[$network]}${reset}"
+ exit 1
+ fi
echo "${blue}INFO: Creating Virsh Network: $network${reset}"
virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<network ipv6='yes'>
<name>$network</name>
+<forward mode='bridge'/>
<bridge name='${NET_MAP[$network]}'/>
+<virtualport type='openvswitch'/>
</network>
EOF
if ! (virsh net-list --all | grep $network > /dev/null); then
done
echo -e "${blue}INFO: Bridges set: ${reset}"
- brctl show
+ ovs-vsctl list-br
fi
echo -e "${blue}INFO: virsh networks set: ${reset}"
exit 1
elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
+ if [ "${deploy_options_array['vpp']}" == 'True' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-networking-vpp.yaml"
+ fi
SDN_IMAGE=opendaylight
else
echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
# Push performance options to subscript to modify per-role images as needed
for option in "${performance_options[@]}" ; do
echo -e "${blue}Setting performance option $option${reset}"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "dataplane=${deploy_options_array['dataplane']} bash build_perf_image.sh $option"
done
# Build IPA kernel option ramdisks
# set NIC heat params and resource registry
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sed -i '/TenantNIC:/c\ TenantNIC: '${private_network_compute_interface} opnfv-environment.yaml
-sed -i '/PublicNIC:/c\ PublicNIC: '${public_network_compute_interface} opnfv-environment.yaml
+if [ -n "${private_network_compute_interface}" ]; then
+ sudo sed -i '/ComputeTenantNIC:/c\ ComputeTenantNIC: '${private_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+if [ -n "${private_network_controller_interface}" ]; then
+ sudo sed -i '/ControllerTenantNIC:/c\ ControllerTenantNIC: '${private_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+# TODO: PublicNIC is not used today, however, in the future, we'll bind public nic to DPDK as well for certain scenarios. At that time,
+# we'll need to make sure public network is enabled.
+if [ -n "${public_network_compute_interface}" ]; then
+ sudo sed -i '/ComputePublicNIC:/c\ ComputePublicNIC: '${public_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+if [ -n "${public_network_controller_interface}" ]; then
+ sudo sed -i '/ControllerPublicNIC:/c\ ControllerPublicNIC: '${public_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
EOI
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
done
fi
- if output=$(python3.4 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS $net_isolation_arg -e $CONFIG/network-environment.yaml $parse_ext); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS $net_isolation_arg -e $CONFIG/network-environment.yaml $parse_ext); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
##parses deploy settings yaml into globals
parse_deploy_settings() {
local output
- if output=$(python3.4 -B $LIB/python/apex_python_utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
}
##parses baremetal yaml settings into compatible json
-##writes the json to $CONFIG/instackenv_tmp.json
+##writes the json to undercloud:instackenv.json
##params: none
##usage: parse_inventory_file
parse_inventory_file() {
- local inventory=$(parse_yaml $INVENTORY_FILE)
- local node_list
- local node_prefix="node"
- local node_count=0
- local node_total
- local inventory_list
-
- # detect number of nodes
- for entry in $inventory; do
- if echo $entry | grep -Eo "^nodes_node[0-9]+_" > /dev/null; then
- this_node=$(echo $entry | grep -Eo "^nodes_node[0-9]+_")
- if [[ "$inventory_list" != *"$this_node"* ]]; then
- inventory_list+="$this_node "
- fi
- fi
- done
-
- inventory_list=$(echo $inventory_list | sed 's/ $//')
-
- for node in $inventory_list; do
- ((node_count+=1))
- done
-
- node_total=$node_count
-
- if [[ "$node_total" -lt 5 && "$ha_enabled" == "True" ]]; then
- echo -e "${red}ERROR: You must provide at least 5 nodes for HA baremetal deployment${reset}"
- exit 1
- elif [[ "$node_total" -lt 2 ]]; then
- echo -e "${red}ERROR: You must provide at least 2 nodes for non-HA baremetal deployment${reset}"
- exit 1
- fi
-
- eval $(parse_yaml $INVENTORY_FILE) || {
- echo "${red}Failed to parse inventory.yaml. Aborting.${reset}"
- exit 1
- }
-
- instackenv_output="
-{
- \"nodes\" : [
-
-"
- node_count=0
- for node in $inventory_list; do
- ((node_count+=1))
- node_output="
- {
- \"pm_password\": \"$(eval echo \${${node}ipmi_pass})\",
- \"pm_type\": \"$(eval echo \${${node}pm_type})\",
- \"mac\": [
- \"$(eval echo \${${node}mac_address})\"
- ],
- \"cpu\": \"$(eval echo \${${node}cpus})\",
- \"memory\": \"$(eval echo \${${node}memory})\",
- \"disk\": \"$(eval echo \${${node}disk})\",
- \"arch\": \"$(eval echo \${${node}arch})\",
- \"pm_user\": \"$(eval echo \${${node}ipmi_user})\",
- \"pm_addr\": \"$(eval echo \${${node}ipmi_ip})\",
- \"capabilities\": \"$(eval echo \${${node}capabilities})\"
-"
- instackenv_output+=${node_output}
- if [ $node_count -lt $node_total ]; then
- instackenv_output+=" },"
- else
- instackenv_output+=" }"
- fi
- done
-
- instackenv_output+='
- ]
-}
-'
- #Copy instackenv.json to undercloud for baremetal
- echo -e "{blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
+ if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
+ if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
+ instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
+ #Copy instackenv.json to undercloud
+ echo -e "${blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
cat > instackenv.json << EOF
$instackenv_output
openstack endpoint delete \$swift_endpoint_id
openstack service delete \$swift_service_id
+if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ for flavor in \$(openstack flavor list -c Name -f value); do
+ echo "INFO: Configuring \$flavor to use hugepage"
+ nova flavor-key \$flavor set hw:mem_page_size=large
+ done
+fi
+
if [ "${deploy_options_array['congress']}" == 'True' ]; then
ds_configs="--config username=\$OS_USERNAME
--config tenant_name=\$OS_TENANT_NAME
from .deploy_settings import DeploySettings
from .network_environment import NetworkEnvironment
from .clean import clean_nodes
+from .inventory import Inventory
--- /dev/null
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import json
+
+
+class Inventory(dict):
+ """
+ This class parses an APEX inventory yaml file into an object. It
+ generates or detects all missing fields for deployment.
+
+ It then collapses one level of identifcation from the object to
+ convert it to a structure that can be dumped into a json file formatted
+ such that Triple-O can read the resulting json as an instackenv.json file.
+ """
+ def __init__(self, source, ha=True, virtual=False):
+ init_dict = {}
+ if type(source) is str:
+ with open(source, 'r') as network_settings_file:
+ yaml_dict = yaml.load(network_settings_file)
+ # collapse node identifiers from the structure
+ init_dict['nodes'] = list(map(lambda n: n[1],
+ yaml_dict['nodes'].items()))
+ else:
+ # assume input is a dict to build from
+ init_dict = source
+
+ # move ipmi_* to pm_*
+ # make mac a list
+ def munge_nodes(node):
+ node['pm_addr'] = node['ipmi_ip']
+ node['pm_password'] = node['ipmi_pass']
+ node['pm_user'] = node['ipmi_user']
+ node['mac'] = [node['mac_address']]
+
+ for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address'):
+ del i
+
+ return node
+
+ super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
+
+ # verify number of nodes
+ if ha and len(self['nodes']) < 5:
+ raise InventoryException('You must provide at least 5 '
+ 'nodes for HA baremetal deployment')
+ elif len(self['nodes']) < 2:
+ raise InventoryException('You must provide at least 2 nodes '
+ 'for non-HA baremetal deployment${reset}')
+
+ if virtual:
+ self['arch'] = 'x86_64'
+ self['host-ip'] = '192.168.122.1'
+ self['power_manager'] = \
+ 'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager'
+ self['seed-ip'] = ''
+ self['ssh-key'] = 'INSERT_STACK_USER_PRIV_KEY'
+ self['ssh-user'] = 'root'
+
+ def dump_instackenv_json(self):
+ print(json.dumps(dict(self), sort_keys=True, indent=4))
+
+
+class InventoryException(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
from apex import NetworkSettings
from apex import NetworkEnvironment
from apex import DeploySettings
+from apex import Inventory
from apex import ip_utils
from apex.common.constants import ADMIN_NETWORK
apex.clean_nodes(args.file)
+def parse_inventory(args):
+ inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
+ inventory.dump_instackenv_json()
+
+
def find_ip(args):
"""
Get and print the IP from a specific interface
parser.add_argument('-l', '--log-file', default='/var/log/apex/apex.log',
dest='log_file', help="Log file to log to")
subparsers = parser.add_subparsers()
-
+ # parse-net-settings
net_settings = subparsers.add_parser('parse-net-settings',
help='Parse network settings file')
net_settings.add_argument('-s', '--net-settings-file',
help='Boolean to enable Controller Pre Config')
net_settings.set_defaults(func=parse_net_settings)
-
+ # find-ip
get_int_ip = subparsers.add_parser('find-ip',
help='Find interface ip')
get_int_ip.add_argument('-i', '--interface', required=True,
choices=[4, 6], dest='address_family',
help='IP Address family')
get_int_ip.set_defaults(func=find_ip)
-
+ # nic-template
nic_template = subparsers.add_parser('nic-template',
help='Build NIC templates')
nic_template.add_argument('-r', '--role', required=True,
default=None, dest='ovs_dpdk_bridge',
help='OVS DPDK Bridge Name')
nic_template.set_defaults(func=build_nic_template)
-
+ # parse-deploy-settings
deploy_settings = subparsers.add_parser('parse-deploy-settings',
help='Parse deploy settings file')
deploy_settings.add_argument('-f', '--file',
default='deploy_settings.yaml',
help='path to deploy settings file')
deploy_settings.set_defaults(func=parse_deploy_settings)
+ # parse-inventory
+ inventory = subparsers.add_parser('parse-inventory',
+ help='Parse inventory file')
+ inventory.add_argument('-f', '--file',
+ default='inventory.yaml',
+ help='path to inventory file')
+ inventory.add_argument('--ha',
+ default=False,
+ action='store_true',
+ help='Indicate if deployment is HA or not')
+ inventory.add_argument('--virtual',
+ default=False,
+ action='store_true',
+ help='Indicate if deployment inventory is virtual')
+ inventory.set_defaults(func=parse_inventory)
clean = subparsers.add_parser('clean',
help='Parse deploy settings file')
# root's auth keys so that Undercloud can control
# vm power on the hypervisor
ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys
-
- INSTACKENV=$CONFIG/instackenv-virt.json
-
- # upload instackenv file to Undercloud for virtual deployment
- scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
fi
# allow stack to control power management on the hypervisor via sshkey
openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
openstack-config --set undercloud.conf DEFAULT undercloud_debug false
openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
+ sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
+ sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
fi
vcpus=$1
ramsize=$(($2*1024))
fi
- #start by generating the opening json for instackenv.json
- cat > $CONFIG/instackenv-virt.json << EOF
-{
- "nodes": [
+ #start by generating the opening yaml for the inventory-virt.yaml file
+ cat > /tmp/inventory-virt.yaml << EOF
+nodes:
EOF
# next create the virtual machines and add their definitions to the file
fi
done
else
- echo "Found Baremetal ${i} VM, using existing VM"
+ echo "Found baremetal${i} VM, using existing VM"
fi
#virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
- cat >> $CONFIG/instackenv-virt.json << EOF
- {
- "pm_addr": "192.168.122.1",
- "pm_user": "root",
- "pm_password": "INSERT_STACK_USER_PRIV_KEY",
- "pm_type": "pxe_ssh",
- "mac": [
- "$mac"
- ],
- "cpu": "$vcpus",
- "memory": "$ramsize",
- "disk": "41",
- "arch": "x86_64",
- "capabilities": "$capability"
- },
+ cat >> /tmp/inventory-virt.yaml << EOF
+ node${i}:
+ mac_address: "$mac"
+ ipmi_ip: 192.168.122.1
+ ipmi_user: root
+ ipmi_pass: "INSERT_STACK_USER_PRIV_KEY"
+ pm_type: "pxe_ssh"
+ cpus: $vcpus
+ memory: $ramsize
+ disk: 41
+ arch: "x86_64"
+ capabilities: "$capability"
EOF
done
- #truncate the last line to remove the comma behind the bracket
- tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
-
- #finally reclose the bracket and close the instackenv.json file
- cat >> $CONFIG/instackenv-virt.json << EOF
- }
- ],
- "arch": "x86_64",
- "host-ip": "192.168.122.1",
- "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
- "seed-ip": "",
- "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
- "ssh-user": "root"
-}
-EOF
#Overwrite the tripleo-inclubator domain.xml with our own, keeping a backup.
if [ ! -f /usr/share/tripleo/templates/domain.xml.bak ]; then
/usr/bin/mv -f /usr/share/tripleo/templates/domain.xml /usr/share/tripleo/templates/domain.xml.bak
--- /dev/null
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from apex.inventory import Inventory
+from apex.inventory import InventoryException
+
+from nose.tools import assert_is_instance
+from nose.tools import assert_raises
+from nose.tools import assert_equal
+
+inventory_files = ('intel_pod2_settings.yaml',
+ 'nokia_pod1_settings.yaml',
+ 'pod_example_settings.yaml')
+
+
+class TestInventory(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_init(self):
+ for f in inventory_files:
+ i = Inventory('../config/inventory/{}'.format(f))
+ assert_equal(i.dump_instackenv_json(), None)
+
+ # test virtual
+ i = Inventory(i, virtual=True)
+ assert_equal(i.dump_instackenv_json(), None)
+
+ # Remove nodes to violate HA node count
+ while len(i['nodes']) >= 5:
+ i['nodes'].pop()
+ assert_raises(InventoryException,
+ Inventory, i)
+
+ # Remove nodes to violate non-HA node count
+ while len(i['nodes']) >= 2:
+ i['nodes'].pop()
+ assert_raises(InventoryException,
+ Inventory, i, ha=False)
+
+ def test_exception(self):
+ e = InventoryException("test")
+ print(e)
+ assert_is_instance(e, InventoryException)
from apex_python_utils import parse_deploy_settings
from apex_python_utils import find_ip
from apex_python_utils import build_nic_template
+from apex_python_utils import parse_inventory
from nose.tools import assert_equal
from nose.tools import assert_raises
net_env = '../build/network-environment.yaml'
deploy_sets = '../config/deploy/deploy_settings.yaml'
nic_template = '../build/nics-template.yaml.jinja2'
+inventory = '../config/inventory/pod_example_settings.yaml'
class TestCommonUtils(object):
'-r', 'compute',
'-t', nic_template])
assert_equal(build_nic_template(args), None)
+
+ def test_parse_inventory(self):
+ args = self.parser.parse_args(['parse-inventory',
+ '-f', inventory])
+ assert_equal(parse_inventory(args), None)