<packagereq type="default">openlmi-software</packagereq>
<packagereq type="default">openlmi-storage</packagereq>
<packagereq type="default">openslp-server</packagereq>
- <packagereq type="default">openwsman-client</packagereq>
<packagereq type="default">tog-pegasus</packagereq>
- <packagereq type="default">wsmancli</packagereq>
<packagereq type="optional">openlmi</packagereq>
<packagereq type="optional">openlmi-account-doc</packagereq>
<packagereq type="optional">openlmi-fan</packagereq>
<packagereq type="optional">openlmi-storage-doc</packagereq>
<packagereq type="optional">openlmi-tools</packagereq>
<packagereq type="optional">openlmi-tools-doc</packagereq>
- <packagereq type="optional">openwsman-server</packagereq>
<packagereq type="optional">sblim-cim-client2</packagereq>
<packagereq type="optional">sblim-cmpi-fsvol</packagereq>
<packagereq type="optional">sblim-cmpi-network</packagereq>
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ # Extra Config
+ OS::TripleO::ComputeExtraConfigPre: OS::Heat::None
+ OS::TripleO::ControllerExtraConfigPre: OS::Heat::None
+
parameter_defaults:
NeutronExternalNetworkBridge: 'br-ex'
{%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] == 'native' %}
{%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
-
- type: ovs_bridge
+ type: ovs_user_bridge
name: {{ ovs_dpdk_bridge }}
use_dhcp: false
addresses:
ip_netmask: {get_param: TenantIpSubnet}
members:
-
- type: interface
- name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
- # force the MAC address of the bridge to this interface
- primary: true
- -
- type: ovs_bridge
- name: br-tun
- use_dhcp: false
+ type: ovs_dpdk_port
+ name: dpdk0
+ driver: {{ nets['tenant']['nic_mapping'][role]['uio_driver'] }}
+ members:
+ -
+ type: interface
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
+ # force the MAC address of the bridge to this interface
+ primary: true
{%- else %}
-
type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }}
{%- if 'uio-driver' in nets['tenant']['nic_mapping'][role] %}
uio_driver: {{ nets['tenant']['nic_mapping'][role]['uio-driver'] }}
{%- endif %}
+ {%- if 'interface-options' in nets['tenant']['nic_mapping'][role] %}
+ options: '{{ nets['tenant']['nic_mapping'][role]['interface-options'] }}'
+ {%- endif %}
use_dhcp: false
addresses:
-
next_hop: {get_param: ExternalInterfaceDefaultRoute}
{%- elif nets['external'][0]['enabled'] and external_net_type == 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-
+ {%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
+ type: ovs_user_bridge
+ {%- else %}
type: ovs_bridge
+ {%- endif %}
name: {get_input: bridge_name}
use_dhcp: false
members:
#NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
# Kernel arguments, this value will be set to kernel arguments specified for compute nodes in deploy setting file.
#ComputeKernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ #PmdCoreList: 1
+ #OvsDpdkCoreList: 2
+ #OvsDpdkSocketMemory: 1024
+ #ControllerExtraConfig:
+ #NovaComputeExtraConfig:
ExtraConfig:
tripleo::ringbuilder::build_ring: False
nova::nova_public_key:
key: 'os_compute_api:servers:show:host_status'
value: 'rule:admin_or_owner'
nova::api::default_floating_pool: 'external'
- #VPP routing node, used for odl_l3-fdio only.
+ #VPP routing node, used for odl-fdio only.
#opendaylight::vpp_routing_node: overcloud-novacompute-0.opnfvlf.org
ControllerServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::NeutronHoneycombAgent
- OS::TripleO::Services::Congress
- OS::TripleO::Services::NeutronVppAgent
+ - OS::TripleO::Services::OVNDBs
+ - OS::TripleO::Services::Vpp
ComputeServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::VipHosts
- OS::TripleO::Services::NeutronHoneycombAgent
- OS::TripleO::Services::NeutronVppAgent
+ - OS::TripleO::Services::Vpp
# Get Real Time Kernel from kvm4nfv
populate_cache $kvmfornfv_uri_base/$kvmfornfv_kernel_rpm
+# ODL/FDIO packages frozen for L3 scenarios
+fdio_l3_pkg_str=''
+for package in ${fdio_l3_pkgs[@]}; do
+ wget "$fdio_l3_uri_base/$package"
+ fdio_l3_pkg_str+=" --upload ${BUILD_DIR}/${package}:/root/fdio_l3/"
+done
+
# ODL/FDIO packages frozen for L2 scenarios
fdio_l2_pkg_str=''
for package in ${fdio_l2_pkgs[@]}; do
fdio_l2_pkg_str+=" --upload ${BUILD_DIR}/${package}:/root/fdio_l2/"
done
+# FDIO packages frozen for nosdn scenarios
+fdio_nosdn_pkg_str=''
+for package in ${fdio_nosdn_pkgs[@]}; do
+ wget "$fdio_nosdn_uri_base/$package"
+ fdio_nosdn_pkg_str+=" --upload ${BUILD_DIR}/${package}:/root/fdio_nosdn/"
+done
+
# Increase disk size by 900MB to accommodate more packages
qemu-img resize overcloud-full_build.qcow2 +900MB
LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "xfs_growfs /dev/sda" \
--upload ${BUILD_DIR}/opnfv-puppet-tripleo.tar.gz:/etc/puppet/modules \
+ --run-command "cd /etc/puppet/modules && rm -rf tripleo && tar xzf opnfv-puppet-tripleo.tar.gz" \
--run-command "yum update -y python-ipaddress rabbitmq-server erlang*" \
--run-command "if ! rpm -qa | grep python-redis; then yum install -y python-redis; fi" \
--run-command "sed -i 's/^#UseDNS.*$/UseDNS no/' /etc/ssh/sshd_config" \
--run-command "sed -i 's/^GSSAPIAuthentication.*$/GSSAPIAuthentication no/' /etc/ssh/sshd_config" \
- --run-command "cd /etc/puppet/modules && rm -rf tripleo && tar xzf opnfv-puppet-tripleo.tar.gz" \
--run-command "mkdir /root/dpdk_rpms" \
--upload ${BUILD_DIR}/fdio.repo:/etc/yum.repos.d/fdio.repo \
$dpdk_pkg_str \
- --run-command "yum install --downloadonly --downloaddir=/root/fdio vpp vpp-lib vpp-api-python vpp-plugins vpp-api-java" \
- --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio \
- --run-command "yum install -y /root/fdio/*.rpm" \
+ --run-command "mkdir /root/fdio_l3" \
--run-command "mkdir /root/fdio_l2" \
+ --run-command "mkdir /root/fdio_nosdn" \
+ --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio_nosdn \
+ $fdio_l3_pkg_str \
$fdio_l2_pkg_str \
+ $fdio_nosdn_pkg_str \
+ --run-command "yum install -y /root/fdio_l2/*.rpm" \
--run-command "yum install -y etcd" \
--install python-etcd \
--run-command "puppet module install cristifalcas/etcd" \
--install /root/$tacker_pkg \
--upload ${BUILD_DIR}/noarch/$tackerclient_pkg:/root/ \
--install /root/$tackerclient_pkg \
+ --run-command "curl -f https://copr.fedorainfracloud.org/coprs/leifmadsen/ovs-master/repo/epel-7/leifmadsen-ovs-master-epel-7.repo > /etc/yum.repos.d/leifmadsen-ovs-master-epel-7.repo" \
+ --run-command "mkdir /root/ovs27" \
+ --run-command "yumdownloader --destdir=/root/ovs27 openvswitch*2.7* python-openvswitch-2.7*" \
--run-command "pip install python-senlinclient" \
--run-command "sed -i -E 's/timeout=[0-9]+/timeout=60/g' /usr/share/openstack-puppet/modules/rabbitmq/lib/puppet/provider/rabbitmqctl.rb" \
--upload ${BUILD_ROOT}/patches/puppet-neutron-add-odl-settings.patch:/usr/share/openstack-puppet/modules/neutron/ \
--run-command "wget ${fdio_l2_uri_base}/honeycomb-1.17.04-2439.noarch.rpm -O /root/fdio_l2/honeycomb-1.17.04-2439.noarch.rpm" \
--run-command "wget ${fdio_l2_uri_base}/opendaylight-6.0.0-0.1.20170228snap4111.el7.noarch.rpm -O /root/fdio_l2/opendaylight-6.0.0-0.1.20170228snap4111.el7.noarch.rpm" \
--install opendaylight,python-networking-odl \
- --install honeycomb \
- --upload ${BUILD_ROOT}/honeycomb-jar-replace.sh:/root/ \
- --run-command "/root/honeycomb-jar-replace.sh" \
+ --run-command "yum install -y /root/fdio_l2/honeycomb-1.17.04-2439.noarch.rpm" \
--upload ${BUILD_DIR}/puppet-opendaylight.tar.gz:/etc/puppet/modules/ \
--run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \
--upload ${BUILD_DIR}/networking-bgpvpn.tar.gz:/root/ \
--install zeromq-4.1.4,zeromq-devel-4.1.4 \
--install capnproto-devel,capnproto-libs,capnproto \
--upload ${BUILD_ROOT}/patches/neutron-patch-NSDriver.patch:/usr/lib/python2.7/site-packages/ \
+ --upload ${BUILD_ROOT}/patches/disable_odl_clustering.patch:/etc/puppet/modules/tripleo/ \
-a overcloud-full-opendaylight_build.qcow2
LIBGUESTFS_BACKEND=direct virt-sparsify --compress overcloud-full-opendaylight_build.qcow2 overcloud-full-opendaylight.qcow2
--- /dev/null
+heat_template_version: 2014-10-16
+
+description: >
+  Example extra config for post-deployment
+
+parameters:
+  server:
+    type: string
+  OvsDpdkCoreList:
+    description: >
+      List of logical cores for OVS DPDK
+    type: string
+    default: ""
+  OvsDpdkSocketMemory:
+    description: Memory allocated for each socket
+    default: ""
+    type: string
+  PmdCoreList:
+    description: >
+      A list or range of physical CPU cores to be pinned to PMD
+      The given args will be appended to the tuned cpu-partitioning profile.
+      Ex. HostCpusList: '4-12' will tune cores from 4-12
+    type: string
+    default: ""
+
+resources:
+  OvsDpdkSetup:
+    type: OS::Heat::StructuredDeployment
+    properties:
+      server: {get_param: server}
+      config: {get_resource: OvsDpdkConfig}
+
+  OvsDpdkConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        str_replace:
+          template: |
+            #!/bin/bash
+            set -x
+            # Convert a comma-separated list of core IDs into a hex CPU
+            # affinity mask (printed without a leading 0x). Cores are packed
+            # into 32-bit words so masks wider than 32 cores are supported.
+            get_mask()
+            {
+              local list=$1
+              local mask=0
+              declare -a bm
+              max_idx=0
+              # First pass: find the highest 32-bit word index needed.
+              for core in $(echo $list | sed 's/,/ /g')
+              do
+                  index=$(($core/32))
+                  bm[$index]=0
+                  if [ $max_idx -lt $index ]; then
+                     max_idx=$index
+                  fi
+              done
+              # Zero-fill every word up to max_idx so gaps print as 00000000.
+              for ((i=$max_idx;i>=0;i--));
+              do
+                  bm[$i]=0
+              done
+              # Second pass: set the bit for each listed core in its word.
+              # NOTE(review): $((1<<$core)) uses the full core number as the
+              # shift amount rather than core%32 — appears to rely on shell
+              # shift semantics for cores >=32; confirm for multi-word masks.
+              for core in $(echo $list | sed 's/,/ /g')
+              do
+                  index=$(($core/32))
+                  temp=$((1<<$core))
+                  bm[$index]=$((${bm[$index]} | $temp))
+              done
+              # Emit the top word unpadded, lower words zero-padded to 8 hex
+              # digits, most-significant first.
+              printf -v mask "%x" "${bm[$max_idx]}"
+              for ((i=$max_idx-1;i>=0;i--));
+              do
+                  printf -v hex "%08x" "${bm[$i]}"
+                  mask+=$hex
+              done
+              printf "%s" "$mask"
+            }
+            pmd_cpu_mask=$( get_mask $PMD_CORES )
+            dpdk_lcore_mask=$( get_mask $DPDK_CORES )
+            # Swap the VPP-oriented packages for the DPDK-enabled OVS build
+            # staged under /root/dpdk_rpms by the image build.
+            yum remove -y vpp-devel
+            yum install -y /root/dpdk_rpms/*
+            systemctl restart openvswitch
+            sleep 5
+            # Run qemu as root so vhost-user sockets created by OVS-DPDK are
+            # accessible to libvirt guests.
+            sed -i "s/#user\s*=.*/user = \"root\"/" /etc/libvirt/qemu.conf
+            sed -i "s/#group\s*=.*/group = \"root\"/" /etc/libvirt/qemu.conf
+            # Enable DPDK in OVS, then apply the optional tuning knobs only
+            # when the corresponding parameter was provided (non-empty).
+            ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
+            if [ -n "$SOCKET_MEMORY" ]; then
+              ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=$SOCKET_MEMORY
+            fi
+            if [ -n "$pmd_cpu_mask" ]; then
+              ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=$pmd_cpu_mask
+            fi
+            if [ -n "$dpdk_lcore_mask" ]; then
+              ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=$dpdk_lcore_mask
+            fi
+            # Restart again so OVS picks up the new other_config settings.
+            systemctl restart openvswitch
+
+          params:
+            $DPDK_CORES: {get_param: OvsDpdkCoreList}
+            $PMD_CORES: {get_param: PmdCoreList}
+            $SOCKET_MEMORY: {get_param: OvsDpdkSocketMemory}
+outputs:
+  deploy_stdout:
+    description: Output of the extra dpdk ovs deployment
+    value: {get_attr: [OvsDpdkSetup, deploy_stdout]}
--- /dev/null
+diff --git a/manifests/profile/base/neutron/opendaylight.pp b/manifests/profile/base/neutron/opendaylight.pp
+index 3269f3e..556fe63 100644
+--- a/manifests/profile/base/neutron/opendaylight.pp
++++ b/manifests/profile/base/neutron/opendaylight.pp
+@@ -22,34 +22,19 @@
+ # (Optional) The current step of the deployment
+ # Defaults to hiera('step')
+ #
+-# [*odl_api_ips*]
+-# (Optional) List of OpenStack Controller IPs for ODL API
+-# Defaults to hiera('opendaylight_api_node_ips')
+-#
+-# [*node_name*]
+-# (Optional) The short hostname of node
+-# Defaults to hiera('bootstack_nodeid')
++# [*primary_node*]
++# (Optional) The hostname of the first node of this role type
++# Defaults to hiera('bootstrap_nodeid', undef)
+ #
+ class tripleo::profile::base::neutron::opendaylight (
+ $step = hiera('step'),
+- $odl_api_ips = hiera('opendaylight_api_node_ips'),
+- $node_name = hiera('bootstack_nodeid')
++ $primary_node = hiera('bootstrap_nodeid', undef),
+ ) {
+
+ if $step >= 1 {
+- if empty($odl_api_ips) {
+- fail('No IPs assigned to OpenDaylight Api Service')
+- } elsif size($odl_api_ips) == 2 {
+- fail('2 node OpenDaylight deployments are unsupported. Use 1 or greater than 2')
+- } elsif size($odl_api_ips) > 2 {
+- $node_string = split($node_name, '-')
+- $ha_node_index = $node_string[-1] + 1
+- class { '::opendaylight':
+- enable_ha => true,
+- ha_node_ips => $odl_api_ips,
+- ha_node_index => $ha_node_index,
+- }
+- } else {
++ # Configure ODL only on first node of the role where this service is
++ # applied
++ if $primary_node == downcase($::hostname) {
+ include ::opendaylight
+ }
+ }
Name: opnfv-apex-common
-Version: 4.0
+Version: 5.0
Release: %{release}
Summary: Scripts for OPNFV deployment using RDO Manager
install config/deploy/os-nosdn-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
install config/deploy/os-nosdn-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
install config/deploy/os-nosdn-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-ha.yaml
-install config/deploy/os-nosdn-ovs-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-ovs-noha.yaml
+install config/deploy/os-nosdn-ovs_dpdk-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
install config/deploy/os-nosdn-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
install config/deploy/os-nosdn-performance-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
-install config/deploy/os-nosdn-ovs-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-ovs-ha.yaml
+install config/deploy/os-nosdn-ovs_dpdk-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
install config/deploy/os-nosdn-kvm-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
install config/deploy/os-nosdn-kvm-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
-install config/deploy/os-odl_l2-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-nofeature-ha.yaml
install config/deploy/os-odl_l2-sfc-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-sfc-noha.yaml
install config/deploy/os-odl-bgpvpn-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-ha.yaml
install config/deploy/os-odl-bgpvpn-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-noha.yaml
install config/deploy/os-odl_l2-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-ha.yaml
install config/deploy/os-odl_l2-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-noha.yaml
-install config/deploy/os-odl_l3-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l3-fdio-noha.yaml
-install config/deploy/os-odl_l3-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l3-fdio-ha.yaml
-install config/deploy/os-odl_l3-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-ha.yaml
-install config/deploy/os-odl_l3-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-noha.yaml
+install config/deploy/os-odl-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-noha.yaml
+install config/deploy/os-odl-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
+install config/deploy/os-odl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
+install config/deploy/os-odl-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-nofeature-noha.yaml
+install config/deploy/os-odl-ovs_dpdk-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
+install config/deploy/os-odl-ovs_dpdk-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-noha.yaml
install config/deploy/os-odl-gluon-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-gluon-noha.yaml
+install config/deploy/os-ovn-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-ovn-nofeature-noha.yaml
#install config/deploy/os-onos-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml
#install config/deploy/os-onos-sfc-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-onos-sfc-ha.yaml
install config/deploy/os-ocl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-nosdn-ovs-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-nosdn-ovs-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl_l2-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl_l2-sfc-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl_l2-fdio-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl_l3-fdio-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl_l3-fdio-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-fdio-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-nofeature-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-gluon-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-ovn-nofeature-noha.yaml
#%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml
#%{_sysconfdir}/opnfv-apex/os-onos-sfc-ha.yaml
%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Tue Apr 04 2017 Dan Radez <dradez@redhat.com> - 5.0-1
+- Version update for Euphrates
+- rename to ovs_dpdk
+* Wed Mar 29 2017 Dan Radez <dradez@redhat.com> - 4.0-9
+- Remove odl_l2-nofeature scenario file
+- rename all odl_l3 scenario files to odl
+* Thu Mar 23 2017 Tim Rozet <trozet@redhat.com> - 4.0-8
+- Adds os-odl_l3-ovs-ha and noha scenarios
* Mon Mar 12 2017 Feng Pan <fpan@redhat.com> - 4.0-7
- Add os-nosdn-fdio-ha.yaml
* Fri Mar 10 2017 Feng Pan <fpan@redhat.com> - 4.0-6
* Wed Mar 08 2017 Dan Radez <dradez@redhat.com> - 4.0-5
- Commenting out ONOS, unsupported in Danube
- Adding kvm4nfv files
+- Adding OVN files
* Tue Feb 14 2017 Feng Pan <fpan@redhat.com> - 4.0-4
- Add network_settings_vpp.yaml
* Fri Feb 3 2017 Nikolas Hermanns <nikolas.hermanns@ericsson.com> - 4.0-3
Name: opnfv-apex-onos
-Version: 4.0
+Version: 5.0
Release: %{release}
Summary: Overcloud Disk images for OPNFV Apex ONOS deployment
%{_var}/opt/opnfv/images/overcloud-full-onos.qcow2
%changelog
+* Tue Apr 04 2017 Dan Radez <dradez@redhat.com> - 5.0-1
+- Version update for Euphrates
* Wed Dec 7 2016 Tim Rozet <trozet@redhat.com> - 4.0-2
- Make install path consistent
* Wed Nov 2 2016 Dan Radez <dradez@redhat.com> - 4.0-1
Name: opnfv-apex-release
-Version: danube
+Version: euphrates
Release: %{release}
Summary: RPM Release file
%{_sysconfdir}/yum.repos.d/opnfv-apex.repo
%changelog
+* Tue Apr 04 2017 Dan Radez <dradez@redhat.com> - 5.0-1
+- Version update for Euphrates
* Wed Nov 23 2016 Dan Radez <dradez@redhat.com> - 3.0-1
- Initial Packaging
Name: opnfv-apex-undercloud
-Version: 4.0
+Version: 5.0
Release: %{release}
Summary: Scripts and Disk images to launch the Undercloud for OPNFV Apex
%changelog
+* Tue Apr 04 2017 Dan Radez <dradez@redhat.com> - 5.0-1
+- Version update for Euphrates
* Wed Nov 2 2016 Dan Radez <dradez@redhat.com> - 4.0-1
- Version update for Danube
* Tue May 24 2016 Tim Rozet <trozet@redhat.com> - 3.0-1
Name: opnfv-apex
-Version: 4.0
+Version: 5.0
Release: %{release}
Summary: Overcloud Disk images for OPNFV Apex OpenDaylight deployment
%{_var}/opt/opnfv/images/overcloud-full-opendaylight.qcow2
%changelog
+* Tue Apr 04 2017 Dan Radez <dradez@redhat.com> - 5.0-1
+- Version update for Euphrates
* Wed Dec 7 2016 Tim Rozet <trozet@redhat.com> - 4.0-2
- Make install path consistent
* Wed Nov 2 2016 Dan Radez <dradez@redhat.com> - 4.0-1
# installing forked opnfv-tht
# enabling ceph OSDs to live on the controller
-# OpenWSMan package update supports the AMT Ironic driver for the TealBox
# seeding configuration files specific to OPNFV
# add congress client
# add congress password to python-tripleoclient
--run-command "cd /usr/share && rm -rf openstack-tripleo-heat-templates && tar xzf opnfv-tht.tar.gz" \
--run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
--run-command "sed -i '/ComputeEnableCephStorage/c\\ ComputeEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
- --run-command "curl -f http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \
- --run-command "yum update -y openwsman*" \
--run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \
--upload ${BUILD_ROOT}/opnfv-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/first-boot.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/kvm4nfv-1st-boot.yaml:/home/stack/ \
--upload ${BUILD_DIR}/enable_rt_kvm.yaml:/home/stack/ \
+ --upload ${BUILD_ROOT}/ovs-dpdk-preconfig.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/csit-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/virtual-environment.yaml:/home/stack/ \
--install "python2-congressclient" \
--install "openstack-heat-api" \
--upload ${BUILD_ROOT}/patches/0001-Removes-doing-yum-update.patch:/usr/lib/python2.7/site-packages/ \
--run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < 0001-Removes-doing-yum-update.patch" \
- --root-password password:stack \
-a undercloud_build.qcow2
mv -f undercloud_build.qcow2 undercloud.qcow2
if [ -z ${GS_PATHNAME+x} ]; then
GS_PATHNAME=/colorado
fi
-dpdk_uri_base=http://artifacts.opnfv.org/ovsnfv$GS_PATHNAME
+dpdk_uri_base=http://artifacts.opnfv.org/ovsnfv
dpdk_rpms=(
-'ovs4opnfv-e8acab14-dpdk-16.04.0-1.el7.centos.x86_64.rpm'
-'ovs4opnfv-e8acab14-dpdk-devel-16.04.0-1.el7.centos.x86_64.rpm'
-'ovs4opnfv-e8acab14-dpdk-examples-16.04.0-1.el7.centos.x86_64.rpm'
-'ovs4opnfv-e8acab14-dpdk-tools-16.04.0-1.el7.centos.x86_64.rpm'
-'ovs4opnfv-e8acab14-openvswitch-2.5.90-0.12032.gitc61e93d6.1.el7.centos.x86_64.rpm'
+'ovs4opnfv-e8acab14-dpdk-16.11-5.el7.centos.x86_64.rpm'
+'ovs4opnfv-e8acab14-dpdk-devel-16.11-5.el7.centos.x86_64.rpm'
+'ovs4opnfv-e8acab14-dpdk-examples-16.11-5.el7.centos.x86_64.rpm'
+'ovs4opnfv-e8acab14-dpdk-tools-16.11-5.el7.centos.x86_64.rpm'
)
kvmfornfv_uri_base="http://artifacts.opnfv.org/kvmfornfv"
gluon_rpm=gluon-0.0.1-1_20170302.noarch.rpm
+fdio_l3_uri_base=http://artifacts.opnfv.org/apex/danube/fdio_l3_rpms
+fdio_l3_pkgs=(
+'vpp-17.04-rc0~399_g17a75cb~b2022.x86_64.rpm'
+'vpp-api-python-17.04-rc0~399_g17a75cb~b2022.x86_64.rpm'
+'vpp-lib-17.04-rc0~399_g17a75cb~b2022.x86_64.rpm'
+'vpp-plugins-17.04-rc0~399_g17a75cb~b2022.x86_64.rpm'
+'honeycomb-1.17.04-2503.noarch.rpm'
+)
+
fdio_l2_uri_base=http://artifacts.opnfv.org/apex/danube/fdio_l2_rpms
fdio_l2_pkgs=(
'vpp-17.04-rc0~300_gdef19da~b1923.x86_64.rpm'
'vpp-lib-17.04-rc0~300_gdef19da~b1923.x86_64.rpm'
'vpp-plugins-17.04-rc0~300_gdef19da~b1923.x86_64.rpm'
)
+
+fdio_nosdn_uri_base=http://artifacts.opnfv.org/apex/danube/fdio_nosdn_rpms
+fdio_nosdn_pkgs=(
+'vpp-17.04-rc0~476_geaabe07~b2100.x86_64.rpm'
+'vpp-api-python-17.04-rc0~476_geaabe07~b2100.x86_64.rpm'
+'vpp-lib-17.04-rc0~476_geaabe07~b2100.x86_64.rpm'
+'vpp-plugins-17.04-rc0~476_geaabe07~b2100.x86_64.rpm'
+)
120,Add old openflow port (6633) and karaf ssh port (8101)
123,Disable L3 HA for ODL/FDIO scenarios
124,setting keystone region for tacker and congress
+111,Backporting OVN fixes
#author: Dan Radez (dradez@redhat.com)
#author: Tim Rozet (trozet@redhat.com)
-# Backwards compat for old ENV Vars
-# Remove in E Release
-if [ -n "$CONFIG" ]; then
- echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
- echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
- BASE=$CONFIG
-fi
-if [ -n "$RESOURCES" ]; then
- echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
- echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
- IMAGES=$RESOURCES
-fi
-
# Use default if no param passed
BASE=${BASE:-'/var/opt/opnfv'}
IMAGES=${IMAGES:-"$BASE/images"}
declare -a performance_options
declare -A NET_MAP
-# Backwards compat for old ENV Vars
-# Remove in E Release
-if [ -n "$CONFIG" ]; then
- echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
- echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
- BASE=$CONFIG
-fi
-if [ -n "$RESOURCES" ]; then
- echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
- echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
- IMAGES=$RESOURCES
-fi
-
APEX_TMP_DIR=$(python3 -c "import tempfile; print(tempfile.mkdtemp())")
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
DEPLOY_OPTIONS=""
echo -e " --debug : enable debug output."
echo -e " --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
echo -e " --virtual-cpus : Number of CPUs to use per Overcloud VM in a virtual deployment (defaults to 4)."
+ echo -e " --virtual-computes : Number of Virtual Compute nodes to create and use during deployment (defaults to 1 for noha and 2 for ha)."
echo -e " --virtual-default-ram : Amount of default RAM to use per Overcloud VM in GB (defaults to 8)."
echo -e " --virtual-compute-ram : Amount of RAM to use per Overcloud Compute VM in GB (defaults to 8). Overrides --virtual-default-ram arg for computes"
}
--- /dev/null
+# poweroff on success
+poweroff
+
+# Do not configure the X Window System
+skipx
+# System timezone
+timezone US/Eastern --isUtc
+# System bootloader configuration
+bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=vda
+autopart --type=lvm
+# Partition clearing information
+clearpart --all --initlabel --drives=vda
+
+%packages
+@apex-opendaylight
+@base
+@core
+@virtualization-hypervisor
+@virtualization-tools
+chrony
+kexec-tools
+
+%end
+
+%addon com_redhat_kdump --disable
+
+%end
+
+%anaconda
+pwpolicy root --minlen=6 --minquality=50 --notstrict --nochanges --notempty
+pwpolicy user --minlen=6 --minquality=50 --notstrict --nochanges --notempty
+pwpolicy luks --minlen=6 --minquality=50 --notstrict --nochanges --notempty
+%end
+
+#version=DEVEL
+# System authorization information
+auth --enableshadow --passalgo=sha512
+# Use CDROM installation media
+cdrom
+# Use text mode install
+text
+# Run the Setup Agent on first boot
+firstboot --disable
+ignoredisk --only-use=vda
+# Keyboard layouts
+keyboard --vckeymap=us --xlayouts=''
+# System language
+lang en_US.UTF-8
+
+# Network information
+network --bootproto=dhcp --device=eth0 --onboot=off --ipv6=auto --no-activate
+network --hostname=localhost.localdomain
+
+# Root password
+rootpw --iscrypted $6$l4m1GNdyJ/.EP40T$2Nn99xwbJexsqqYqbgWCUivSIqJTOTTNuxmli6TM9.3uom5eiIZDPQ3UZ6gVYi0ir2px4z7e2DnccmoV/EXNB/
+# System services
+services --enabled="chronyd"
# Utility script used to interact with a deployment
# @author Tim Rozet (trozet@redhat.com)
-# Backwards compat for old ENV Vars
-# Remove in E Release
-if [ -n "$CONFIG" ]; then
- echo -e "${red}WARNING: ENV var CONFIG is Deprecated, please unset CONFIG and export BASE in its place${reset}"
- echo -e "${red}WARNING: CONFIG will be removed in E${reset}"
- BASE=$CONFIG
-fi
-if [ -n "$RESOURCES" ]; then
- echo -e "${red}WARNING: ENV var RESOURCES is Deprecated, please unset RESOURCES and export IMAGES in its place${reset}"
- echo -e "${red}WARNING: RESOURCES will be removed in E${reset}"
- IMAGES=$RESOURCES
-fi
-
BASE=${BASE:-'/var/opt/opnfv'}
IMAGES=${IMAGES:-"$BASE/images"}
LIB=${LIB:-"$BASE/lib"}
# # This is currently the only available option in the nova section. It will
# # add the provided string to vcpu_pin_set in nova.conf. This is used to pin
# # guest VMs to a set of CPU cores, and is decsribed in more detail here:
- # # http://docs.openstack.org/mitaka/config-reference/compute/config-options.html
+ # # http://docs.openstack.org/ocata/config-reference/compute/config-options.html
# libvirtpin: 1
# kernel:
# # On compute nodes, isolcpus is usually used to reserve cores for use either by VMs
main-core: 1
corelist-workers: 2
uio-driver: uio_pci_generic
+ interface-options: "vlan-strip-offload off"
Compute:
kernel:
hugepagesz: 2M
main-core: 1
corelist-workers: 2
uio-driver: uio_pci_generic
+ interface-options: "vlan-strip-offload off"
main-core: 1
corelist-workers: 2
uio-driver: uio_pci_generic
+ interface-options: "vlan-strip-offload off"
Compute:
kernel:
hugepagesz: 2M
main-core: 1
corelist-workers: 2
uio-driver: uio_pci_generic
+ interface-options: "vlan-strip-offload off"
hugepages: 2048
intel_iommu: 'on'
iommu: pt
+ ovs:
+ socket_memory: 1024
+ pmd_cores: 2
+ dpdk_cores: 1
hugepages: 2048
intel_iommu: 'on'
iommu: pt
+ ovs:
+ socket_memory: 1024
+ pmd_cores: 2
+ dpdk_cores: 1
--- /dev/null
+global_params:
+ ha_enabled: true
+
+deploy_options:
+ sdn_controller: opendaylight
+ odl_version: boron
+ sdn_l3: true
+ tacker: true
+ congress: true
+ sfc: false
+ vpn: false
+ dataplane: ovs_dpdk
+ performance:
+ Controller:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 1024
+ Compute:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 2048
+ intel_iommu: 'on'
+ iommu: pt
+ ovs:
+ socket_memory: 1024
+ pmd_cores: 2
+ dpdk_cores: 1
--- /dev/null
+global_params:
+ ha_enabled: false
+
+deploy_options:
+ sdn_controller: opendaylight
+ odl_version: boron
+ sdn_l3: true
+ tacker: true
+ congress: true
+ sfc: false
+ vpn: false
+ dataplane: ovs_dpdk
+ performance:
+ Controller:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 1024
+ Compute:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 2048
+ intel_iommu: 'on'
+ iommu: pt
+ ovs:
+ socket_memory: 1024
+ pmd_cores: 2
+ dpdk_cores: 1
+++ /dev/null
-os-odl_l3-nofeature-ha.yaml
\ No newline at end of file
--- /dev/null
+global_params:
+ ha_enabled: false
+
+deploy_options:
+ sdn_controller: ovn
+ sdn_l3: false
+ tacker: false
+ congress: true
+ sfc: false
+ vpn: false
controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
phys_type: interface
members:
- - nic1
+ - eth0
#
tenant: # Tenant network configuration
enabled: true
nic_mapping: # Mapping of network configuration for Overcloud Nodes
compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
phys_type: interface # Physical interface type (interface or bond)
+ uio_driver: uio_pci_generic # UIO driver to use for DPDK scenarios. The value is ignored for non-DPDK scenarios.
vlan: native # VLAN tag to use with this NIC
members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
phys_type: interface
vlan: native
members:
- - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
#
external: # Can contain 1 or more external networks
- public: # "public" network will be the network the installer VM attaches to
phys_type: interface
vlan: native
members:
- - nic3
+ - eth2
external_overlay: # External network to be created in OpenStack by Services tenant
name: Public_internet
type: flat
phys_type: interface
vlan: 101
members:
- - nic3
+ - eth2
external_overlay: # External network to be created in OpenStack by Services tenant
name: private_cloud
type: vlan
phys_type: interface
vlan: native
members:
- - nic4
+ - eth3
#
api: # API network configuration
enabled: false
phys_type: interface # Physical interface type (interface or bond)
vlan: native # VLAN tag to use with this NIC
members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ - eth4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
phys_type: interface
vlan: native
members:
- - nic5
+ - eth4
# Apex specific settings
apex:
#!/bin/bash
set -e
apex_home=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../
-export CONFIG=$apex_home/build
+export BASE=$apex_home/build
export LIB=$apex_home/lib
-export RESOURCES=$apex_home/.build/
+export IMAGES=$apex_home/.build/
export PYTHONPATH=$PYTHONPATH:$apex_home/lib/python
$apex_home/ci/dev_dep_check.sh || true
$apex_home/ci/clean.sh
popd
pushd $apex_home/ci
echo "All further output will be piped to $PWD/nohup.out"
-(nohup ./deploy.sh -v -n $apex_home/config/network/network_settings.yaml -d $apex_home/config/deploy/os-odl_l3-nofeature-noha.yaml &)
+(nohup ./deploy.sh -v -n $apex_home/config/network/network_settings.yaml -d $apex_home/config/deploy/os-odl-nofeature-noha.yaml &)
tail -f nohup.out
popd
Abstract
========
-This document describes how to install the Colorado release of OPNFV when
+This document describes how to install the Euphrates release of OPNFV when
using Apex as a deployment tool covering its limitations, dependencies
and required system resources.
License
=======
-Colorado release of OPNFV when using Apex as a deployment tool Docs
+Euphrates release of OPNFV when using Apex as a deployment tool Docs
(c) by Tim Rozet (Red Hat) and Dan Radez (Red Hat)
-Colorado release of OPNFV when using Apex as a deployment tool Docs
+Euphrates release of OPNFV when using Apex as a deployment tool Docs
are licensed under a Creative Commons Attribution 4.0 International License.
You should have received a copy of the license along with this.
If not, see <http://creativecommons.org/licenses/by/4.0/>.
----------
The undercloud is not Highly Available. End users do not depend on the
-underloud. It is only for management purposes.
+undercloud. It is only for management purposes.
Overcloud
---------
- Ceph Monitors and OSDs
Stateless OpenStack services
- All running statesless OpenStack services are load balanced by HA Proxy.
+ All running stateless OpenStack services are load balanced by HA Proxy.
Pacemaker monitors the services and ensures that they are running.
Stateful OpenStack services
establishment of clustering across cluster members.
OpenDaylight
- OpenDaylight is currently installed on all three control nodes but only
- started on the first control node. OpenDaylight's HA capabilities are not yet
- mature enough to be enabled.
+ OpenDaylight is currently installed on all three control nodes and started as
+ an HA cluster unless otherwise noted for that scenario. OpenDaylight's
+ database, known as MD-SAL, breaks up pieces of the database into "shards".
+ Each shard will have its own election take place, which will determine
+ which OpenDaylight node is the leader for that shard. The other
+ OpenDaylight nodes in the cluster will be in standby. Every Open vSwitch
+ node connects to every OpenDaylight to enable HA.
HA Proxy
HA Proxy is monitored by Pacemaker to ensure it is running across all nodes
issues per scenario. The following scenarios correspond to a supported
<Scenario>.yaml deploy settings file:
-+-------------------------+------------+-----------------+
-| **Scenario** | **Owner** | **Supported** |
-+-------------------------+------------+-----------------+
-| os-nosdn-nofeature-ha | Apex | Yes |
-+-------------------------+------------+-----------------+
-| os-nosdn-nofeature-noha | Apex | Yes |
-+-------------------------+------------+-----------------+
-| os-nosdn-ovs-noha | OVS for NFV| Yes |
-+-------------------------+------------+-----------------+
-| os-nosdn-fdio-noha | FDS | Yes |
-+-------------------------+------------+-----------------+
-| os-odl_l2-nofeature-ha | Apex | Yes |
-+-------------------------+------------+-----------------+
-| os-odl_l3-nofeature-ha | Apex | Yes |
-+-------------------------+------------+-----------------+
-| os-odl_l2-sfc-noha | SFC | Yes |
-+-------------------------+------------+-----------------+
-| os-odl-bgpvpn-ha | SDNVPN | No |
-+-------------------------+------------+-----------------+
-| os-odl_l2-fdio-noha | FDS | Yes |
-+-------------------------+------------+-----------------+
-| os-onos-nofeature-ha | ONOSFW | Yes |
-+-------------------------+------------+-----------------+
-| os-onos-sfc-ha | ONOSFW | Yes |
-+-------------------------+------------+-----------------+
++-------------------------+-------------+---------------+
+| **Scenario** | **Owner** | **Supported** |
++-------------------------+-------------+---------------+
+| os-nosdn-nofeature-ha | Apex | Yes |
++-------------------------+-------------+---------------+
+| os-nosdn-nofeature-noha | Apex | Yes |
++-------------------------+-------------+---------------+
+| os-nosdn-ovs_dpdk-ha | OVS for NFV | Yes |
++-------------------------+-------------+---------------+
+| os-nosdn-ovs_dpdk-noha | OVS for NFV | Yes |
++-------------------------+-------------+---------------+
+| os-nosdn-fdio-ha | FDS | No |
++-------------------------+-------------+---------------+
+| os-nosdn-fdio-noha | FDS | No |
++-------------------------+-------------+---------------+
+| os-nosdn-kvm-ha | KVM for NFV | Yes |
++-------------------------+-------------+---------------+
+| os-nosdn-kvm-noha | KVM for NFV | Yes |
++-------------------------+-------------+---------------+
+| os-nosdn-performance-ha | Apex | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l3-nofeature-ha | Apex | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l3-nofeature-noha| Apex | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l3-ovs_dpdk-ha | OVS for NFV | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l3-ovs_dpdk-noha | OVS for NFV | Yes |
++-------------------------+-------------+---------------+
+| os-odl-bgpvpn-ha | SDNVPN | Yes |
++-------------------------+-------------+---------------+
+| os-odl-bgpvpn-noha | SDNVPN | Yes |
++-------------------------+-------------+---------------+
+| os-odl-gluon-noha | GluOn | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l3-csit-noha | Apex | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l3-fdio-ha | FDS | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l3-fdio-noha | FDS | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l2-fdio-ha | FDS | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l2-fdio-noha | FDS | Yes |
++-------------------------+-------------+---------------+
+| os-odl_l2-sfc-noha | SFC | No |
++-------------------------+-------------+---------------+
+| os-onos-nofeature-ha | ONOSFW | No |
++-------------------------+-------------+---------------+
+| os-onos-sfc-ha | ONOSFW | No |
++-------------------------+-------------+---------------+
+| os-ovn-nofeature-noha | Apex | Yes |
++-------------------------+-------------+---------------+
packages and configuration for an OPNFV deployment to execute. Once the disk
images have been written to node's disks the nodes will boot locally and
execute cloud-init which will execute the final node configuration. This
-configuration is largly completed by executing a puppet apply on each node.
+configuration is largely completed by executing a puppet apply on each node.
Installation High-Level Overview - VM Deployment
================================================
control node and 1 or more compute nodes for a Non-HA Deployment) will be
defined for the target OPNFV deployment. The part of the toolchain that
executes IPMI power instructions calls into libvirt instead of the IPMI
-interfaces on baremetal servers to operate the power managment. These VMs are
+interfaces on baremetal servers to operate the power management. These VMs are
then provisioned with the same disk images and configuration that baremetal
would be.
install the opnfv-apex RPMs using the OPNFV artifacts yum repo. This yum
repo is created at release. It will not exist before release day.
- ``sudo yum install http://artifacts.opnfv.org/apex/danube/opnfv-apex-release-danube.noarch.rpm``
+ ``sudo yum install http://artifacts.opnfv.org/apex/euphrates/opnfv-apex-release-euphrates.noarch.rpm``
Once you have installed the repo definitions for Apex, RDO and EPEL then
yum install Apex:
(``/etc/opnfv-apex/``). These files are named with the naming convention
os-sdn_controller-enabled_feature-[no]ha.yaml. These files can be used in
place of the (``/etc/opnfv-apex/deploy_settings.yaml``) file if one suits
- your deployment needs. If a pre-built deploy_settings file is choosen there
+ your deployment needs. If a pre-built deploy_settings file is chosen there
is no need to customize (``/etc/opnfv-apex/deploy_settings.yaml``). The
pre-built file can be used in place of the
(``/etc/opnfv-apex/deploy_settings.yaml``) file.
:Authors: Tim Rozet (trozet@redhat.com)
:Authors: Dan Radez (dradez@redhat.com)
-:Version: 3.0
+:Version: 5.0
Indices and tables
==================
Introduction
============
-This document describes the steps to install an OPNFV Colorado reference
+This document describes the steps to install an OPNFV Euphrates reference
platform, as defined by the Genesis Project using the Apex installer.
The audience is assumed to have a good background in networking
The Apex deployment artifacts contain the necessary tools to deploy and
configure an OPNFV target system using the Apex deployment toolchain.
These artifacts offer the choice of using the Apex bootable ISO
-(``opnfv-apex-colorado.iso``) to both install CentOS 7 and the
+(``opnfv-apex-euphrates.iso``) to both install CentOS 7 and the
necessary materials to deploy or the Apex RPMs (``opnfv-apex*.rpm``),
and their associated dependencies, which expects installation to a
CentOS 7 libvirt enabled host. The RPM contains a collection of
`OPNFV Home Page <www.opnfv.org>`_
-`OPNFV Genesis project page <https://wiki.opnfv.org/get_started>`_
-
`OPNFV Apex project page <https://wiki.opnfv.org/apex>`_
-`OPNFV Apex release notes <http://artifacts.opnfv.org/apex/colorado/docs/releasenotes/release-notes.html#references>`_
+:ref:`OPNFV Apex Release Notes <apex-releasenotes>`
OpenStack
---------
-`OpenStack Mitaka Release artifacts <http://www.openstack.org/software/mitaka>`_
+`OpenStack Newton Release artifacts <http://www.openstack.org/software/newton>`_
`OpenStack documentation <http://docs.openstack.org>`_
4. minimum 1 networks and maximum 5 networks, multiple NIC and/or VLAN
combinations are supported. This is virtualized for a VM deployment.
-5. The Colorado Apex RPMs and their dependencies.
+5. The Euphrates Apex RPMs and their dependencies.
6. 16 GB of RAM for a bare metal deployment, 64 GB of RAM for a VM
deployment.
OpenDaylight Integration
------------------------
-When a user deploys any of the following scenarios:
-
- - os-odl-bgpvpn-ha.yaml
- - os-odl_l2-fdio-ha.yaml
- - os-odl_l2-fdio-noha.yaml
- - os-odl_l2-nofeature-ha.yaml
- - os-odl_l2-sfc-noha.yaml
- - os-odl_l3-nofeature-ha.yaml
-
-OpenDaylight (ODL) SDN controller will be deployed too and completely
-integrated with OpenStack. ODL is running as a systemd service, so you can
-manage it as a regular service:
+When a user deploys a scenario that starts with os-odl*:
+
+OpenDaylight (ODL) SDN controller will be deployed and integrated with
+OpenStack. ODL will run as a systemd service, and can be managed
+as a regular service:
``systemctl start/restart/stop opendaylight.service``
1. In the left pane, click Compute -> Images, click Create Image.
2. Insert a name "cirros", Insert an Image Location
- ``http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img``.
+ ``http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img``.
3. Select format "QCOW2", select Public, then click Create Image.
or more compute nodes for a non-HA Deployment) will be defined for the target
OPNFV deployment. The part of the toolchain that executes IPMI power
instructions calls into libvirt instead of the IPMI interfaces on baremetal
-servers to operate the power managment. These VMs are then provisioned with
+servers to operate the power management. These VMs are then provisioned with
the same disk images and configuration that baremetal would be. To Triple-O
these nodes look like they have just built and registered the same way as bare
metal nodes, the main difference is the use of a libvirt driver for the power
This section goes step-by-step on how to correctly install and provision the
OPNFV target system to VM nodes.
+Special Requirements for Virtual Deployments
+--------------------------------------------
+
+In scenarios where advanced performance options or features are used, such
+as using huge pages with nova instances, DPDK, or iommu; it is required to
+enable nested KVM support. This allows hardware extensions to be passed to
+the overcloud VMs, which will allow the overcloud compute nodes to bring up
+KVM guest nova instances, rather than QEMU. This also provides a great
+performance increase even in non-required scenarios and is recommended to be
+enabled.
+
+During deployment the Apex installer will detect if nested KVM is enabled,
+and if not, it will attempt to enable it; while printing a warning message
+if it cannot. Check to make sure before deployment that Nested
+Virtualization is enabled in BIOS, and that the output of ``cat
+/sys/module/kvm_intel/parameters/nested`` returns "Y". Also verify using
+``lsmod`` that the kvm_intel module is loaded for x86_64 machines, and
+kvm_amd is loaded for AMD64 machines.
+
Install Jumphost
----------------
You are now ready to deploy OPNFV!
``opnfv-deploy`` has virtual deployment capability that includes all of
-the configuration nessesary to deploy OPNFV with no modifications.
+the configuration necessary to deploy OPNFV with no modifications.
If no modifications are made to the included configurations the target
environment will deploy with the following architecture:
-==========================================================================
-OPNFV Release Notes for the Colorado release of OPNFV Apex deployment tool
-==========================================================================
+========================================================================
+OPNFV Release Notes for the Danube release of OPNFV Apex deployment tool
+========================================================================
.. contents:: Table of Contents
Abstract
========
-This document provides the release notes for Colorado release with the Apex
+This document provides the release notes for Danube release with the Apex
deployment toolchain.
License
| **Date** | **Ver.** | **Authors** | **Comment** |
| | | | |
+-------------+-----------+-----------------+----------------------+
+| 2017-03-30 | 4.0 | Tim Rozet | Updates for Danube |
++-------------+-----------+-----------------+----------------------+
| 2016-09-20 | 2.1.0 | Tim Rozet | More updates for |
| | | | Colorado |
+-------------+-----------+-----------------+----------------------+
Important Notes
===============
-This is the OPNFV Colorado release that implements the deploy stage of the
+This is the OPNFV Danube release that implements the deploy stage of the
OPNFV CI pipeline via Apex.
Apex is based on RDO's Triple-O installation tool chain.
Summary
=======
-Colorado release with the Apex deployment toolchain will establish an OPNFV
+Danube release with the Apex deployment toolchain will establish an OPNFV
target system on a Pharos compliant lab infrastructure. The current definition
-of an OPNFV target system is OpenStack Mitaka combined with an SDN
+of an OPNFV target system is OpenStack Newton combined with an SDN
controller, such as OpenDaylight. The system is deployed with OpenStack High
Availability (HA) for most OpenStack services. SDN controllers are deployed
-only on the first controller (see HAIssues_ for known HA SDN issues). Ceph
+on every controller unless deploying with one of the HA FD.IO scenarios. Ceph
storage is used as Cinder backend, and is the only supported storage for
-Colorado. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
+Danube. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
node in an HA setup. Apex also supports non-HA deployments, which deploys a
single controller and n number of compute nodes. Furthermore, Apex is
capable of deploying scenarios in a bare metal or virtual fashion. Virtual
- Documentation is built by Jenkins
- .iso image is built by Jenkins
- .rpm packages are built by Jenkins
-- Jenkins deploys a Colorado release with the Apex deployment toolchain
+- Jenkins deploys a Danube release with the Apex deployment toolchain
bare metal, which includes 3 control+network nodes, and 2 compute nodes.
Release Data
| **Project** | apex |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | apex/colorado.1.0 |
+| **Repo/tag** | apex/danube.1.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | colorado.1.0 |
+| **Release designation** | danube.1.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2016-09-22 |
+| **Release date** | 2017-03-31 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Colorado release |
+| **Purpose of the delivery** | OPNFV Danube release |
| | |
+--------------------------------------+--------------------------------------+
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of the Colorado release with the Apex
+This is the first tracked version of the Danube release with the Apex
deployment toolchain. It is based on following upstream versions:
-- OpenStack (Mitaka release)
+- OpenStack (Newton release)
-- OpenDaylight (Beryllium/Boron releases)
+- OpenDaylight (Boron/Carbon releases)
- CentOS 7
Document Version Changes
~~~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of Colorado release with the Apex
+This is the first tracked version of Danube release with the Apex
deployment toolchain.
The following documentation is provided with this release:
-- OPNFV Installation instructions for the Colorado release with the Apex
+- OPNFV Installation instructions for the Danube release with the Apex
deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Colorado release with the Apex deployment
+- OPNFV Release Notes for the Danube release with the Apex deployment
toolchain - ver. 1.0.0 (this document)
Feature Additions
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-107 | OpenDaylight HA - OVSDB Clustering |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-108 | Migrate to OpenStack Mitaka |
+| JIRA: APEX-129 | Adds OVN SDN Controller support |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-30 | Support VLAN tagged deployments |
+| JIRA: APEX-299 | Migrate to OpenStack Newton |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-105 | Enable Huge Page Configuration |
-| | Options |
+| JIRA: APEX-150 | Allow for multiple external networks |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-111 | Allow RAM to be specified for |
-| | Control/Compute in Virtual |
-| | Deployments |
+| JIRA: APEX-301 | Support Networking ODL v2 Driver |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-119 | Enable OVS DPDK as a deployment |
-| | Scenario in Apex |
+| JIRA: APEX-300 | Support OpenDaylight new netvirt |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-126 | Tacker Service deployed by Apex |
+| JIRA: APEX-302 | Upstream Tacker and Congress |
+| | support |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-135 | Congress Service deployed by Apex |
+| JIRA: APEX-106 | Enable CPU pinning for Overcloud |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-127 | Nova Instance CPU Pinning |
+| JIRA: APEX-390 | OpenDaylight HA as default for HA |
+| | scenarios |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-130 | IPv6 Underlay Deployment |
+| JIRA: APEX-357 | Include Quagga in SDNVPN scenario |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-133 | FDIO with Honeycomb Agent |
+| JIRA: APEX-262 | Migrate to new network settings |
+| | format |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-141 | Integrate VSPERF into Apex |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-172 | Enable ONOS SFC |
+| JIRA: APEX-128 | Adds Real Time KVM support |
+--------------------------------------+--------------------------------------+
Bug Corrections
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-86 | Need ability to specify number of |
-| | compute nodes |
+| JIRA: APEX-208 | Need ability to specify which nic |
+| | to place vlan on |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-180 | Baremetal deployment error: Failed to|
-| | mount root partition /dev/sda on |
-| | /mnt/rootfs |
+| JIRA: APEX-215 | Keystone services not configured and |
+| | error is silently ignored on VLAN |
+| | Deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-221 | NoHA virtual deployments should use 1|
+| | compute |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-161 | Heat autoscaling stack creation fails|
-| | for non-admin users |
+| JIRA: APEX-276 | ODL HA is unstable and crashes |
+| | frequently |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-198 | Missing NAT iptables rule for public |
-| | network in instack VM |
+| JIRA: APEX-287 | Name mismatch for package openstack- |
+| | congress during overcloud build |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-147 | Installer doesn't generate/distribute|
-| | SSH keys between compute nodes |
+| JIRA: APEX-339 | Enable pinning for OVS DPDK |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-109 | ONOS routes local subnet traffic to |
-| | GW |
+| JIRA: APEX-345 | Horizon and cloud failures due to |
+| | running out of file descriptors for |
+| | MariaDB in noha deployments |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-146 | Swift service present in available |
-| | endpoints |
+| JIRA: APEX-370 | ISO builds fail in Danube |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-160 | Enable force_metadata to support |
-| | subnets with VM as the router |
+| JIRA: APEX-372 | Specifying same NIC for storage and |
+| | private network but different VLANs |
+| | results in duplicate NIC error |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-114 | OpenDaylight GUI is not available |
+| JIRA: APEX-373 | Running smoke tests should install |
+| | Ansible onto jump host |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-100 | DNS1 and DNS2 should be handled in |
-| | nic bridging |
+| JIRA: APEX-374 | Ceph accidentally disabled by default|
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-100 | DNS1 and DNS2 should be handled in |
-| | nic bridging |
+| JIRA: APEX-378 | OVS 2.5.90 NSH build fails |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-382 | yum update on undercloud breaks |
+| | deployments |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-155 | NIC Metric value not used when |
-| | bridging NICs |
+| JIRA: APEX-386 | Fix os-net-config to match upstream |
+| | stable/newton |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-136 | 2 network deployment fails |
+| JIRA: APEX-398 | Tacker uses "RegionOne" instead of |
+| | "regionOne" |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-89 | Deploy Ceph OSDs on compute nodes |
+| JIRA: APEX-399 | hugepages are not enabled when |
+| | configured in deploy settings |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-137 | added arping ass dependency for |
-| | ONOS deployments |
+| JIRA: APEX-403 | Remove Quagga from build process and |
+| | cache to artifacts |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-121 | VM Storage deletion intermittently |
-| | fails |
+| JIRA: APEX-406 | ODL FDIO neutron patches to all |
+| | scenarios |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-182 | Nova services not correctly deployed |
+| JIRA: APEX-407 | VPP service does not start upon |
+| | reboot |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-153 | brbm bridge not created in jumphost |
+| JIRA: APEX-408 | Quagga's bgpd cannot start due to |
+| | permissions |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-421 | Update odl/hc/vpp versions for odl_l3|
+| | noha |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-426 | Missing virtual-computes arg in help |
+| | output for deploy |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-427 | Neutron openvswitch agent starts when|
+| | openvswitch is restarted |
+--------------------------------------+--------------------------------------+
Deliverables
- Apex .iso file
- Apex release .rpm (opnfv-apex-release)
- Apex overcloud .rpm (opnfv-apex) - For nosdn and OpenDaylight Scenarios
-- Apex overcloud onos .rpm (opnfv-apex-onos) - ONOS Scenarios
- Apex undercloud .rpm (opnfv-apex-undercloud)
- Apex common .rpm (opnfv-apex-common)
- build.sh - Builds the above artifacts
Documentation Deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for the Colorado release with the Apex
- deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Colorado release with the Apex deployment
- toolchain - ver. 1.0.0 (this document)
+- OPNFV Installation instructions for the Danube release with the Apex
+ deployment toolchain - ver. 4.0
+- OPNFV Release Notes for the Danube release with the Apex deployment
+ toolchain - ver. 4.0 (this document)
Known Limitations, Issues and Workarounds
=========================================
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-203 | Swift proxy enabled and fails in noha|
-| | deployments |
+| JIRA: APEX-138 | Unclear error message when interface |
+| | set to dhcp |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-215 | Keystone services not configured and |
-| | the error is silently ignored (VLAN |
-| | Deployments) |
+| JIRA: APEX-280 | Deleted network not cleaned up |
+| | on controller |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-208 | Need ability to specify which NIC to |
-| | place VLAN on |
+| JIRA: APEX-295 | Missing support for VLAN tenant |
+| | networks |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-254 | Add dynamic hugepages configuration |
+| JIRA: APEX-352 | Package "openstack-utils" is |
+| | missing from overcloud |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-138 | Unclear error message when interface |
-| | set to dhcp |
+| JIRA: APEX-368 | Ceilometer stores samples and events |
+| | forever |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-371 | Ceph partitions need to be prepared |
+| | on deployment when using 2nd disk |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-389 (Danube) | Compute kernel parameters are used |
+| JIRA: APEX-375 | Default glance storage points to |
+| | http,swift when ceph disabled |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-389 | Compute kernel parameters are used |
| | for all nodes |
+--------------------------------------+--------------------------------------+
+| JIRA: APEX-410 | Need to limit number of workers per |
+| | OpenStack service for baremetal |
+| | deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-412 | Install failures with UEFI |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-417 | Missing OVS 2.6 + NSH support |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-419 | opnfv-clean sometimes leaves admin |
+| | and public network down |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-425 | Need to tweak performance settings |
+|                                      | for virtual DPDK scenarios           |
++--------------------------------------+--------------------------------------+
Workarounds
-----------
**-**
-Scenario specific release notes
-===============================
-
-Scenario os-odl_l3-nofeature known issues
------------------------------------------
-
-* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
- ODL routes local subnet traffic to GW
-
-Scenario os-odl_l2-nofeature known issues
------------------------------------------
-
-* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
- Openflow rules are populated very slowly
-
-Scenario os-odl-bgpvpn known issues
---------------------------------------
-
-* `APEX-278 <https://jira.opnfv.org/browse/APEX-278>`_:
- Duplicate neutron config class declaration for SDNVPN
-
-Scenario os-onos-nofeatures/os-onos-sfc known issues
-----------------------------------------------------
-
-* `APEX-281 <https://jira.opnfv.org/browse/APEX-281>`_:
- ONOS sometimes fails to provide addresses to instances
-
-Scenario os-odl_l2-sfc-noha known issues
-----------------------------------------
-
-* `APEX-275 <https://jira.opnfv.org/browse/APEX-275>`_:
- Metadata fails in Boron
-
-Scenario os-nosdn-ovs known issues
-----------------------------------
-
-* `APEX-274 <https://jira.opnfv.org/browse/APEX-274>`_:
- OVS DPDK scenario does not create vhost user ports
-
-Scenario os-odl_l2-fdio-noha known issues
------------------------------------------
-
-* `FDS-16 <https://jira.opnfv.org/browse/FDS-16>`_:
- Security group configuration through nova leads
- to vhostuser port connection issues
-* `FDS-62 <https://jira.opnfv.org/browse/FDS-62>`_:
- APEX - Increase number of files MariaDB can open
-* `FDS-79 <https://jira.opnfv.org/browse/FDS-79>`_:
- Sometimes (especially in bulk crete/delete operations
- when multiple networks/ports are created within short time)
- OpenDaylight doesn't accept creation requests
-* `FDS-80 <https://jira.opnfv.org/browse/FDS-80>`_:
- After launching a VM it stayed forever in BUILD status.
- Also further operation related to this VM (volume attachment etc.)
- caused problems
-* `FDS-81 <https://jira.opnfv.org/browse/FDS-81>`_:
- After functest finishes there are two bds on computes and
- none on controller
-* `FDS-82 <https://jira.opnfv.org/browse/FDS-82>`_:
- Nova list shows no vms but there are some on computes in paused state
-* `APEX-217 <https://jira.opnfv.org/browse/APEX-217>`_:
- qemu not configured with correct group:user
-
-Scenario os-nosdn-fdio-noha known issues
-----------------------------------------
-
-Note that a set of manual configration steps need to be performed
-post an automated deployment for the scenario to be fully functional.
-Please refer to `FDS-159 <https://jira.opnfv.org/browse/FDS-159>`_ and
-`FDS-160 <https://jira.opnfv.org/browse/FDS-160>`_ for details.
-
-* `FDS-155 <https://jira.opnfv.org/browse/FDS-155>`_:
- os-nosdn-fdio-noha scenario: tempest_smoke_serial causes
- mariadb/mysqld process to hang
-* `FDS-156 <https://jira.opnfv.org/browse/FDS-156>`_:
- os-nosdn-fdio-noha scenario: Race conditions for
- network-vif-plugged notification
-* `FDS-157 <https://jira.opnfv.org/browse/FDS-157>`_:
- os-nosdn-fdio-noha scenario: Intermittently VMs
- would get assigned 2 IPs instead of 1
-* `FDS-158 <https://jira.opnfv.org/browse/FDS-158>`_:
- os-nosdn-fdio-noha scenario: VM start/launch fails with
- "no more IP addresses" in neutron logs
-* `FDS-159 <https://jira.opnfv.org/browse/FDS-159>`_:
- os-nosdn-fdio-noha scenario: Security groups not yet supported
-* `FDS-160 <https://jira.opnfv.org/browse/FDS-160>`_:
- os-nosdn-fdio-noha scenario: Vlan fix on controller
-* `FDS-161 <https://jira.opnfv.org/browse/FDS-161>`_:
- os-nosdn-fdio-noha scenario: VPP fails with certain UCS B-series blades
-
-.. _HAIssues:
-
-General HA scenario known issues
---------------------------------
-
-* `COPPER-22 <https://jira.opnfv.org/browse/COPPER-22>`_:
- Congress service HA deployment is not yet supported/verified.
-* `APEX-276 <https://jira.opnfv.org/browse/APEX-276>`_:
- ODL HA unstable and crashes frequently
Test Result
===========
-The Colorado release with the Apex deployment toolchain has undergone QA
-test runs with the following results:
-
-+--------------------------------------+--------------------------------------+
-| **TEST-SUITE** | **Results:** |
-| | |
-+--------------------------------------+--------------------------------------+
-| **-** | **-** |
-+--------------------------------------+--------------------------------------+
+Please reference Functest project documentation for test results with the
+Apex installer.
References
==========
-For more information on the OPNFV Colorado release, please see:
+For more information on the OPNFV Danube release, please see:
-http://wiki.opnfv.org/releases/Colorado
+http://wiki.opnfv.org/releases/Danube
:Authors: Tim Rozet (trozet@redhat.com)
:Authors: Dan Radez (dradez@redhat.com)
-:Version: 2.1.0
+:Version: 4.0
--- /dev/null
+.. _os-nosdn-nofeature-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+==============================================
+os-nosdn-nofeature-ha overview and description
+==============================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ os-nosdn-nofeature-ha.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+This document provides scenario level details for Danube 1.0 of
+deployment with no SDN controller and no extra features enabled.
+
+.. contents::
+ :depth: 3
+ :local:
+
+============
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Newton OpenStack
+deployment without any NFV features or SDN controller enabled.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. Optionally and
+by default, Tacker and Congress services are also enabled. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+All services are in HA, meaning that there are multiple cloned instances of
+each service, and they are balanced by HA Proxy using a Virtual IP Address
+per service.
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by using the os-nosdn-nofeature-ha.yaml deploy
+settings file.
+
+Limitations, Issues and Workarounds
+===================================
+
+None
+
+References
+==========
+
+For more information on the OPNFV Danube release, please visit
+http://www.opnfv.org/danube
+
--- /dev/null
+.. _os-nosdn-nofeature-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+================================================
+os-nosdn-nofeature-noha overview and description
+================================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ os-nosdn-nofeature-noha.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+This document provides scenario level details for Danube 1.0 of
+deployment with no SDN controller and no extra features enabled.
+
+.. contents::
+ :depth: 3
+ :local:
+
+============
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Newton OpenStack
+deployment without any NFV features or SDN controller enabled.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. Optionally and
+by default, Tacker and Congress services are also enabled. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by using the os-nosdn-nofeature-noha.yaml deploy
+settings file.
+
+Limitations, Issues and Workarounds
+===================================
+
+None
+
+References
+==========
+
+For more information on the OPNFV Danube release, please visit
+http://www.opnfv.org/danube
+
--- /dev/null
+.. _os-nosdn-performance-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+================================================
+os-nosdn-performance-ha overview and description
+================================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ os-nosdn-performance-ha.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+This document provides scenario level details for Danube 1.0 of
+deployment with no SDN controller and performance options enabled.
+
+.. contents::
+ :depth: 3
+ :local:
+
+============
+Introduction
+============
+
+This scenario is used primarily to demonstrate the performance settings and
+capabilities in Apex. This scenario will deploy a Newton OpenStack
+deployment without any NFV features or SDN controller enabled.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. Optionally and
+by default, Tacker and Congress services are also enabled. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+All services are in HA, meaning that there are multiple cloned instances of
+each service, and they are balanced by HA Proxy using a Virtual IP Address
+per service.
+
+The main purpose of this scenario is to serve as an example to show how to
+set optional performance settings in an Apex deploy settings file.
+
+Scenario usage overview
+=======================
+
+The performance options listed in os-nosdn-performance-ha.yaml give an example
+of the different options a user can set in any deploy settings file. Some
+of these performance options are actually required for other scenarios which
+rely on DPDK. Options under the nova section like 'libvirtpin' allow a
+user to choose which core to pin nova instances to on the overcloud compute
+node. Options under 'kernel' allow a user to set kernel specific arguments
+at boot, which include options like hugepages, isolcpus, enabling iommu, etc.
+
+
+Limitations, Issues and Workarounds
+===================================
+
+* `APEX-389 <https://jira.opnfv.org/browse/APEX-389>`_:
+ Compute kernel parameters are applied to all nodes
+
+References
+==========
+
+For more information on the OPNFV Danube release, please visit
+http://www.opnfv.org/danube
+
--- /dev/null
+.. _os-odl_l3-csit-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+============================================
+os-odl_l3-csit-noha overview and description
+============================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ os-odl_l3-csit-noha.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+This document provides scenario level details for Danube 1.0 of
+deployment with the OpenDaylight SDN controller and only CSIT relevant
+features enabled.
+
+.. contents::
+ :depth: 3
+ :local:
+
+============
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a minimum Newton
+OpenStack + OpenDaylight deployment with only required OpenStack services.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of only required OpenStack services enabled by
+default, including Nova, Neutron, Glance, and Keystone. OpenDaylight is also
+enabled. File storage is used as the backend to Glance.
+
+The purpose of this scenario is to deploy a minimum OpenStack setup that will
+still be able to exercise OpenDaylight. The use case for this scenario is
+to be able to test OpenDaylight quickly in an environment with low
+CPU/Memory requirements.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by using the os-odl_l3-csit-noha.yaml deploy
+settings file.
+
+Limitations, Issues and Workarounds
+===================================
+
+* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
+ ODL routes local subnet traffic to GW
+* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
+ OpenFlow rules are populated very slowly
+* `APEX-268 <https://jira.opnfv.org/browse/APEX-268>`_:
+ VMs with multiple floating IPs can only access via first NIC
+* `APEX-384 <https://jira.opnfv.org/browse/APEX-384>`_:
+ Not including odl_version in deploy settings causes error
+* `APEX-422 <https://jira.opnfv.org/browse/APEX-422>`_:
+ First nova instance DHCP request fails
+
+References
+==========
+
+For more information on the OPNFV Danube release, please visit
+http://www.opnfv.org/danube
+
--- /dev/null
+.. _os-odl_l3-nofeature-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+===============================================
+os-odl_l3-nofeature-ha overview and description
+===============================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ os-odl_l3-nofeature-ha.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+This document provides scenario level details for Danube 1.0 of
+deployment with the OpenDaylight SDN controller and no extra features enabled.
+
+.. contents::
+ :depth: 3
+ :local:
+
+============
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Newton OpenStack
+deployment with OpenDaylight, and without any NFV features enabled.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. Optionally and
+by default, Tacker and Congress services are also enabled. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+All services are in HA, meaning that there are multiple cloned instances of
+each service, and they are balanced by HA Proxy using a Virtual IP Address
+per service.
+
+OpenDaylight is also enabled in HA, and forms a cluster. Neutron
+communicates with a Virtual IP Address for OpenDaylight which is load
+balanced across the OpenDaylight cluster. Every Open vSwitch node is
+connected to every OpenDaylight instance for High Availability.
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by using the os-odl_l3-nofeature-ha.yaml deploy
+settings file.
+
+Limitations, Issues and Workarounds
+===================================
+
+* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
+ ODL routes local subnet traffic to GW
+* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
+ OpenFlow rules are populated very slowly
+* `APEX-268 <https://jira.opnfv.org/browse/APEX-268>`_:
+ VMs with multiple floating IPs can only access via first NIC
+* `APEX-384 <https://jira.opnfv.org/browse/APEX-384>`_:
+ Not including odl_version in deploy settings causes error
+* `APEX-422 <https://jira.opnfv.org/browse/APEX-422>`_:
+ First nova instance DHCP request fails
+
+References
+==========
+
+For more information on the OPNFV Danube release, please visit
+http://www.opnfv.org/danube
+
--- /dev/null
+.. _os-odl_l3-nofeature-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+=================================================
+os-odl_l3-nofeature-noha overview and description
+=================================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ os-odl_l3-nofeature-noha.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+This document provides scenario level details for Danube 1.0 of
+deployment with the OpenDaylight SDN controller and no extra features enabled.
+
+.. contents::
+ :depth: 3
+ :local:
+
+============
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Newton OpenStack
+deployment with OpenDaylight, and without any NFV features enabled.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. Optionally and
+by default, Tacker and Congress services are also enabled. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+Only a single controller is deployed in this scenario, which also includes
+the OpenDaylight service on it.
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by using the os-odl_l3-nofeature-noha.yaml deploy
+settings file.
+
+Limitations, Issues and Workarounds
+===================================
+
+* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
+ ODL routes local subnet traffic to GW
+* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
+ OpenFlow rules are populated very slowly
+* `APEX-268 <https://jira.opnfv.org/browse/APEX-268>`_:
+ VMs with multiple floating IPs can only access via first NIC
+* `APEX-384 <https://jira.opnfv.org/browse/APEX-384>`_:
+ Not including odl_version in deploy settings causes error
+* `APEX-422 <https://jira.opnfv.org/browse/APEX-422>`_:
+ First nova instance DHCP request fails
+
+References
+==========
+
+For more information on the OPNFV Danube release, please visit
+http://www.opnfv.org/danube
+
--- /dev/null
+.. _os-ovn-nofeature-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+==============================================
+os-ovn-nofeature-noha overview and description
+==============================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ os-ovn-nofeature-noha.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+This document provides scenario level details for Danube 1.0 of
+deployment with the OVN SDN controller and no extra features enabled.
+
+.. contents::
+ :depth: 3
+ :local:
+
+============
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Newton OpenStack
+deployment with the OVN SDN controller, and without any NFV features enabled.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. Optionally and
+by default, Tacker and Congress services are also enabled. Ceph is used as
+the backend storage to Cinder on all deployed nodes.
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by using the os-ovn-nofeature-noha.yaml deploy
+settings file.
+
+Limitations, Issues and Workarounds
+===================================
+
+* `APEX-430 <https://jira.opnfv.org/browse/APEX-430>`_:
+ OVN HA functionality is not available.
+
+References
+==========
+
+For more information on the OPNFV Danube release, please visit
+http://www.opnfv.org/danube
+
function overcloud_deploy {
local num_compute_nodes
local num_control_nodes
+ local dpdk_cores pmd_cores socket_mem ovs_dpdk_perf_flag ovs_option_heat_arr
+ declare -A ovs_option_heat_arr
+
+ ovs_option_heat_arr['dpdk_cores']=OvsDpdkCoreList
+ ovs_option_heat_arr['pmd_cores']=PmdCoreList
+ ovs_option_heat_arr['socket_memory']=OvsDpdkSocketMemory
# OPNFV Default Environment and Network settings
DEPLOY_OPTIONS+=" -e ${ENV_FILE}"
# DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml"
#fi
#SDN_IMAGE=onos
+ elif [ "${deploy_options_array['sdn_controller']}" == 'ovn' ]; then
+ if [[ "$ha_enabled" == "True" ]]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-ovn-ha.yaml"
+        echo "${red}OVN HA support is not supported... exiting.${reset}"
+ exit 1
+ else
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-ovn.yaml"
+ fi
+ SDN_IMAGE=opendaylight
elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
exit 1
echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
if [ "${deploy_options_array['vpp']}" == 'True' ]; then
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-vpp.yaml"
+ elif [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ovs-dpdk.yaml"
fi
SDN_IMAGE=opendaylight
else
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
scp ${SSH_OPTIONS[@]} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
+ # disable neutron openvswitch agent from starting
+ if [[ -n "${deploy_options_array['sdn_controller']}" && "${deploy_options_array['sdn_controller']}" != 'False' ]]; then
+ echo -e "${blue}INFO: Disabling neutron-openvswitch-agent from systemd${reset}"
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -f /etc/systemd/system/multi-user.target.wants/neutron-openvswitch-agent.service" \
+ --run-command "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent.service" \
+ -a overcloud-full.qcow2
+EOI
+ fi
+
if [ "${deploy_options_array['vpn']}" == 'True' ]; then
echo -e "${blue}INFO: Enabling ZRPC and Quagga${reset}"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y install /root/quagga/*.rpm" \
+ --run-command "sudo usermod -a -G quaggavt quagga" \
+ --run-command "sudo mkdir -p /var/run/quagga/" \
+ --run-command "sudo chown quagga:quagga -R /var/run/quagga/" \
--run-command "systemctl enable zrpcd" \
-a overcloud-full.qcow2
EOI
-a overcloud-full.qcow2
if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/dpdk_rpms/*" \
- --run-command "sed -i '/RuntimeDirectoryMode=.*/d' /usr/lib/systemd/system/openvswitch-nonetwork.service" \
- --run-command "printf \"%s\\n\" RuntimeDirectoryMode=0775 Group=qemu UMask=0002 >> /usr/lib/systemd/system/openvswitch-nonetwork.service" \
- --run-command "sed -i 's/\\(^\\s\\+\\)\\(start_daemon "$OVS_VSWITCHD_PRIORITY"\\)/\\1umask 0002 \\&\\& \\2/' /usr/share/openvswitch/scripts/ovs-ctl" \
- -a overcloud-full.qcow2
+ sed -i "/OS::TripleO::ComputeExtraConfigPre:/c\ OS::TripleO::ComputeExtraConfigPre: ./ovs-dpdk-preconfig.yaml" network-environment.yaml
fi
EOI
fi
# Patch neutron with using OVS external interface for router and add generic linux NS interface driver
- if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
+ if [[ "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
LIBGUESTFS_BACKEND=direct virt-customize --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron-patch-NSDriver.patch" \
-a overcloud-full.qcow2
EOI
- # Configure routing node for odl_l3-fdio
+
+ # Disable clustering for ODL FDIO HA scenarios
+ if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "cd /etc/puppet/modules/tripleo/ && patch -p1 < disable_odl_clustering.patch" \
+ -a overcloud-full.qcow2
+EOI
+ fi
+
+ # Configure routing node for odl-fdio
if [[ "${deploy_options_array['sdn_l3']}" == 'True' ]]; then
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
sed -i "/opendaylight::vpp_routing_node:/c\ opendaylight::vpp_routing_node: ${deploy_options_array['odl_vpp_routing_node']}.${domain_name}" ${ENV_FILE}
fi
if [ -n "${deploy_options_array['performance']}" ]; then
+ ovs_dpdk_perf_flag="False"
for option in "${performance_options[@]}" ; do
- arr=($option)
- # use compute's kernel settings for all nodes for now.
- if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "kernel" ]; then
- kernel_args+=" ${arr[2]}=${arr[3]}"
- fi
+          arr=($option)
+          if [ "${arr[1]}" == "vpp" ]; then
+ if [ "${arr[0]}" == "Compute" ]; then
+ role='NovaCompute'
+ else
+ role=${arr[0]}
+ fi
+ if [ "${arr[2]}" == "main-core" ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ sed -i "/${role}ExtraConfig:/ c\ ${role}ExtraConfig:\n fdio::vpp_cpu_main_core: \"'${arr[3]}'\"" ${ENV_FILE}
+EOI
+ elif [ "${arr[2]}" == "corelist-workers" ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ sed -i "/${role}ExtraConfig:/ c\ ${role}ExtraConfig:\n fdio::vpp_cpu_corelist_workers: \"'${arr[3]}'\"" ${ENV_FILE}
+EOI
+ fi
+ fi
+ arr=($option)
+ # use compute's kernel settings for all nodes for now.
+ if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "kernel" ]; then
+ kernel_args+=" ${arr[2]}=${arr[3]}"
+ fi
+ if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "ovs" ]; then
+ eval "${arr[2]}=${arr[3]}"
+ ovs_dpdk_perf_flag="True"
+ fi
done
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/ComputeKernelArgs:/c\ ComputeKernelArgs: '$kernel_args'" ${ENV_FILE}
- sed -i "$ a\resource_registry:\n OS::TripleO::NodeUserData: first-boot.yaml" ${ENV_FILE}
- sed -i "/NovaSchedulerDefaultFilters:/c\ NovaSchedulerDefaultFilters: 'RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter'" ${ENV_FILE}
+ sed -i "/ComputeKernelArgs:/c\ ComputeKernelArgs: '$kernel_args'" ${ENV_FILE}
+ sed -i "$ a\resource_registry:\n OS::TripleO::NodeUserData: first-boot.yaml" ${ENV_FILE}
+ sed -i "/NovaSchedulerDefaultFilters:/c\ NovaSchedulerDefaultFilters: 'RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter'" ${ENV_FILE}
+EOI
+
+ if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' && "$ovs_dpdk_perf_flag" == "True" ]]; then
+ for ovs_option in ${!ovs_option_heat_arr[@]}; do
+ if [ -n "${!ovs_option}" ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ sed -i "/${ovs_option_heat_arr[$ovs_option]}:/c\ ${ovs_option_heat_arr[$ovs_option]}: ${!ovs_option}" ${ENV_FILE}
EOI
+ fi
+ done
+ fi
fi
if [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
if [ "${deploy_options_array['dataplane']}" == "fdio" ]; then
if [ "$tenant_nic_mapping_controller_members" == "$tenant_nic_mapping_compute_members" ]; then
+ echo -e "${blue}INFO: nosdn fdio deployment...installing correct vpp packages...${reset}"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
sed -i "/NeutronVPPAgentPhysnets:/c\ NeutronVPPAgentPhysnets: 'datacentre:${tenant_nic_mapping_controller_members}'" ${ENV_FILE}
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum remove -y vpp vpp-api-python vpp-lib vpp-plugins" \
+ --run-command "yum install -y /root/fdio_nosdn/*.rpm" \
+ --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
+ -a overcloud-full.qcow2
EOI
else
echo -e "${red}Compute and Controller must use the same tenant nic name, please modify network setting file.${reset}"
EOI
fi
- # Override any previous packages if FDIO and ODL L2
- if [[ "${deploy_options_array['vpp']}" == 'True' && "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['sdn_l3']}" == "False" ]]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight vpp vpp-api-python vpp-lib vpp-plugins honeycomb" \
- --run-command "yum -y install /root/fdio_l2/*.rpm" \
+ # Override ODL if FDIO and ODL L2
+ if [[ "${deploy_options_array['vpp']}" == 'True' && "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
+ if [ "${deploy_options_array['sdn_l3']}" == "False" ]; then
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
+ --run-command "yum -y install /root/fdio_l2/opendaylight*.rpm" \
+ -a overcloud-full.qcow2
+EOI
+ else
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum remove -y vpp vpp-api-python vpp-lib vpp-plugins honeycomb" \
+ --run-command "yum -y install /root/fdio_l3/*.rpm" \
--run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
-a overcloud-full.qcow2
EOI
+ fi
fi
# check if ceph should be enabled
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
fi
+ if [ "${deploy_options_array['sdn_controller']}" == 'ovn' ]; then
+      # The epoch in delorean's ovs is 1: and in leif's is 0:
+ # so we have to execute a downgrade instead of an update
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+ LIBGUESTFS_BACKEND=direct virt-customize \
+ --run-command "cd /root/ovs27 && yum update -y *openvswitch*" \
+ --run-command "cd /root/ovs27 && yum downgrade -y *openvswitch*" \
+ -a overcloud-full.qcow2
+EOI
+ fi
+
# get number of nodes available in inventory
num_control_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:control /home/stack/instackenv.json")
num_compute_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:compute /home/stack/instackenv.json")
fi
EOI
- # Configure DPDK
+ # Configure DPDK and restart ovs agent after bringing up br-phy
if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "DPDK config failed, exiting..."; exit 1)
source stackrc
set -o errexit
for node in \$(nova list | grep novacompute | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-echo "Running DPDK test app on \$node"
+echo "Checking DPDK status and bringing up br-phy on \$node"
ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
set -o errexit
-sudo dpdk_helloworld --no-pci
-sudo dpdk_nic_bind -s
+sudo dpdk-devbind -s
+sudo ifup br-phy
+if [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
+ echo "Restarting openvswitch agent to pick up VXLAN tunneling"
+ sudo systemctl restart neutron-openvswitch-agent
+fi
EOF
done
EOI
'odl_vpp_routing_node']
VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
-VALID_PERF_OPTS = ['kernel', 'nova', 'vpp']
+VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
if ds['sdn_l3']:
nets['external'][0]['nic_mapping'][args.role]['uio-driver'] =\
ds['performance'][args.role.title()]['vpp']['uio-driver']
+ if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\
+ .get('interface-options'):
+ nets['tenant']['nic_mapping'][args.role]['interface-options'] =\
+ ds['performance'][args.role.title()]['vpp']['interface-options']
print(template.render(nets=nets,
role=args.role,
# if the VM is not running update the authkeys and start it
if ! virsh list | grep undercloud > /dev/null; then
+ if [ "$debug" == 'TRUE' ]; then
+ LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --root-password password:opnfvapex
+ fi
+
echo "Injecting ssh key to Undercloud VM"
LIBGUESTFS_BACKEND=direct virt-customize -a $undercloud_dst --run-command "mkdir -p /root/.ssh/" \
--upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_introspection_range}
openstack-config --set undercloud.conf DEFAULT undercloud_debug false
openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
+openstack-config --set undercloud.conf DEFAULT enable_ui false
sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
deploy_files = ('deploy_settings.yaml',
'os-nosdn-nofeature-noha.yaml',
- 'os-nosdn-ovs-noha.yaml',
+ 'os-nosdn-ovs_dpdk-noha.yaml',
'os-ocl-nofeature-ha.yaml',
'os-odl-bgpvpn-ha.yaml',
'os-odl-bgpvpn-noha.yaml',
- 'os-odl_l3-nofeature-ha.yaml',
+ 'os-odl-nofeature-ha.yaml',
'os-nosdn-nofeature-ha.yaml',
- 'os-nosdn-ovs-ha.yaml',
+ 'os-nosdn-ovs_dpdk-ha.yaml',
'os-nosdn-performance-ha.yaml',
- 'os-odl_l2-nofeature-ha.yaml',
+ 'os-odl-nofeature-ha.yaml',
'os-odl_l2-sfc-noha.yaml',
'os-onos-nofeature-ha.yaml',
'os-onos-sfc-ha.yaml')
ns = NetworkSettings('../config/network/network_settings.yaml')
assert_is_instance(ns, NetworkSettings)
for role in ['controller', 'compute']:
- nic_index = 1
+ nic_index = 0
print(ns.nics)
for network in ns.enabled_network_list:
- if role == 'compute':
- nic = 'eth' + str(nic_index - 1)
- else:
- nic = 'nic' + str(nic_index)
+ nic = 'eth' + str(nic_index)
assert_equal(ns.nics[role][network], nic)
nic_index += 1
ns = NetworkSettings(files_dir+'network_settings.yaml')
storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
# set duplicate nic
- storage_net_nicmap['controller']['members'][0] = 'nic1'
+ storage_net_nicmap['controller']['members'][0] = 'eth0'
assert_raises(NetworkSettingsException, NetworkSettings, ns)
# remove nic members
storage_net_nicmap['controller']['members'] = []