adding the opendaylight integration 53/3053/6
author Dan Radez <dradez@redhat.com>
Tue, 27 Oct 2015 05:55:20 +0000 (01:55 -0400)
committer Dan Radez <dradez@redhat.com>
Sat, 7 Nov 2015 14:23:36 +0000 (09:23 -0500)
Change-Id: Iaaf5cbc790abd3b7af6f94b4e6d7e8ecfbbc6534

build/Makefile
build/instack.sh
build/opendaylight.patch [new file with mode: 0644]
build/opendaylight.yaml [new file with mode: 0644]
build/opnfv-apex.spec
ci/deploy.sh

diff --git a/build/Makefile b/build/Makefile
index fa89ee8..7aec8fb 100644
@@ -115,8 +115,10 @@ rpm:
        tar -u --xform="s:stack/ironic-python-agent.kernel:opnfv-apex-$(RPMVERS)/build/stack/ironic-python-agent.kernel:" --file=opnfv-apex.tar stack/ironic-python-agent.kernel
        tar -u --xform="s:stack/ironic-python-agent.vmlinuz:opnfv-apex-$(RPMVERS)/build/stack/ironic-python-agent.vmlinuz:" --file=opnfv-apex.tar stack/ironic-python-agent.vmlinuz
        tar -u --xform="s:stack/overcloud-full.initrd:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.initrd:" --file=opnfv-apex.tar stack/overcloud-full.initrd
-       tar -u --xform="s:stack/overcloud-full.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.qcow2:" --file=opnfv-apex.tar stack/overcloud-full.qcow2
+       tar -u --xform="s:stack/overcloud-full-odl.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.qcow2:" --file=opnfv-apex.tar stack/overcloud-full-odl.qcow2
        tar -u --xform="s:stack/overcloud-full.vmlinuz:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.vmlinuz:" --file=opnfv-apex.tar stack/overcloud-full.vmlinuz
+       tar -u --xform="s:opendaylight.yaml:opnfv-apex-$(RPMVERS)/build/opendaylight.yaml:" --file=opnfv-apex.tar opendaylight.yaml
+       tar -u --xform="s:opendaylight.patch:opnfv-apex-$(RPMVERS)/build/opendaylight.patch:" --file=opnfv-apex.tar opendaylight.patch
        gzip -f opnfv-apex.tar
        rpmbuild -ba opnfv-apex.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(REVSTATE) | tr -d '_-')"
 
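The two new tar invocations above pack opendaylight.yaml and opendaylight.patch into the source tarball, while the --xform substitutions relocate every file under opnfv-apex-$(RPMVERS)/build/ and rename the ODL overcloud image back to overcloud-full.qcow2. A quick way to confirm the rewritten paths (a sketch, assuming the archive was just built in build/):

  # list the gzipped tarball and check the transformed ODL entries (illustrative only)
  tar -tzf opnfv-apex.tar.gz | grep -E 'opendaylight\.(yaml|patch)|overcloud-full\.qcow2'
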
diff --git a/build/instack.sh b/build/instack.sh
index 79a99f8..1e8e8b3 100755
@@ -57,9 +57,17 @@ elif [ "$1" == "-master" ]; then
     sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo
     sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo
     sudo rm -f /etc/yum.repos.d/delorean-current.repo
-
 fi
 
+# install the opendaylight yum repo definition
+cat << 'EOF' | sudo tee /etc/yum.repos.d/opendaylight.repo
+[opendaylight]
+name=OpenDaylight $releasever - $basearch
+baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-3-candidate/$basearch/os/
+enabled=1
+gpgcheck=0
+EOF
+
 # ensure the undercloud package is installed so we can build the undercloud
 if ! rpm -q instack-undercloud > /dev/null; then
     sudo yum install -y python-tripleoclient
@@ -187,6 +195,27 @@ for i in $IMAGES; do
   curl https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty/$i -z stack/$i -o stack/$i --verbose --silent --location
 done
 
+#Adding OpenDaylight to overcloud
+pushd stack
+cp overcloud-full.qcow2 overcloud-full-odl.qcow2
+for i in opendaylight python-networking-odl; do
+    yumdownloader $i
+    if rpmfile=$(ls -r $i*); then
+        rpmfile=$(echo $rpmfile | head -n1)
+        LIBGUESTFS_BACKEND=direct virt-customize --upload $rpmfile:/tmp --install /tmp/$rpmfile -a overcloud-full-odl.qcow2
+    else
+        echo "Cannot install $i into overcloud-full image."
+       exit 1
+    fi
+done
+rm -rf puppet-opendaylight
+git clone https://github.com/dfarrell07/puppet-opendaylight
+pushd puppet-opendaylight
+git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz
+popd
+LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ --run-command "cd /etc/puppet/modules/; tar xzf puppet-opendaylight.tar.gz" -a overcloud-full-odl.qcow2
+popd
+
 # move and Sanitize private keys from instack.json file
 mv stack/instackenv.json instackenv-virt.json
 sed -i '/pm_password/c\      "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
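The hunk above copies overcloud-full.qcow2 to overcloud-full-odl.qcow2, downloads the opendaylight and python-networking-odl RPMs with yumdownloader, installs them into the copy via virt-customize, and unpacks the puppet-opendaylight module into /etc/puppet/modules. A minimal sanity check that the packages really landed inside the ODL image (a sketch, assuming libguestfs-tools is available on the build host):

  # query the guest RPM database in place; virt-customize aborts if rpm -q fails (illustrative only)
  LIBGUESTFS_BACKEND=direct virt-customize -a stack/overcloud-full-odl.qcow2 \
      --run-command 'rpm -q opendaylight python-networking-odl'
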
diff --git a/build/opendaylight.patch b/build/opendaylight.patch
new file mode 100644
index 0000000..5376665
--- /dev/null
+++ b/build/opendaylight.patch
@@ -0,0 +1,446 @@
+From bdaa77b2b92f470fe0bc6b18bff5f2af1f7b65cf Mon Sep 17 00:00:00 2001
+From: Tim Rozet <tdrozet@gmail.com>
+Date: Tue, 23 Jun 2015 17:46:00 -0400
+Subject: [PATCH] Adds OpenDaylight support
+
+To enable, set the neutron Mechanism Drivers to opendaylight via ExtraConfig:
+ -  EnableOpenDaylight (used to enable ODL, defaults to false)
+ -  OpenDaylightPort (used to define ODL REST Port, default 8081)
+
+Change-Id: I2a4c5b69ee0ad70d2372cad23b9af0890715c85f
+Signed-off-by: Dan Radez <dradez@redhat.com>
+---
+ environments/opendaylight.yaml                     |   4 +
+ puppet/compute.yaml                                |   6 +
+ puppet/controller.yaml                             |   8 +
+ puppet/manifests/overcloud_compute.pp              |  31 +++-
+ puppet/manifests/overcloud_controller.pp           |  49 +++++-
+ puppet/manifests/overcloud_controller_pacemaker.pp | 183 +++++++++++++--------
+ 6 files changed, 201 insertions(+), 80 deletions(-)
+ create mode 100644 environments/opendaylight.yaml
+
+diff --git a/environments/opendaylight.yaml b/environments/opendaylight.yaml
+new file mode 100644
+index 0000000..39e4aa3
+--- /dev/null
++++ b/environments/opendaylight.yaml
+@@ -0,0 +1,4 @@
++parameters:
++    ExtraConfig:
++      neutron_mechanism_drivers: ['opendaylight']
++      neutron_tenant_network_type: vxlan
+diff --git a/puppet/compute.yaml b/puppet/compute.yaml
+index 2b63535..3f20d48 100644
+--- a/puppet/compute.yaml
++++ b/puppet/compute.yaml
+@@ -221,6 +221,10 @@ parameters:
+   NtpServer:
+     type: string
+     default: ''
++  OpenDaylightPort:
++    default: 8081
++    description: Set opendaylight service port
++    type: number
+   RabbitHost:
+     type: string
+     default: ''  # Has to be here because of the ignored empty value bug
+@@ -409,6 +413,7 @@ resources:
+                 neutron::rabbit_user: {get_input: rabbit_user}
+                 neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
+                 neutron::rabbit_port: {get_input: rabbit_client_port}
++                opendaylight_port: {get_input: opendaylight_port}
+                 neutron_flat_networks: {get_input: neutron_flat_networks}
+                 neutron_host: {get_input: neutron_host}
+                 neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
+@@ -474,6 +479,7 @@ resources:
+               - {get_param: GlanceHost}
+               - ':'
+               - {get_param: GlancePort}
++        opendaylight_port: {get_param: OpenDaylightPort}
+         neutron_flat_networks: {get_param: NeutronFlatNetworks}
+         neutron_host: {get_param: NeutronHost}
+         neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
+diff --git a/puppet/controller.yaml b/puppet/controller.yaml
+index 0bb8035..fa0dc3e 100644
+--- a/puppet/controller.yaml
++++ b/puppet/controller.yaml
+@@ -427,6 +427,10 @@ parameters:
+   NtpServer:
+     type: string
+     default: ''
++  OpenDaylightPort:
++    default: 8081
++    description: Set opendaylight service port
++    type: number
+   PcsdPassword:
+     type: string
+     description: The password for the 'pcsd' user.
+@@ -794,6 +798,7 @@ resources:
+             template: tripleo-CLUSTER
+             params:
+               CLUSTER: {get_param: MysqlClusterUniquePart}
++        opendaylight_port: {get_param: OpenDaylightPort}
+         neutron_flat_networks: {get_param: NeutronFlatNetworks}
+         neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
+         neutron_agent_mode: {get_param: NeutronAgentMode}
+@@ -1136,6 +1141,9 @@ resources:
+                 mysql_bind_host: {get_input: mysql_network}
+                 mysql_virtual_ip: {get_input: mysql_virtual_ip}
++                # OpenDaylight
++                opendaylight_port: {get_input: opendaylight_port}
++
+                 # Neutron
+                 neutron::bind_host: {get_input: neutron_api_network}
+                 neutron::rabbit_password: {get_input: rabbit_password}
+diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
+index 2150bab..9846636 100644
+--- a/puppet/manifests/overcloud_compute.pp
++++ b/puppet/manifests/overcloud_compute.pp
+@@ -21,6 +21,8 @@ if count(hiera('ntp::servers')) > 0 {
+   include ::ntp
+ }
++$controller_node_ips = split(hiera('controller_node_ips'), ',')
++
+ file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
+         '/etc/libvirt/qemu/networks/default.xml']:
+   ensure => absent,
+@@ -74,9 +76,32 @@ class { 'neutron::plugins::ml2':
+   tenant_network_types => [hiera('neutron_tenant_network_type')],
+ }
+-class { 'neutron::agents::ml2::ovs':
+-  bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
+-  tunnel_types    => split(hiera('neutron_tunnel_types'), ','),
++if 'opendaylight' in hiera('neutron_mechanism_drivers') {
++  $opendaylight_port = hiera('opendaylight_port')
++  $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
++
++  exec { 'Wait for NetVirt OVS to come up':
++    command   => "/bin/curl -o /dev/null --fail --silent --head -u admin:admin \
++                  http://${controller_node_ips[0]}:${opendaylight_port}/restconf/operational/network-topology:network-topology",
++    tries     => 20,
++    try_sleep => 60,
++  } ->
++  # OVS manager
++  exec { 'Set OVS Manager to OpenDaylight':
++    command => "/usr/bin/ovs-vsctl set-manager tcp:${controller_node_ips[0]}:6640",
++    unless  => "/usr/bin/ovs-vsctl show | /usr/bin/grep 'Manager \"tcp:${controller_node_ips[0]}:6640\"'",
++  } ->
++  # local ip
++  exec { 'Set local_ip Other Option':
++    command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . _uuid) other_config:local_ip=$private_ip",
++    unless  => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'local_ip=\"$private_ip\"'",
++  }
++
++} else {
++  class { 'neutron::agents::ml2::ovs':
++    bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
++    tunnel_types    => split(hiera('neutron_tunnel_types'), ','),
++  }
+ }
+ if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
+index c330236..68739a6 100644
+--- a/puppet/manifests/overcloud_controller.pp
++++ b/puppet/manifests/overcloud_controller.pp
+@@ -30,6 +30,13 @@ if hiera('step') >= 1 {
+ if hiera('step') >= 2 {
++  if 'opendaylight' in hiera('neutron_mechanism_drivers') {
++    class {"opendaylight":
++      extra_features => ['odl-ovsdb-openstack', 'odl-dlux-core', 'odl-sfc-core', 'odl-sfc-ui', 'odl-sfc-sb-rest', 'odl-sfc-ovs', 'odl-sfc-netconf', 'odl-sfclisp', 'odl-sfcofl2'],
++      odl_rest_port  => hiera('opendaylight_port'),
++    }
++  }
++
+   if count(hiera('ntp::servers')) > 0 {
+     include ::ntp
+   }
+@@ -242,10 +249,45 @@ if hiera('step') >= 3 {
+     tenant_network_types => [hiera('neutron_tenant_network_type')],
+     mechanism_drivers   => [hiera('neutron_mechanism_drivers')],
+   }
+-  class { 'neutron::agents::ml2::ovs':
+-    bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
+-    tunnel_types => split(hiera('neutron_tunnel_types'), ','),
++
++  if 'opendaylight' in hiera('neutron_mechanism_drivers') {
++
++    $opendaylight_port = hiera('opendaylight_port')
++    $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
++
++    neutron_plugin_ml2 {
++      'ml2_odl/username':         value => 'admin';
++      'ml2_odl/password':         value => 'admin';
++      'ml2_odl/url':              value => "http://${controller_node_ips[0]}:${opendaylight_port}/controller/nb/v2/neutron";
++    }
++
++    exec { 'Wait for NetVirt OVSDB to come up':
++      command   => "/bin/curl -o /dev/null --fail --silent --head -u admin:admin \
++                    http://${controller_node_ips[0]}:${opendaylight_port}/restconf/operational/network-topology:network-topology",
++      tries     => 20,
++      try_sleep => 60,
++    } ->
++    # OVS manager
++    exec { 'Set OVS Manager to OpenDaylight':
++      command => "/usr/bin/ovs-vsctl set-manager tcp:${controller_node_ips[0]}:6640",
++      unless  => "/usr/bin/ovs-vsctl show | /usr/bin/grep 'Manager \"tcp:${controller_node_ips[0]}:6640\"'",
++    } ->
++    # local ip
++    exec { 'Set local_ip Other Option':
++      command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . _uuid) other_config:local_ip=$private_ip",
++      unless  => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'local_ip=\"$private_ip\"'",
++    }
++
++  } else {
++
++    class { 'neutron::agents::ml2::ovs':
++      bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
++      tunnel_types => split(hiera('neutron_tunnel_types'), ','),
++    }
++
++    Service['neutron-server'] -> Service['neutron-ovs-agent-service']
+   }
++
+   if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+     include neutron::plugins::ml2::cisco::nexus1000v
+@@ -281,7 +323,6 @@ if hiera('step') >= 3 {
+   Service['neutron-server'] -> Service['neutron-dhcp-service']
+   Service['neutron-server'] -> Service['neutron-l3']
+-  Service['neutron-server'] -> Service['neutron-ovs-agent-service']
+   Service['neutron-server'] -> Service['neutron-metadata']
+   include ::cinder
+diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
+index b8fa89f..9911285 100644
+--- a/puppet/manifests/overcloud_controller_pacemaker.pp
++++ b/puppet/manifests/overcloud_controller_pacemaker.pp
+@@ -380,6 +380,13 @@ if hiera('step') >= 2 {
+   }
++  if 'opendaylight' in hiera('neutron_mechanism_drivers') {
++    class {"opendaylight":
++      extra_features => ['odl-ovsdb-openstack', 'odl-dlux-core', 'odl-sfc-core', 'odl-sfc-ui', 'odl-sfc-sb-rest', 'odl-sfc-ovs', 'odl-sfc-netconf', 'odl-sfclisp', 'odl-sfcofl2'],
++      odl_rest_port  => hiera('opendaylight_port'),
++    }
++  }
++
+   exec { 'galera-ready' :
+     command     => '/usr/bin/clustercheck >/dev/null',
+     timeout     => 30,
+@@ -604,13 +611,42 @@ if hiera('step') >= 3 {
+     tenant_network_types => [hiera('neutron_tenant_network_type')],
+     mechanism_drivers   => [hiera('neutron_mechanism_drivers')],
+   }
+-  class { 'neutron::agents::ml2::ovs':
+-    manage_service   => false,
+-    enabled          => false,
+-    bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
+-    tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
+-  }
++  if 'opendaylight' in hiera('neutron_mechanism_drivers') {
++
++    $opendaylight_port = hiera('opendaylight_port')
++    $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
++
++    neutron_plugin_ml2 {
++      'ml2_odl/username':         value => 'admin';
++      'ml2_odl/password':         value => 'admin';
++      'ml2_odl/url':              value => "http://${controller_node_ips[0]}:${opendaylight_port}/controller/nb/v2/neutron";
++    }
++    exec { 'Wait for NetVirt OVSDB to come up':
++      command   => "/bin/curl -o /dev/null --fail --silent --head -u admin:admin \
++                    http://${controller_node_ips[0]}:${opendaylight_port}/restconf/operational/network-topology:network-topology",
++      tries     => 20,
++      try_sleep => 60,
++    } ->
++    # OVS manager
++    exec { 'Set OVS Manager to OpenDaylight':
++      command => "/usr/bin/ovs-vsctl set-manager tcp:${controller_node_ips[0]}:6640",
++      unless  => "/usr/bin/ovs-vsctl show | /usr/bin/grep 'Manager \"tcp:${controller_node_ips[0]}:6640\"'",
++    } ->
++    # local ip
++    exec { 'Set local_ip Other Option':
++      command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . _uuid) other_config:local_ip=$private_ip",
++      unless  => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'local_ip=\"$private_ip\"'",
++    }
++
++  } else {
++    class { 'neutron::agents::ml2::ovs':
++      manage_service   => false,
++      enabled          => false,
++      bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
++      tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
++    }
++  }
+   if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
+     include ::neutron::plugins::ml2::cisco::ucsm
+   }
+@@ -1059,56 +1095,13 @@ if hiera('step') >= 4 {
+     pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
+       clone_params   => "interleave=true",
+     }
+-    pacemaker::resource::service { $::neutron::params::ovs_agent_service:
+-      clone_params => "interleave=true",
+-    }
+     pacemaker::resource::service { $::neutron::params::metadata_agent_service:
+       clone_params => "interleave=true",
+     }
+-    pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
+-      ocf_agent_name => "neutron:OVSCleanup",
+-      clone_params => "interleave=true",
+-    }
+     pacemaker::resource::ocf { 'neutron-netns-cleanup':
+       ocf_agent_name => "neutron:NetnsCleanup",
+       clone_params => "interleave=true",
+     }
+-
+-    # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
+-    pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
+-      constraint_type => "order",
+-      first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
+-      second_resource => "neutron-netns-cleanup-clone",
+-      first_action => "start",
+-      second_action => "start",
+-      require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
+-                  Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
+-    }
+-    pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
+-      source => "neutron-netns-cleanup-clone",
+-      target => "${::neutron::params::ovs_cleanup_service}-clone",
+-      score => "INFINITY",
+-      require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
+-                  Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
+-    }
+-    pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
+-      constraint_type => "order",
+-      first_resource => "neutron-netns-cleanup-clone",
+-      second_resource => "${::neutron::params::ovs_agent_service}-clone",
+-      first_action => "start",
+-      second_action => "start",
+-      require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
+-                  Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
+-    }
+-    pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
+-      source => "${::neutron::params::ovs_agent_service}-clone",
+-      target => "neutron-netns-cleanup-clone",
+-      score => "INFINITY",
+-      require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
+-                  Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
+-    }
+-
+-    #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
+     pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
+       constraint_type => "order",
+       first_resource => "${::keystone::params::service_name}-clone",
+@@ -1118,31 +1111,75 @@ if hiera('step') >= 4 {
+       require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
+                   Pacemaker::Resource::Service[$::neutron::params::server_service]],
+     }
+-    pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
+-      constraint_type => "order",
+-      first_resource => "${::neutron::params::server_service}-clone",
+-      second_resource => "${::neutron::params::ovs_agent_service}-clone",
+-      first_action => "start",
+-      second_action => "start",
+-      require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+-                  Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
+-    }
+-    pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
+-      constraint_type => "order",
+-      first_resource => "${::neutron::params::ovs_agent_service}-clone",
+-      second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+-      first_action => "start",
+-      second_action => "start",
+-      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
+-                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
++    if 'openvswitch' in hiera('neutron_mechanism_drivers') {
++      pacemaker::resource::service { $::neutron::params::ovs_agent_service:
++        clone_params => "interleave=true",
++      }
++      pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
++        ocf_agent_name => "neutron:OVSCleanup",
++        clone_params => "interleave=true",
++      }
++      # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
++      pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
++        constraint_type => "order",
++        first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
++        second_resource => "neutron-netns-cleanup-clone",
++        first_action => "start",
++        second_action => "start",
++        require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
++                    Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
++      }
++      pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
++        source => "neutron-netns-cleanup-clone",
++        target => "${::neutron::params::ovs_cleanup_service}-clone",
++        score => "INFINITY",
++        require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
++                    Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
++      }
++      pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
++        constraint_type => "order",
++        first_resource => "neutron-netns-cleanup-clone",
++        second_resource => "${::neutron::params::ovs_agent_service}-clone",
++        first_action => "start",
++        second_action => "start",
++        require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
++                    Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
++      }
++      pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
++        source => "${::neutron::params::ovs_agent_service}-clone",
++        target => "neutron-netns-cleanup-clone",
++        score => "INFINITY",
++        require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
++                    Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
++      }
+-    }
+-    pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
+-      source => "${::neutron::params::dhcp_agent_service}-clone",
+-      target => "${::neutron::params::ovs_agent_service}-clone",
+-      score => "INFINITY",
+-      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
+-                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
++      #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
++      pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
++        constraint_type => "order",
++        first_resource => "${::neutron::params::server_service}-clone",
++        second_resource => "${::neutron::params::ovs_agent_service}-clone",
++        first_action => "start",
++        second_action => "start",
++        require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
++                    Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
++      }
++      pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
++        constraint_type => "order",
++        first_resource => "${::neutron::params::ovs_agent_service}-clone",
++        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
++        first_action => "start",
++        second_action => "start",
++        require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
++                    Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
++
++      }
++      pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
++        source => "${::neutron::params::dhcp_agent_service}-clone",
++        target => "${::neutron::params::ovs_agent_service}-clone",
++        score => "INFINITY",
++        require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
++                    Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
++      }
+     }
+     pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
+       constraint_type => "order",
+-- 
+2.5.0
+
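The patch above threads OpenDaylightPort from the Heat parameters down to the opendaylight_port hiera key consumed by the puppet manifests, so the ODL REST port could be overridden from an additional environment file instead of editing the templates. A hypothetical override file (not shipped by this change) could be generated next to opendaylight.yaml like this:

  # hypothetical helper: write an extra environment file that pins the ODL REST port
  cat > opendaylight-port.yaml << 'EOF'
  parameters:
    OpenDaylightPort: 8081
  EOF
  # it would then be added to the deploy command with a second "-e opendaylight-port.yaml"
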
diff --git a/build/opendaylight.yaml b/build/opendaylight.yaml
new file mode 100644
index 0000000..39e4aa3
--- /dev/null
+++ b/build/opendaylight.yaml
@@ -0,0 +1,4 @@
+parameters:
+    ExtraConfig:
+      neutron_mechanism_drivers: ['opendaylight']
+      neutron_tenant_network_type: vxlan
diff --git a/build/opnfv-apex.spec b/build/opnfv-apex.spec
index 641c37c..9d334a1 100644
@@ -35,6 +35,8 @@ cp build/instack.xml %{buildroot}%{_var}/opt/opnfv/
 cp build/baremetalbrbm_*.xml %{buildroot}%{_var}/opt/opnfv/
 cp build/brbm-net.xml %{buildroot}%{_var}/opt/opnfv/
 cp build/default-pool.xml %{buildroot}%{_var}/opt/opnfv/
+cp build/opendaylight.yaml %{buildroot}%{_var}/opt/opnfv/
+cp build/opendaylight.patch %{buildroot}%{_var}/opt/opnfv/
 
 cp build/instackenv-virt.json %{buildroot}%{_var}/opt/opnfv/
 cp build/stack/deploy-ramdisk-ironic.initramfs %{buildroot}%{_var}/opt/opnfv/stack/
@@ -54,6 +56,8 @@ cp build/stack/overcloud-full.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
 %{_var}/opt/opnfv/baremetalbrbm_*.xml
 %{_var}/opt/opnfv/brbm-net.xml
 %{_var}/opt/opnfv/default-pool.xml
+%{_var}/opt/opnfv/opendaylight.yaml
+%{_var}/opt/opnfv/opendaylight.patch
 %{_var}/opt/opnfv/instackenv-virt.json
 %{_var}/opt/opnfv/stack/deploy-ramdisk-ironic.initramfs
 %{_var}/opt/opnfv/stack/deploy-ramdisk-ironic.kernel
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 0a5a2a8..251fc72 100755
@@ -198,6 +198,14 @@ function copy_materials {
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.initrd "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.vmlinuz "stack@$UNDERCLOUD":
+  scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.yaml "stack@$UNDERCLOUD":
+
+  ## WORK AROUND
+  # when OpenDaylight lands in upstream RDO manager this can be removed
+  # apply the opendaylight patch
+  scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.patch "root@$UNDERCLOUD":
+  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cd /usr/share/openstack-tripleo-heat-templates/; patch -Np1 < /root/opendaylight.patch"
+  ## END WORK AROUND
 
   # ensure stack user on instack machine has an ssh key
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
@@ -264,7 +272,7 @@ echo "Configuring nameserver on ctlplane network"
 neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
 echo "Executing overcloud deployment, this should run for an extended period without output."
 sleep 60 #wait for Hypervisor stats to check-in to nova
-openstack overcloud deploy --templates $DEPLOY_OPTIONS
+openstack overcloud deploy --templates $DEPLOY_OPTIONS -e opendaylight.yaml
 EOI
 
 }
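
Because the tripleo-heat-templates patch applied in copy_materials is flagged as a temporary workaround, backing it out of the undercloud (for example before the templates package is updated) is just the reverse operation; a sketch reusing deploy.sh's SSH_OPTIONS and UNDERCLOUD variables:

  # remove the workaround patch from the undercloud templates (illustrative only)
  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" \
      "cd /usr/share/openstack-tripleo-heat-templates/; patch -R -p1 < /root/opendaylight.patch"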