Enable Cisco N1KV driver
author: Shiva Prasad Rao <shivrao@cisco.com>
Tue, 14 Jul 2015 02:37:12 +0000 (19:37 -0700)
committer: marios <marios@redhat.com>
Wed, 30 Sep 2015 06:22:33 +0000 (09:22 +0300)
This enables support for the Cisco N1kv driver for the ML2 plugin.
It also configures the Nexus 1000v switch.

Co-Authored-By: Steven Hillman <sthillma@cisco.com>
Depends-On: I02dda0685c7df9013693db5eeacb2f47745d05b5
Depends-On: I3f14cdce9b9bf278aa9b107b2d313e1e82a20709

Change-Id: Idf23ed11a53509c00aa5fea4c87a515f42ad744f

environments/neutron-ml2-cisco-n1kv.yaml [new file with mode: 0644]
puppet/compute.yaml
puppet/controller.yaml
puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml [new file with mode: 0644]
puppet/manifests/overcloud_compute.pp
puppet/manifests/overcloud_controller.pp
puppet/manifests/overcloud_controller_pacemaker.pp

diff --git a/environments/neutron-ml2-cisco-n1kv.yaml b/environments/neutron-ml2-cisco-n1kv.yaml
new file mode 100644 (file)
index 0000000..651e956
--- /dev/null
@@ -0,0 +1,11 @@
+# A Heat environment file which can be used to enable a
+# Cisco N1KV backend, configured via puppet
+resource_registry:
+  OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+  OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+
+parameter_defaults:
+  N1000vVSMIP: '192.0.2.50'
+  N1000vMgmtGatewayIP: '192.0.2.1'
+  N1000vVSMDomainID: '100'
+  N1000vVSMHostMgmtIntf: 'br-ex'
index a7c1c8c..85f7515 100644 (file)
@@ -360,6 +360,7 @@ resources:
             - all_nodes # provided by allNodesConfig
             - '"%{::osfamily}"'
             - common
+            - cisco_n1kv_data  # Optionally provided by ComputeExtraConfigPre
           datafiles:
             compute_extraconfig:
               mapped_data: {get_param: NovaComputeExtraConfig}
index a68ece2..7db0000 100644 (file)
@@ -948,6 +948,7 @@ resources:
             - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
             - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
             - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
+            - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
           datafiles:
             controller_extraconfig:
               mapped_data: {get_param: ControllerExtraConfig}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
new file mode 100644 (file)
index 0000000..5985116
--- /dev/null
@@ -0,0 +1,174 @@
+heat_template_version: 2015-04-30
+
+description: Configure hieradata for Cisco N1KV configuration
+
+parameters:
+  server:
+    description: ID of the controller node to apply this config to
+    type: string
+
+  # Config specific parameters, to be provided via parameter_defaults
+  N1000vVSMIP:
+    type: string
+    default: '192.0.2.50'
+  N1000vVSMDomainID:
+    type: number
+    default: 100
+  N1000vVSMIPV6:
+    type: string
+    default: '::1'
+  N1000vVEMHostMgmtIntf:
+    type: string
+    default: 'br-ex'
+  N1000vUplinkProfile:
+    type: string
+    default: '{eth1: system-uplink,}'
+  N1000vVtepConfig:
+    type: string
+    default: '{}'
+  N1000vVEMSource:
+    type: string
+    default: ''
+  N1000vVEMVersion:
+    type: string
+    default: ''
+  N1000vPortDB:
+    type: string
+    default: 'ovs'
+  N1000vVtepsInSameSub:
+    type: boolean
+    default: false
+  N1000vVEMFastpathFlood:
+    type: string
+    default: 'enable'
+#VSM Puppet Parameter
+  N1000vVSMSource:
+    type: string
+    default: ''
+  N1000vVSMVersion:
+    type: string
+    default: 'latest'
+  N1000vVSMHostMgmtIntf:
+    type: string
+    default: 'br-ex'
+  N1000vVSMRole:
+    type: string
+    default: 'primary'
+  N1000vVSMPassword:
+    type: string
+    default: 'Password'
+  N1000vMgmtNetmask:
+    type: string
+    default: '255.255.255.0'
+  N1000vMgmtGatewayIP:
+    type: string
+    default: '192.0.2.1'
+  N1000vPacemakerControl:
+    type: boolean
+    default: true
+  N1000vExistingBridge:
+    type: boolean
+    default: true
+#Plugin Parameters
+  N1000vVSMUser:
+    type: string
+    default: 'admin'
+  N1000vPollDuration:
+    type: number
+    default: 60
+  N1000vHttpPoolSize:
+    type: number
+    default: 5
+  N1000vHttpTimeout:
+    type: number
+    default: 15
+  N1000vSyncInterval:
+    type: number
+    default: 300
+  N1000vMaxVSMRetries:
+    type: number
+    default: 2
+
+resources:
+  CiscoN1kvConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        hiera:
+          datafiles:
+            cisco_n1kv_data:
+              mapped_data:
+                #enable_cisco_n1kv: {get_input: EnableCiscoN1kv}
+                # VEM Parameters
+                n1kv_vem_source: {get_input: n1kv_vem_source}
+                n1kv_vem_version: {get_input: n1kv_vem_version}
+                neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
+                neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id}
+                neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6}
+                neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf}
+                neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile}
+                neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config}
+                neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb}
+                neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet}
+                neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood}
+                #VSM Parameter
+                n1kv_vsm_source: {get_input: n1kv_vsm_source}
+                n1kv_vsm_version: {get_input: n1kv_vsm_version}
+                n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf}
+                n1k_vsm::vsm_role: {get_input: n1kv_vsm_role}
+                n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl}
+                n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br}
+                n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password}
+                n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id}
+                n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip}
+                n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask}
+                n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip}
+                n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip}
+                # Cisco N1KV driver Parameters
+                neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
+                neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username}
+                neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password}
+                neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration}
+                neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size}
+                neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout}
+                neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval}
+                neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries}
+
+  CiscoN1kvDeployment:
+    type: OS::Heat::StructuredDeployment
+    properties:
+      config: {get_resource: CiscoN1kvConfig}
+      server: {get_param: server}
+      input_values:
+        n1kv_vsm_ip: {get_param: N1000vVSMIP}
+        n1kv_vsm_domain_id: {get_param: N1000vVSMDomainID}
+        n1kv_vsm_ip_v6: {get_param: N1000vVSMIPV6}
+        n1kv_vem_host_mgmt_intf: {get_param: N1000vVEMHostMgmtIntf}
+        n1kv_vem_uplink_profile: {get_param: N1000vUplinkProfile}
+        n1kv_vem_vtep_config: {get_param: N1000vVtepConfig}
+        n1kv_vem_source: {get_param: N1000vVEMSource}
+        n1kv_vem_version: {get_param: N1000vVEMVersion}
+        n1kv_vem_portdb: {get_param: N1000vPortDB}
+        n1kv_vem_vteps_in_same_subnet: {get_param: N1000vVtepsInSameSub}
+        n1kv_vem_fastpath_flood: {get_param: N1000vVEMFastpathFlood}
+        n1kv_vsm_source: {get_param: N1000vVSMSource}
+        n1kv_vsm_version: {get_param: N1000vVSMVersion}
+        n1kv_vsm_host_mgmt_intf: {get_param: N1000vVSMHostMgmtIntf}
+        n1kv_vsm_role: {get_param: N1000vVSMRole}
+        n1kv_vsm_password: {get_param: N1000vVSMPassword}
+        n1kv_vsm_mgmt_netmask: {get_param: N1000vMgmtNetmask}
+        n1kv_vsm_gateway_ip: {get_param: N1000vMgmtGatewayIP}
+        n1kv_vsm_pacemaker_ctrl: {get_param: N1000vPacemakerControl}
+        n1kv_vsm_existing_br: {get_param: N1000vExistingBridge}
+        n1kv_vsm_username: {get_param: N1000vVSMUser}
+        n1kv_vsm_poll_duration: {get_param: N1000vPollDuration}
+        n1kv_vsm_http_pool_size: {get_param: N1000vHttpPoolSize}
+        n1kv_vsm_http_timeout: {get_param: N1000vHttpTimeout}
+        n1kv_vsm_sync_interval: {get_param: N1000vSyncInterval}
+        n1kv_max_vsm_retries: {get_param: N1000vMaxVSMRetries}
+
+outputs:
+  deploy_stdout:
+    description: Deployment reference, used to trigger puppet apply on changes
+    value: {get_attr: [CiscoN1kvDeployment, deploy_stdout]}
index e6fa947..70e5326 100644 (file)
@@ -78,6 +78,14 @@ class { 'neutron::agents::ml2::ovs':
   tunnel_types    => split(hiera('neutron_tunnel_types'), ','),
 }
 
+if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+  class { 'neutron::agents::n1kv_vem':
+    n1kv_source          => hiera('n1kv_vem_source', undef),
+    n1kv_version         => hiera('n1kv_vem_version', undef),
+  }
+}
+
+
 include ::ceilometer
 include ::ceilometer::agent::compute
 include ::ceilometer::agent::auth
index fdb16ea..13a2ed0 100644 (file)
@@ -245,6 +245,20 @@ if hiera('step') >= 3 {
     bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
     tunnel_types => split(hiera('neutron_tunnel_types'), ','),
   }
+  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+    include neutron::plugins::ml2::cisco::nexus1000v
+
+    class { 'neutron::agents::n1kv_vem':
+      n1kv_source          => hiera('n1kv_vem_source', undef),
+      n1kv_version         => hiera('n1kv_vem_version', undef),
+    }
+
+    class { 'n1k_vsm':
+      n1kv_source       => hiera('n1kv_vsm_source', undef),
+      n1kv_version      => hiera('n1kv_vsm_version', undef),
+      pacemaker_control => false,
+    }
+  }
 
   if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
     include ::neutron::plugins::ml2::cisco::ucsm
@@ -439,10 +453,17 @@ if hiera('step') >= 3 {
   include ::heat::engine
 
   # Horizon
+  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+    $_profile_support = 'cisco'
+  } else {
+    $_profile_support = 'None'
+  }
+  $neutron_options   = {'profile_support' => $_profile_support }
   $vhost_params = { add_listen => false }
   class { 'horizon':
     cache_server_ip    => hiera('memcache_node_ips', '127.0.0.1'),
     vhost_extra_params => $vhost_params,
+    neutron_options    => $neutron_options,
   }
 
   $snmpd_user = hiera('snmpd_readonly_user_name')
index 7615290..3726722 100644 (file)
@@ -617,6 +617,19 @@ if hiera('step') >= 3 {
     include ::neutron::plugins::ml2::cisco::nexus
     include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
   }
+  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+    include neutron::plugins::ml2::cisco::nexus1000v
+
+    class { 'neutron::agents::n1kv_vem':
+      n1kv_source          => hiera('n1kv_vem_source', undef),
+      n1kv_version         => hiera('n1kv_vem_version', undef),
+    }
+
+    class { 'n1k_vsm':
+      n1kv_source       => hiera('n1kv_vsm_source', undef),
+      n1kv_version      => hiera('n1kv_vsm_version', undef),
+    }
+  }
 
   if hiera('neutron_enable_bigswitch_ml2', false) {
     include neutron::plugins::ml2::bigswitch::restproxy
@@ -859,6 +872,12 @@ if hiera('step') >= 3 {
   # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
   include ::apache
   include ::apache::mod::status
+  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+    $_profile_support = 'cisco'
+  } else {
+    $_profile_support = 'None'
+  }
+  $neutron_options   = {'profile_support' => $_profile_support }
   $vhost_params = {
     add_listen => false,
     priority   => 10,
@@ -867,6 +886,7 @@ if hiera('step') >= 3 {
     cache_server_ip    => hiera('memcache_node_ips', '127.0.0.1'),
     vhost_extra_params => $vhost_params,
     server_aliases     => $::hostname,
+    neutron_options    => $neutron_options,
   }
 
   $snmpd_user = hiera('snmpd_readonly_user_name')
@@ -1482,6 +1502,30 @@ if hiera('step') >= 4 {
         clone_params => "interleave=true",
     }
 
+    #VSM
+    if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+      pacemaker::resource::ocf { 'vsm-p' :
+        ocf_agent_name  => 'heartbeat:VirtualDomain',
+        resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
+        require         => Class['n1k_vsm'],
+        meta_params     => 'resource-stickiness=INFINITY',
+      }
+      if str2bool(hiera('n1k_vsm::pacemaker_control', 'true')) {
+        pacemaker::resource::ocf { 'vsm-s' :
+          ocf_agent_name  => 'heartbeat:VirtualDomain',
+          resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
+          require         => Class['n1k_vsm'],
+          meta_params     => 'resource-stickiness=INFINITY',
+        }
+        pacemaker::constraint::colocation { 'vsm-colocation-contraint':
+          source  => "vsm-p",
+          target  => "vsm-s",
+          score   => "-INFINITY",
+          require => [Pacemaker::Resource::Ocf['vsm-p'],
+                      Pacemaker::Resource::Ocf['vsm-s']],
+        }
+      }
+    }
 
   }