Merge "Refactor cellv2 host discovery logic to avoid races" into stable/pike
author Zuul <zuul@review.openstack.org>
Fri, 10 Nov 2017 17:15:39 +0000 (17:15 +0000)
committer Gerrit Code Review <review@openstack.org>
Fri, 10 Nov 2017 17:15:39 +0000 (17:15 +0000)
ci/environments/scenario001-multinode-containers.yaml
docker/services/pacemaker/cinder-backup.yaml
docker/services/pacemaker/cinder-volume.yaml
environments/neutron-opendaylight-dpdk.yaml
environments/neutron-opendaylight-sriov.yaml
environments/services-docker/neutron-opendaylight-dpdk.yaml [new file with mode: 0644]
environments/services-docker/neutron-opendaylight-sriov.yaml [new file with mode: 0644]
puppet/services/docker.yaml
releasenotes/notes/change_default_docker0_address-6a017b7078825996.yaml [new file with mode: 0644]

diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml
index 082541c..3ddc29c 100644 (file)
@@ -26,12 +26,7 @@ resource_registry:
   OS::TripleO::Tasks::ControllerPreConfig: OS::Heat::None
   OS::TripleO::Tasks::ControllerPostConfig: OS::Heat::None
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
-  # FIXME(mandre) fluentd container image missing from tripleomaster registry
-  # https://bugs.launchpad.net/tripleo/+bug/1721723
-  # OS::TripleO::Services::FluentdClient: ../../docker/services/fluentd-client.yaml
-  # FIXME(mandre/bandini) mixing BM fluentd and containers is problematic
-  # https://bugs.launchpad.net/tripleo/+bug/1726891
-  # OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
+  OS::TripleO::Services::FluentdClient: ../../docker/services/fluentd-client.yaml
   OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
   # Some infra instances don't pass the ping test but are otherwise working.
   # Since the OVB jobs also test this functionality we can shut it off here.
diff --git a/docker/services/pacemaker/cinder-backup.yaml b/docker/services/pacemaker/cinder-backup.yaml
index 4a99184..52e54a6 100644 (file)
@@ -87,21 +87,34 @@ outputs:
             cinder::backup::enabled: false
       logging_source: {get_attr: [CinderBackupBase, role_data, logging_source]}
       logging_groups: {get_attr: [CinderBackupBase, role_data, logging_groups]}
-      step_config: ""
+      step_config: &step_config
+        list_join:
+          - "\n"
+          - - {get_attr: [CinderBackupBase, role_data, step_config]}
+            - {get_attr: [MySQLClient, role_data, step_config]}
       service_config_settings: {get_attr: [CinderBackupBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: cinder
         puppet_tags: cinder_config,file,concat,file_line
-        step_config:
-          list_join:
-            - "\n"
-            - - {get_attr: [CinderBackupBase, role_data, step_config]}
-              - {get_attr: [MySQLClient, role_data, step_config]}
+        step_config: *step_config
         config_image: {get_param: DockerCinderConfigImage}
       kolla_config:
         /var/lib/kolla/config_files/cinder_backup.json:
           command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          config_files:
+            - source: "/var/lib/kolla/config_files/src/*"
+              dest: "/"
+              merge: true
+              preserve_properties: true
+            - source: "/var/lib/kolla/config_files/src-ceph/"
+              dest: "/etc/ceph/"
+              merge: true
+              preserve_properties: true
+            - source: "/var/lib/kolla/config_files/src-iscsid/*"
+              dest: "/"
+              merge: true
+              preserve_properties: true
           permissions:
             - path: /var/lib/cinder
               owner: cinder:cinder
@@ -176,6 +189,10 @@ outputs:
           with_items:
             - /var/lib/cinder
             - /var/log/containers/cinder
+        - name: ensure ceph configurations exist
+          file:
+            path: /etc/ceph
+            state: directory
       upgrade_tasks:
         - name: get bootstrap nodeid
           tags: common
diff --git a/docker/services/pacemaker/cinder-volume.yaml b/docker/services/pacemaker/cinder-volume.yaml
index ce93e5f..a1b04c3 100644 (file)
@@ -69,7 +69,8 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [CinderBase, role_data, config_settings]
-          - tripleo::profile::pacemaker::cinder::volume_bundle::cinder_volume_docker_image: &cinder_volume_image_pcmklatest
+          - tripleo::profile::base::lvm::enable_udev: false
+            tripleo::profile::pacemaker::cinder::volume_bundle::cinder_volume_docker_image: &cinder_volume_image_pcmklatest
               list_join:
                 - ':'
                 - - yaql:
@@ -81,21 +82,35 @@ outputs:
             cinder::host: hostgroup
       logging_source: {get_attr: [CinderBase, role_data, logging_source]}
       logging_groups: {get_attr: [CinderBase, role_data, logging_groups]}
-      step_config: ""
+      step_config: &step_config
+        list_join:
+          - "\n"
+          - - "include ::tripleo::profile::base::lvm"
+            - get_attr: [CinderBase, role_data, step_config]
+            - get_attr: [MySQLClient, role_data, step_config]
       service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: cinder
         puppet_tags: cinder_config,file,concat,file_line
-        step_config:
-          list_join:
-            - "\n"
-            - - {get_attr: [CinderBase, role_data, step_config]}
-              - {get_attr: [MySQLClient, role_data, step_config]}
+        step_config: *step_config
         config_image: {get_param: DockerCinderConfigImage}
       kolla_config:
         /var/lib/kolla/config_files/cinder_volume.json:
           command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          config_files:
+            - source: "/var/lib/kolla/config_files/src/*"
+              dest: "/"
+              merge: true
+              preserve_properties: true
+            - source: "/var/lib/kolla/config_files/src-ceph/"
+              dest: "/etc/ceph/"
+              merge: true
+              preserve_properties: true
+            - source: "/var/lib/kolla/config_files/src-iscsid/*"
+              dest: "/"
+              merge: true
+              preserve_properties: true
           permissions:
             - path: /var/log/cinder
               owner: cinder:cinder
@@ -167,7 +182,13 @@ outputs:
           with_items:
             - /var/log/containers/cinder
             - /var/lib/cinder
-        #FIXME: all of this should be conditional on the CinderEnableIscsiBackend value being set to true
+        - name: ensure ceph configurations exist
+          file:
+            path: /etc/ceph
+            state: directory
+        - name: cinder_enable_iscsi_backend fact
+          set_fact:
+            cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
         - name: cinder create LVM volume group dd
           command:
             list_join:
@@ -180,6 +201,7 @@ outputs:
               - 'M'
           args:
             creates: /var/lib/cinder/cinder-volumes
+          when: cinder_enable_iscsi_backend
         - name: cinder create LVM volume group
           shell: |
             if ! losetup /dev/loop2; then
@@ -194,6 +216,7 @@ outputs:
           args:
             executable: /bin/bash
             creates: /dev/loop2
+          when: cinder_enable_iscsi_backend
       upgrade_tasks:
         - name: get bootstrap nodeid
           tags: common
diff --git a/environments/neutron-opendaylight-dpdk.yaml b/environments/neutron-opendaylight-dpdk.yaml
index 0d59898..749b215 100644 (file)
@@ -1,17 +1,7 @@
 # A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
-resource_registry:
-  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
-  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
-  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
-  OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
-  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
-  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+# This file is to be used with neutron-opendaylight.yaml
 
 parameter_defaults:
-  NeutronEnableForceMetadata: true
-  NeutronPluginExtensions: 'port_security'
-  NeutronMechanismDrivers: 'opendaylight_v2'
-  NeutronServicePlugins: 'odl-router_v2,trunk'
   NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
   OpenDaylightSNATMechanism: 'controller'
 
diff --git a/environments/neutron-opendaylight-sriov.yaml b/environments/neutron-opendaylight-sriov.yaml
index 3a212ed..5292407 100644 (file)
@@ -1,19 +1,10 @@
 # A Heat environment that can be used to deploy OpenDaylight with SRIOV
 resource_registry:
-  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
-  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-plugin-ml2.yaml
-  OS::TripleO::Services::NeutronCorePlugin: ../puppet/services/neutron-plugin-ml2-odl.yaml
-  OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
-  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
   OS::TripleO::Services::NeutronSriovAgent: ../puppet/services/neutron-sriov-agent.yaml
-  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
 
 parameter_defaults:
-  NeutronEnableForceMetadata: true
-  NeutronPluginExtensions: 'port_security'
   NeutronMechanismDrivers: ['sriovnicswitch','opendaylight_v2']
-  NeutronServicePlugins: 'odl-router_v2,trunk'
 
   # Add PciPassthroughFilter to the scheduler default filters
   #NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
diff --git a/environments/services-docker/neutron-opendaylight-dpdk.yaml b/environments/services-docker/neutron-opendaylight-dpdk.yaml
new file mode 100644 (file)
index 0000000..dffafa9
--- /dev/null
@@ -0,0 +1,37 @@
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK.
+# This file is to be used with neutron-opendaylight.yaml
+
+parameter_defaults:
+  NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+  OpenDaylightSNATMechanism: 'controller'
+
+  ComputeOvsDpdkParameters:
+    OvsEnableDpdk: True
+
+    ## Host configuration Parameters
+    #TunedProfileName: "cpu-partitioning"
+    #IsolCpusList: ""               # Logical CPUs list to be isolated from the host process (applied via cpu-partitioning tuned).
+                                    # It is mandatory to provide isolated CPUs for tuned to achieve optimal performance.
+                                    # Example: "3-8,12-15,18"
+    #KernelArgs: ""                 # Space separated kernel args to configure hugepage and IOMMU.
+                                    # Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+                                    # It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+                                    # This should be done by configuring parameters via host-config-and-reboot.yaml environment file.
+
+    ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+    ## due to CPU contention of DPDK PMD threads.
+    ## It is highly recommended to enable isolcpus (via KernelArgs) on compute overcloud nodes and set the following parameters:
+    #OvsDpdkSocketMemory: ""       # Sets the amount of hugepage memory to assign per NUMA node.
+                                   # It is recommended to use the socket closest to the PCIe slot used for the
+                                   # desired DPDK NIC.  Format should be comma separated per socket string such as:
+                                   # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+    #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+    #OvsPmdCoreList: ""            # List or range of CPU cores for PMD threads to be pinned to.  Note, NIC
+                                   # location to cores on socket, number of hyper-threaded logical cores, and
+                                   # desired number of PMD threads can all play a role in configuring this setting.
+                                   # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+                                   # If using hyperthreading then specify both logical cores that would equal the
+                                   # physical core.  Also, specifying more than one core will trigger multiple PMD
+                                   # threads to be spawned, which may improve dataplane performance.
+    #NovaVcpuPinSet: ""            # Cores to pin Nova instances to.  For maximum performance, select cores
+                                   # on the same NUMA node(s) selected for previous settings.
\ No newline at end of file
diff --git a/environments/services-docker/neutron-opendaylight-sriov.yaml b/environments/services-docker/neutron-opendaylight-sriov.yaml
new file mode 100644 (file)
index 0000000..92bce16
--- /dev/null
@@ -0,0 +1,22 @@
+# A Heat environment that can be used to deploy OpenDaylight with SRIOV
+# This file is to be used with neutron-opendaylight.yaml
+
+resource_registry:
+  OS::TripleO::Services::ComputeNeutronCorePlugin: ../../docker/services/neutron-plugin-ml2.yaml
+  OS::TripleO::Services::NeutronSriovAgent: ../../docker/services/neutron-sriov-agent.yaml
+
+parameter_defaults:
+  NeutronMechanismDrivers: ['sriovnicswitch','opendaylight_v2']
+
+  # Add PciPassthroughFilter to the scheduler default filters
+  #NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
+  #NovaSchedulerAvailableFilters: ["nova.scheduler.filters.all_filters","nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter"]
+
+  #NeutronPhysicalDevMappings: "datacentre:ens20f2"
+
+  # Number of VFs that needs to be configured for a physical interface
+  #NeutronSriovNumVFs: "ens20f2:5"
+
+  #NovaPCIPassthrough:
+  #  - devname: "ens20f2"
+  #    physical_network: "datacentre"
\ No newline at end of file
diff --git a/puppet/services/docker.yaml b/puppet/services/docker.yaml
index 2cda08e..2d8d336 100644 (file)
@@ -46,10 +46,14 @@ outputs:
     value:
       service_name: docker
       config_settings:
-        if:
-        - insecure_registry_is_empty
-        - {}
-        - tripleo::profile::base::docker::insecure_registries: {get_param: DockerInsecureRegistryAddress}
+        map_merge:
+          - tripleo::profile::base::docker::configure_network: true
+            tripleo::profile::base::docker::network_options: "--bip=172.31.0.1/24"
+          -
+            if:
+            - insecure_registry_is_empty
+            - {}
+            - tripleo::profile::base::docker::insecure_registries: {get_param: DockerInsecureRegistryAddress}
       step_config: |
         include ::tripleo::profile::base::docker
       upgrade_tasks:
diff --git a/releasenotes/notes/change_default_docker0_address-6a017b7078825996.yaml b/releasenotes/notes/change_default_docker0_address-6a017b7078825996.yaml
new file mode 100644 (file)
index 0000000..792bb7f
--- /dev/null
@@ -0,0 +1,7 @@
+---
+upgrade:
+  - |
+    Changed default address of docker0 bridge to be in the last class B private
+    network -- 172.31.0.1/24 -- to stop conflicting with the default network
+    range for InternalApiNetCidr. The docker0 bridge is normally unused in
+    TripleO deployments.
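
Operators whose existing networks already overlap with 172.31.0.0/24 may want to move the docker0 bridge elsewhere. The snippet below is a minimal sketch of one way to do that, assuming the standard ExtraConfig hieradata override is available in this release and takes precedence over the service's config_settings; the replacement CIDR is purely illustrative, not a recommended value:

    parameter_defaults:
      ExtraConfig:
        # Hypothetical override: move docker0 to another otherwise unused
        # range so it does not collide with networks in use at the site.
        tripleo::profile::base::docker::network_options: "--bip=192.168.254.1/24"

The hiera key is the same one set in puppet/services/docker.yaml above; only the --bip value changes.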