Merge "Enable trust anchor injection"
author    Jenkins <jenkins@review.openstack.org>
Wed, 25 Nov 2015 17:58:58 +0000 (17:58 +0000)
committer Gerrit Code Review <review@openstack.org>
Wed, 25 Nov 2015 17:58:58 +0000 (17:58 +0000)
15 files changed:
docker/compute-post.yaml
environments/docker-rdo.yaml
environments/manage-firewall.yaml [new file with mode: 0644]
environments/updates/README.md [new file with mode: 0644]
environments/updates/update-from-keystone-admin-internal-api.yaml [new file with mode: 0644]
extraconfig/tasks/yum_update.sh
overcloud-without-mergepy.yaml
puppet/controller.yaml
puppet/hieradata/controller.yaml
puppet/manifests/overcloud_cephstorage.pp
puppet/manifests/overcloud_compute.pp
puppet/manifests/overcloud_controller.pp
puppet/manifests/overcloud_controller_pacemaker.pp
puppet/manifests/overcloud_object.pp
puppet/manifests/overcloud_volume.pp

diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 1dc7be1..a6607fd 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -11,7 +11,7 @@ parameters:
      description: Value which changes if the node configuration may need to be re-applied
   DockerNamespace:
     type: string
-    default: kollaglue
+    default: tripleoupstream
   DockerComputeImage:
     type: string
   DockerComputeDataImage:
diff --git a/environments/docker-rdo.yaml b/environments/docker-rdo.yaml
index 23f318b..66824fe 100644
--- a/environments/docker-rdo.yaml
+++ b/environments/docker-rdo.yaml
@@ -7,18 +7,17 @@ resource_registry:
 parameters:
   NovaImage: atomic-image
 
-# FIXME: When Kolla cuts liberty tag we can use kollaglue registry
 parameter_defaults:
-  # Defaults to kollaglue.  Specify a local docker registry
+  # Defaults to 'tripleoupstream'.  Specify a local docker registry
   # Example: 192.168.122.131:8787
-  DockerNamespace: kollaglue
+  DockerNamespace: tripleoupstream
   # Enable local Docker registry
   DockerNamespaceIsRegistry: false
   # Compute Node Images
-  DockerComputeImage: centos-binary-nova-compute:liberty
-  DockerComputeDataImage: centos-binary-data:liberty
-  DockerLibvirtImage: centos-binary-nova-libvirt:liberty
-  DockerNeutronAgentImage: centos-binary-neutron-agents:liberty
-  DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:liberty
-  DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:liberty
-  DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:liberty
+  DockerComputeImage: centos-binary-nova-compute:latest
+  DockerComputeDataImage: centos-binary-data:latest
+  DockerLibvirtImage: centos-binary-nova-libvirt:latest
+  DockerNeutronAgentImage: centos-binary-neutron-agents:latest
+  DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:latest
+  DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:latest
+  DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:latest
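
To point the deployment at a local registry instead, the comments above suggest an override along these lines; a minimal sketch (the address is the example value from the file itself, the filename is hypothetical):

    cat > local-registry.yaml <<'EOF'
    parameter_defaults:
      DockerNamespace: 192.168.122.131:8787
      DockerNamespaceIsRegistry: true
    EOF
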
diff --git a/environments/manage-firewall.yaml b/environments/manage-firewall.yaml
new file mode 100644
index 0000000..071f410
--- /dev/null
+++ b/environments/manage-firewall.yaml
@@ -0,0 +1,2 @@
+parameters:
+  ManageFirewall: true
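
Since ManageFirewall defaults to false (see the parameter definitions later in this change), firewall management stays opt-in; a deployment enables it by passing this file at deploy time. A usage sketch, assuming a standard tripleo-heat-templates checkout:

    openstack overcloud deploy --templates \
      -e environments/manage-firewall.yaml
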
diff --git a/environments/updates/README.md b/environments/updates/README.md
new file mode 100644
index 0000000..8c03411
--- /dev/null
+++ b/environments/updates/README.md
@@ -0,0 +1,9 @@
+This directory contains Heat environment file snippets which can
+be used to ensure smooth updates of the Overcloud.
+
+Contents
+--------
+
+**update-from-keystone-admin-internal-api.yaml**
+  To be used if the Keystone Admin API was originally deployed on the
+  Internal API network.
diff --git a/environments/updates/update-from-keystone-admin-internal-api.yaml b/environments/updates/update-from-keystone-admin-internal-api.yaml
new file mode 100644
index 0000000..3c71ef1
--- /dev/null
+++ b/environments/updates/update-from-keystone-admin-internal-api.yaml
@@ -0,0 +1,33 @@
+# This environment file provides a default value for ServiceNetMap where
+# the Keystone Admin API service is running on the Internal API network
+
+parameters:
+  ServiceNetMap:
+    NeutronTenantNetwork: tenant
+    CeilometerApiNetwork: internal_api
+    MongoDbNetwork: internal_api
+    CinderApiNetwork: internal_api
+    CinderIscsiNetwork: storage
+    GlanceApiNetwork: storage
+    GlanceRegistryNetwork: internal_api
+    KeystoneAdminApiNetwork: internal_api
+    KeystonePublicApiNetwork: internal_api
+    NeutronApiNetwork: internal_api
+    HeatApiNetwork: internal_api
+    NovaApiNetwork: internal_api
+    NovaMetadataNetwork: internal_api
+    NovaVncProxyNetwork: internal_api
+    SwiftMgmtNetwork: storage_mgmt
+    SwiftProxyNetwork: storage
+    HorizonNetwork: internal_api
+    MemcachedNetwork: internal_api
+    RabbitMqNetwork: internal_api
+    RedisNetwork: internal_api
+    MysqlNetwork: internal_api
+    CephClusterNetwork: storage_mgmt
+    CephPublicNetwork: storage
+    ControllerHostnameResolveNetwork: internal_api
+    ComputeHostnameResolveNetwork: internal_api
+    BlockStorageHostnameResolveNetwork: internal_api
+    ObjectStorageHostnameResolveNetwork: internal_api
+    CephStorageHostnameResolveNetwork: storage
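
A usage sketch for updating a pre-existing overcloud that had the Keystone Admin API on the Internal API network (hedged: combine with whatever other -e files the original deployment used):

    openstack overcloud deploy --templates \
      -e environments/updates/update-from-keystone-admin-internal-api.yaml
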
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index fa523e8..69ff554 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -22,7 +22,7 @@ mkdir -p $timestamp_dir
 update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}
 
 # seconds to wait for this node to rejoin the cluster after update
-cluster_start_timeout=360
+cluster_start_timeout=600
 galera_sync_timeout=360
 
 timestamp_file="$timestamp_dir/$update_identifier"
@@ -42,109 +42,91 @@ if [[ "$list_updates" == "" ]]; then
 fi
 
 pacemaker_status=$(systemctl is-active pacemaker)
+pacemaker_dumpfile=$(mktemp)
 
 if [[ "$pacemaker_status" == "active" ]] ; then
-    echo "Checking for and adding missing constraints"
+    echo "Dumping Pacemaker config"
+    pcs cluster cib $pacemaker_dumpfile
+
+    echo "Checking for missing constraints"
 
     if ! pcs constraint order show | grep "start openstack-nova-novncproxy-clone then start openstack-nova-api-clone"; then
-        pcs constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
+        pcs -f $pacemaker_dumpfile constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
     fi
 
     if ! pcs constraint order show | grep "start rabbitmq-clone then start openstack-keystone-clone"; then
-        pcs constraint order start rabbitmq-clone then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order start rabbitmq-clone then openstack-keystone-clone
     fi
 
     if ! pcs constraint order show | grep "promote galera-master then start openstack-keystone-clone"; then
-        pcs constraint order promote galera-master then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order promote galera-master then openstack-keystone-clone
     fi
 
     if ! pcs constraint order show | grep "start haproxy-clone then start openstack-keystone-clone"; then
-        pcs constraint order start haproxy-clone then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order start haproxy-clone then openstack-keystone-clone
     fi
 
     if ! pcs constraint order show | grep "start memcached-clone then start openstack-keystone-clone"; then
-        pcs constraint order start memcached-clone then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order start memcached-clone then openstack-keystone-clone
     fi
 
     if ! pcs constraint order show | grep "promote redis-master then start openstack-ceilometer-central-clone"; then
-        pcs constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
+        pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
+    fi
+
+    # ensure neutron constraints https://review.openstack.org/#/c/229466
+    # remove ovs-cleanup after server and add openvswitch-agent instead
+    if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then
+        pcs -f $pacemaker_dumpfile constraint remove order-neutron-server-clone-neutron-ovs-cleanup-clone-mandatory
+    fi
+    if ! pcs constraint order show | grep "start neutron-server-clone then start neutron-openvswitch-agent-clone"; then
+        pcs -f $pacemaker_dumpfile constraint order start neutron-server-clone then neutron-openvswitch-agent-clone
     fi
 
+
     if ! pcs resource defaults | grep "resource-stickiness: INFINITY"; then
-        pcs resource defaults resource-stickiness=INFINITY
+        pcs -f $pacemaker_dumpfile resource defaults resource-stickiness=INFINITY
     fi
 
     echo "Setting resource start/stop timeouts"
-
-    # timeouts for non-openstack services and special cases
-    pcs resource update haproxy op start timeout=100s
-    pcs resource update haproxy op stop timeout=100s
-    # mongod start timeout is also higher, setting only stop timeout
+    SERVICES="
+haproxy
+memcached
+httpd
+neutron-dhcp-agent
+neutron-l3-agent
+neutron-metadata-agent
+neutron-openvswitch-agent
+neutron-server
+openstack-ceilometer-alarm-evaluator
+openstack-ceilometer-alarm-notifier
+openstack-ceilometer-api
+openstack-ceilometer-central
+openstack-ceilometer-collector
+openstack-ceilometer-notification
+openstack-cinder-api
+openstack-cinder-scheduler
+openstack-cinder-volume
+openstack-glance-api
+openstack-glance-registry
+openstack-heat-api
+openstack-heat-api-cfn
+openstack-heat-api-cloudwatch
+openstack-heat-engine
+openstack-keystone
+openstack-nova-api
+openstack-nova-conductor
+openstack-nova-consoleauth
+openstack-nova-novncproxy
+openstack-nova-scheduler"
+    for service in $SERVICES; do
+        pcs -f $pacemaker_dumpfile resource update $service op start timeout=100s op stop timeout=100s
+    done
+    # mongod start timeout is higher, setting only stop timeout
     pcs resource update mongod op stop timeout=100s
-    # rabbit start timeout is already 100s
-    pcs resource update rabbitmq op stop timeout=100s
-    pcs resource update memcached op start timeout=100s
-    pcs resource update memcached op stop timeout=100s
-    pcs resource update httpd op start timeout=100s
-    pcs resource update httpd op stop timeout=100s
-    # neutron-netns-cleanup stop timeout is 300s, setting only start timeout
-    pcs resource update neutron-netns-cleanup op start timeout=100s
-    # neutron-ovs-cleanup stop timeout is 300s, setting only start timeout
-    pcs resource update neutron-ovs-cleanup op start timeout=100s
-
-    # timeouts for openstack services
-    pcs resource update neutron-dhcp-agent op start timeout=100s
-    pcs resource update neutron-dhcp-agent op stop timeout=100s
-    pcs resource update neutron-l3-agent op start timeout=100s
-    pcs resource update neutron-l3-agent op stop timeout=100s
-    pcs resource update neutron-metadata-agent op start timeout=100s
-    pcs resource update neutron-metadata-agent op stop timeout=100s
-    pcs resource update neutron-openvswitch-agent op start timeout=100s
-    pcs resource update neutron-openvswitch-agent op stop timeout=100s
-    pcs resource update neutron-server op start timeout=100s
-    pcs resource update neutron-server op stop timeout=100s
-    pcs resource update openstack-ceilometer-alarm-evaluator op start timeout=100s
-    pcs resource update openstack-ceilometer-alarm-evaluator op stop timeout=100s
-    pcs resource update openstack-ceilometer-alarm-notifier op start timeout=100s
-    pcs resource update openstack-ceilometer-alarm-notifier op stop timeout=100s
-    pcs resource update openstack-ceilometer-api op start timeout=100s
-    pcs resource update openstack-ceilometer-api op stop timeout=100s
-    pcs resource update openstack-ceilometer-central op start timeout=100s
-    pcs resource update openstack-ceilometer-central op stop timeout=100s
-    pcs resource update openstack-ceilometer-collector op start timeout=100s
-    pcs resource update openstack-ceilometer-collector op stop timeout=100s
-    pcs resource update openstack-ceilometer-notification op start timeout=100s
-    pcs resource update openstack-ceilometer-notification op stop timeout=100s
-    pcs resource update openstack-cinder-api op start timeout=100s
-    pcs resource update openstack-cinder-api op stop timeout=100s
-    pcs resource update openstack-cinder-scheduler op start timeout=100s
-    pcs resource update openstack-cinder-scheduler op stop timeout=100s
-    pcs resource update openstack-cinder-volume op start timeout=100s
-    pcs resource update openstack-cinder-volume op stop timeout=100s
-    pcs resource update openstack-glance-api op start timeout=100s
-    pcs resource update openstack-glance-api op stop timeout=100s
-    pcs resource update openstack-glance-registry op start timeout=100s
-    pcs resource update openstack-glance-registry op stop timeout=100s
-    pcs resource update openstack-heat-api op start timeout=100s
-    pcs resource update openstack-heat-api op stop timeout=100s
-    pcs resource update openstack-heat-api-cfn op start timeout=100s
-    pcs resource update openstack-heat-api-cfn op stop timeout=100s
-    pcs resource update openstack-heat-api-cloudwatch op start timeout=100s
-    pcs resource update openstack-heat-api-cloudwatch op stop timeout=100s
-    pcs resource update openstack-heat-engine op start timeout=100s
-    pcs resource update openstack-heat-engine op stop timeout=100s
-    pcs resource update openstack-keystone op start timeout=100s
-    pcs resource update openstack-keystone op stop timeout=100s
-    pcs resource update openstack-nova-api op start timeout=100s
-    pcs resource update openstack-nova-api op stop timeout=100s
-    pcs resource update openstack-nova-conductor op start timeout=100s
-    pcs resource update openstack-nova-conductor op stop timeout=100s
-    pcs resource update openstack-nova-consoleauth op start timeout=100s
-    pcs resource update openstack-nova-consoleauth op stop timeout=100s
-    pcs resource update openstack-nova-novncproxy op start timeout=100s
-    pcs resource update openstack-nova-novncproxy op stop timeout=100s
-    pcs resource update openstack-nova-scheduler op start timeout=100s
-    pcs resource update openstack-nova-scheduler op stop timeout=100s
+
+    echo "Applying new Pacemaker config"
+    pcs cluster cib-push $pacemaker_dumpfile
 
     echo "Pacemaker running, stopping cluster node and doing full package update"
     node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
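
The rewrite above batches every constraint and timeout change into an offline copy of the CIB and pushes it once, so Pacemaker recomputes cluster state in a single transition instead of once per pcs call. The pattern, reduced to a minimal sketch (the resource name is illustrative):

    cib=$(mktemp)                # scratch copy of the cluster config
    pcs cluster cib "$cib"       # dump the live CIB into the file
    pcs -f "$cib" resource update httpd op start timeout=100s  # edit offline
    pcs cluster cib-push "$cib"  # apply all queued edits at once
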
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index c3b95b9..a0fc758 100644
--- a/overcloud-without-mergepy.yaml
+++ b/overcloud-without-mergepy.yaml
@@ -457,6 +457,14 @@ parameters:
     type: string
     constraints:
       - allowed_values: [ 'basic', 'cadf' ]
+  ManageFirewall:
+    default: false
+    description: Whether to manage IPtables rules.
+    type: boolean
+  PurgeFirewallRules:
+    default: false
+    description: Whether IPtables rules should be purged before setting up the new ones.
+    type: boolean
   MysqlInnodbBufferPoolSize:
     description: >
         Specifies the size of the buffer pool in megabytes. Setting to
@@ -797,6 +805,8 @@ resources:
           ControllerExtraConfig: {get_param: controllerExtraConfig}
           Debug: {get_param: Debug}
           EnableFencing: {get_param: EnableFencing}
+          ManageFirewall: {get_param: ManageFirewall}
+          PurgeFirewallRules: {get_param: PurgeFirewallRules}
           EnableGalera: {get_param: EnableGalera}
           EnableCephStorage: {get_param: ControllerEnableCephStorage}
           EnableSwiftStorage: {get_param: ControllerEnableSwiftStorage}
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 81fa6c1..a7038de 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -278,6 +278,14 @@ parameters:
     type: string
     default: 'regionOne'
     description: Keystone region for endpoint
+  ManageFirewall:
+    default: false
+    description: Whether to manage IPtables rules.
+    type: boolean
+  PurgeFirewallRules:
+    default: false
+    description: Whether IPtables rules should be purged before setting up the new ones.
+    type: boolean
   MysqlClusterUniquePart:
     description: A unique identifier of the MySQL cluster the controller is in.
     type: string
@@ -829,6 +837,8 @@ resources:
         enable_galera: {get_param: EnableGalera}
         enable_ceph_storage: {get_param: EnableCephStorage}
         enable_swift_storage: {get_param: EnableSwiftStorage}
+        manage_firewall: {get_param: ManageFirewall}
+        purge_firewall_rules: {get_param: PurgeFirewallRules}
         mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
         mysql_max_connections: {get_param: MysqlMaxConnections}
         mysql_root_password: {get_param: MysqlRootPassword}
@@ -1284,6 +1294,9 @@ resources:
                 # Redis
                 redis::bind: {get_input: redis_network}
                 redis_vip: {get_input: redis_vip}
+                # Firewall
+                tripleo::firewall::manage_firewall: {get_input: manage_firewall}
+                tripleo::firewall::purge_firewall_rules: {get_input: purge_firewall_rules}
                 # Misc
                 memcached::listen_ip: {get_input: memcached_network}
                 neutron_public_interface_ip: {get_input: neutron_public_interface_ip}
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index a4914c0..f42ddf6 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -127,3 +127,109 @@ tripleo::loadbalancer::heat_cfn: true
 tripleo::loadbalancer::horizon: true
 
 controller_classes: []
+# firewall
+tripleo::firewall::firewall_rules:
+  '101 mongodb_config':
+    port: 27019
+  '102 mongodb_sharding':
+    port: 27018
+  '103 mongod':
+    port: 27017
+  '104 mysql galera':
+    port:
+      - 873
+      - 3306
+      - 4444
+      - 4567
+      - 4568
+      - 9200
+  '105 ntp':
+    port: 123
+    proto: udp
+  '106 vrrp':
+    proto: vrrp
+  '107 haproxy stats':
+    port: 1993
+  '108 redis':
+    port:
+      - 6379
+      - 26379
+  '109 rabbitmq':
+    port:
+      - 5672
+      - 35672
+  '110 ceph':
+    port:
+      - 6789
+      - '6800-6810'
+  '111 keystone':
+    port:
+      - 5000
+      - 13000
+      - 35357
+      - 13357
+  '112 glance':
+    port:
+      - 9292
+      - 9191
+      - 13292
+  '113 nova':
+    port:
+      - 6080
+      - 13080
+      - 8773
+      - 3773
+      - 8774
+      - 13774
+      - 8775
+  '114 neutron server':
+    port:
+      - 9696
+      - 13696
+  '115 neutron dhcp input':
+    proto: 'udp'
+    port: 67
+  '116 neutron dhcp output':
+    proto: 'udp'
+    chain: 'OUTPUT'
+    port: 68
+  '118 neutron vxlan networks':
+    proto: 'udp'
+    port: 4789
+  '119 cinder':
+    port:
+      - 8776
+      - 13776
+  '120 iscsi initiator':
+    port: 3260
+  '121 memcached':
+    port: 11211
+  '122 swift proxy':
+    port:
+      - 8080
+      - 13808
+  '123 swift storage':
+    port:
+      - 873
+      - 6000
+      - 6001
+      - 6002
+  '124 ceilometer':
+    port:
+      - 8777
+      - 13777
+  '125 heat':
+    port:
+      - 8000
+      - 13800
+      - 8003
+      - 13003
+      - 8004
+      - 13004
+  '126 horizon':
+    port:
+      - 80
+      - 443
+  '127 snmp':
+    port: 161
+    proto: 'udp'
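
Each numbered key above becomes one firewall rule resource; assuming the rules are realized via puppetlabs-firewall (as the numbered-title convention suggests), the title ends up as an iptables comment, so the result can be audited on a node deployed with ManageFirewall: true (sketch):

    iptables -S | grep -- '--comment "101 mongodb_config"'
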
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 51f5e88..7f8970c 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -14,6 +14,7 @@
 # under the License.
 
 include ::tripleo::packages
+include ::tripleo::firewall
 
 create_resources(sysctl::value, hiera('sysctl_settings'), {})
 
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index cd41cc7..5818234 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -14,6 +14,7 @@
 # under the License.
 
 include ::tripleo::packages
+include ::tripleo::firewall
 
 create_resources(sysctl::value, hiera('sysctl_settings'), {})
 
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 570c43b..f758c55 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -14,6 +14,7 @@
 # under the License.
 
 include ::tripleo::packages
+include ::tripleo::firewall
 
 if hiera('step') >= 1 {
 
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 2a3f1f9..95b7992 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -19,6 +19,7 @@ Pcmk_resource <| |> {
 }
 
 include ::tripleo::packages
+include ::tripleo::firewall
 
 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
   $pacemaker_master = true
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 5f0b4c8..1eabddf 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -14,6 +14,7 @@
 # under the License.
 
 include ::tripleo::packages
+include ::tripleo::firewall
 
 create_resources(sysctl::value, hiera('sysctl_settings'), {})
 
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 7f24959..2bdd8a9 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -14,6 +14,7 @@
 # under the License.
 
 include ::tripleo::packages
+include ::tripleo::firewall
 
 create_resources(sysctl::value, hiera('sysctl_settings'), {})