Merge "Use default Sensu redact"
author  Jenkins <jenkins@review.openstack.org>
Mon, 14 Nov 2016 13:18:42 +0000 (13:18 +0000)
committer  Gerrit Code Review <review@openstack.org>
Mon, 14 Nov 2016 13:18:42 +0000 (13:18 +0000)
36 files changed:
environments/low-memory-usage.yaml
environments/manila-cephfsnative-config.yaml
environments/manila-generic-config.yaml
environments/manila-netapp-config.yaml
environments/tls-everywhere-endpoints-dns.yaml
extraconfig/tasks/major_upgrade_check.sh
extraconfig/tasks/major_upgrade_compute.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh [new file with mode: 0755]
extraconfig/tasks/major_upgrade_pacemaker.yaml
extraconfig/tasks/pacemaker_resource_restart.sh
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
puppet/deploy-artifacts.sh
puppet/post.j2.yaml
puppet/services/ceph-external.yaml
puppet/services/ceph-rgw.yaml
puppet/services/cinder-api.yaml
puppet/services/gnocchi-api.yaml
puppet/services/heat-api-cfn.yaml
puppet/services/heat-api.yaml
puppet/services/heat-base.yaml
puppet/services/kernel.yaml
puppet/services/manila-api.yaml
puppet/services/neutron-l3.yaml
puppet/services/nova-api.yaml
puppet/services/opendaylight-api.yaml
puppet/services/opendaylight-ovs.yaml
puppet/services/sahara-base.yaml
puppet/services/services.yaml
puppet/services/swift-proxy.yaml
roles_data.yaml

diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml
index ad42868..47b2003 100644 (file)
@@ -13,3 +13,6 @@ parameter_defaults:
 
   ApacheMaxRequestWorkers: 32
   ApacheServerLimit: 32
+
+  ControllerExtraConfig:
+      'nova::network::neutron::neutron_url_timeout': '60'
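The new ControllerExtraConfig entry is plain hiera data, so it only takes effect when this environment file is passed to the deploy command. A minimal usage sketch (standard TripleO CLI; adjust paths and extra environments to your deployment):

    # Apply the low-memory tuning, including the new neutron_url_timeout
    # override, at deploy or stack-update time.
    openstack overcloud deploy --templates \
      -e environments/low-memory-usage.yaml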
diff --git a/environments/manila-cephfsnative-config.yaml b/environments/manila-cephfsnative-config.yaml
index 825a506..5632d8d 100644 (file)
@@ -1,11 +1,11 @@
 # A Heat environment file which can be used to enable a
 # a Manila CephFS Native driver backend.
 resource_registry:
-  OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
-  OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+  OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+  OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
   # Only manila-share is pacemaker managed:
-  OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
-  OS::Tripleo::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
+  OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+  OS::TripleO::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
 
 
 parameter_defaults:
diff --git a/environments/manila-generic-config.yaml b/environments/manila-generic-config.yaml
index 9344bc6..65884a9 100644 (file)
@@ -1,10 +1,10 @@
 # This environment file enables Manila with the Generic backend.
 resource_registry:
-  OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
-  OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+  OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+  OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
   # Only manila-share is pacemaker managed:
-  OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
-  OS::Tripleo::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
+  OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+  OS::TripleO::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
 
 parameter_defaults:
   ManilaServiceInstanceUser: ''
diff --git a/environments/manila-netapp-config.yaml b/environments/manila-netapp-config.yaml
index 3dadfe5..7eb1494 100644 (file)
@@ -1,10 +1,10 @@
 # This environment file enables Manila with the Netapp backend.
 resource_registry:
-  OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
-  OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+  OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+  OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
   # Only manila-share is pacemaker managed:
-  OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
-  OS::Tripleo::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
+  OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+  OS::TripleO::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
 
 parameter_defaults:
   ManilaNetappBackendName: tripleo_netapp
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index c3fbaf4..cc1915f 100644 (file)
@@ -5,6 +5,9 @@ parameter_defaults:
     AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
     AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
     AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+    BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+    BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+    BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
     CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
     CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
     CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
index b65f691..8bdff5e 100755 (executable)
@@ -18,14 +18,8 @@ check_pcsd()
     fi
 }
 
-check_disk_for_mysql_dump()
+mysql_need_update()
 {
-    # Where to backup current database if mysql need to be upgraded
-    MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-    MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-    # Spare disk ratio for extra safety
-    MYSQL_BACKUP_SIZE_RATIO=1.2
-
     # Shall we upgrade mysql data directory during the stack upgrade?
     if [ "$mariadb_do_major_upgrade" = "auto" ]; then
         ret=$(is_mysql_upgrade_needed)
@@ -40,6 +34,17 @@ check_disk_for_mysql_dump()
     else
         DO_MYSQL_UPGRADE=1
     fi
+}
+
+check_disk_for_mysql_dump()
+{
+    # Where to backup current database if mysql need to be upgraded
+    MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+    MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+    # Spare disk ratio for extra safety
+    MYSQL_BACKUP_SIZE_RATIO=1.2
+
+    mysql_need_update
 
     if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
         if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
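Splitting mysql_need_update() out of check_disk_for_mysql_dump() lets later upgrade steps find out whether DO_MYSQL_UPGRADE applies without repeating the disk-space validation. A minimal sketch of how the two entry points are consumed after this split, assuming the companion function files are available (in the real templates the files are concatenated by Heat rather than sourced):

    #!/bin/bash
    set -eu
    # The step scripts receive these functions via Heat's list_join together
    # with pacemaker_common_functions.sh and the migration helpers (which
    # provide is_mysql_upgrade_needed); sourcing here only keeps the sketch
    # self-contained.
    source extraconfig/tasks/major_upgrade_check.sh

    mariadb_do_major_upgrade=auto    # the hiera-driven knob the functions read

    # Step-1 style: full check, which also validates backup disk space.
    check_disk_for_mysql_dump        # calls mysql_need_update internally

    # Step-2 style: only decide whether the datadir must be rebuilt.
    mysql_need_update
    echo "DO_MYSQL_UPGRADE=${DO_MYSQL_UPGRADE}"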
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
index f5105a1..b0d4280 100644 (file)
@@ -35,6 +35,10 @@ fi
 yum -y install python-zaqarclient  # needed for os-collect-config
 yum -y update
 
+# Due to bug#1640177 we need to restart compute agent
+echo "Restarting openstack ceilometer agent compute"
+systemctl restart openstack-ceilometer-compute
+
 ENDOFCAT
 
 # ensure the permissions are OK
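The restart is appended to the script that the heredoc writes onto each compute node. A small follow-up check in the same spirit, purely illustrative and not part of the change, to confirm the agent survived the restart forced by bug#1640177:

    # Restart as the generated compute script does, then verify the unit
    # actually came back; the journalctl tail is only for troubleshooting.
    systemctl restart openstack-ceilometer-compute
    if [ "$(systemctl is-active openstack-ceilometer-compute)" != "active" ]; then
        echo "ERROR: openstack-ceilometer-compute did not come back" >&2
        journalctl -u openstack-ceilometer-compute --no-pager -n 20
        exit 1
    fi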
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index fbdbc30..739c01d 100755 (executable)
@@ -2,8 +2,6 @@
 
 set -eu
 
-cluster_sync_timeout=1800
-
 check_cluster
 check_pcsd
 if [[ -n $(is_bootstrap_node) ]]; then
@@ -19,6 +17,11 @@ check_disk_for_mysql_dump
 # at the end of this script
 if [[ -n $(is_bootstrap_node) ]]; then
     STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
+    # We create this empty file if stonith was set to true so we can reenable stonith in step2
+    rm -f /var/tmp/stonith-true
+    if [ $STONITH_STATE == "true" ]; then
+        touch /var/tmp/stonith-true
+    fi
     pcs property set stonith-enabled=false
 fi
 
@@ -31,178 +34,3 @@ if [[ -n $(is_bootstrap_node) ]]; then
     rabbitmq_mitaka_newton_upgrade
 fi
 
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
-    manage_systemd_service stop "${service%%-clone}"
-    # So the reason for not reusing check_resource_systemd is that
-    # I have observed systemctl is-active returning unknown with at least
-    # one service that was stopped (See LP 1627254)
-    timeout=600
-    tstart=$(date +%s)
-    tend=$(( $tstart + $timeout ))
-    check_interval=3
-    while (( $(date +%s) < $tend )); do
-      if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
-        echo "$service still active, sleeping $check_interval seconds."
-        sleep $check_interval
-      else
-        # we do not care if it is inactive, unknown or failed as long as it is
-        # not running
-        break
-      fi
-
-    done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-if [[ -n $(is_bootstrap_node) ]]; then
-    if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-        mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
-        cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
-    fi
-
-    pcs resource disable redis
-    check_resource redis stopped 600
-    pcs resource disable rabbitmq
-    check_resource rabbitmq stopped 600
-    pcs resource disable galera
-    check_resource galera stopped 600
-    pcs resource disable openstack-cinder-volume
-    check_resource openstack-cinder-volume stopped 600
-    # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
-    #   https://bugzilla.redhat.com/show_bug.cgi?id=1330688
-    for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
-      pcs resource disable $vip
-      check_resource $vip stopped 60
-    done
-    pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
-    sleep 5
-    tnow=$(date +%s)
-    if (( tnow-tstart > cluster_sync_timeout )) ; then
-        echo_error "ERROR: cluster shutdown timed out"
-        exit 1
-    fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-    if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
-        echo_error "ERROR: mysql backup dir already exist"
-        exit 1
-    fi
-    mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
-    echo "Manual upgrade of openvswitch - restart in postun detected"
-    mkdir OVS_UPGRADE || true
-    pushd OVS_UPGRADE
-    echo "Attempting to downloading latest openvswitch with yumdownloader"
-    yumdownloader --resolve openvswitch
-    echo "Updating openvswitch with nopostun option"
-    rpm -U --replacepkgs --nopostun ./*.rpm
-    popd
-else
-    echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-yum -y install python-zaqarclient  # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-    # Scripts run via heat have no HOME variable set and this confuses
-    # mysqladmin
-    export HOME=/root
-
-    mkdir /var/lib/mysql || /bin/true
-    chown mysql:mysql /var/lib/mysql
-    chmod 0755 /var/lib/mysql
-    restorecon -R /var/lib/mysql/
-    mysql_install_db --datadir=/var/lib/mysql --user=mysql
-    chown -R mysql:mysql /var/lib/mysql/
-
-    if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
-        mysqld_safe --wsrep-new-cluster &
-        # We have a populated /root/.my.cnf with root/password here so
-        # we need to temporarily rename it because the newly created
-        # db is empty and no root password is set
-        mv /root/.my.cnf /root/.my.cnf.temporary
-        timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
-        mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
-        mv /root/.my.cnf.temporary /root/.my.cnf
-        mysqladmin -u root shutdown
-        # The import was successful so we may remove the folder
-        rm -r "$MYSQL_BACKUP_DIR"
-    fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller.  Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-    rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
-    if [ $STONITH_STATE == "true" ]; then
-        pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
-    fi
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini  --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini  --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
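The new marker file carries the pre-upgrade stonith setting across the gap between step 1 and the step that brings the cluster back. Condensed from the hunks above and the matching hunk in major_upgrade_controller_pacemaker_2.sh:

    # Step 1: remember whether stonith was enabled, then switch it off.
    STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
    rm -f /var/tmp/stonith-true
    if [ "$STONITH_STATE" = "true" ]; then
        touch /var/tmp/stonith-true
    fi
    pcs property set stonith-enabled=false

    # Step 2, after the packages are upgraded: restore the saved state.
    if [ -f /var/tmp/stonith-true ]; then
        pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
    fi
    rm -f /var/tmp/stonith-true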
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index 3706151..7cc6735 100755 (executable)
 
 set -eu
 
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
+cluster_sync_timeout=1800
 
-if [[ -n $(is_bootstrap_node) ]]; then
-    pcs cluster start --all
+# After migrating the cluster to HA-NG the services not under pacemaker's control
+# are still up and running. We need to stop them explicitely otherwise during the yum
+# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
+# is going to take a long time because rabbit is down. By having the service stopped
+# systemctl try-restart is a noop
 
+for service in $(services_to_migrate); do
+    manage_systemd_service stop "${service%%-clone}"
+    # So the reason for not reusing check_resource_systemd is that
+    # I have observed systemctl is-active returning unknown with at least
+    # one service that was stopped (See LP 1627254)
+    timeout=600
     tstart=$(date +%s)
-    while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
-        sleep 5
-        tnow=$(date +%s)
-        if (( tnow-tstart > cluster_form_timeout )) ; then
-            echo_error "ERROR: timed out forming the cluster"
-            exit 1
-        fi
+    tend=$(( $tstart + $timeout ))
+    check_interval=3
+    while (( $(date +%s) < $tend )); do
+      if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
+        echo "$service still active, sleeping $check_interval seconds."
+        sleep $check_interval
+      else
+        # we do not care if it is inactive, unknown or failed as long as it is
+        # not running
+        break
+      fi
+
     done
+done
 
-    if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
-        echo_error "ERROR: timed out waiting for cluster to finish transition"
-        exit 1
+# In case the mysql package is updated, the database on disk must be
+# upgraded as well. This typically needs to happen during major
+# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
+#
+# Because in-place upgrades are not supported across 2+ major versions
+# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
+# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
+#
+# The default is to determine automatically if upgrade is needed based
+# on mysql package versionning, but this can be overriden manually
+# to support specific upgrade scenario
+
+# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
+# later
+mysql_need_update
+
+if [[ -n $(is_bootstrap_node) ]]; then
+    if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+        mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
+        cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
     fi
 
-    for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
-      pcs resource enable $vip
-      check_resource_pacemaker $vip started 60
+    pcs resource disable redis
+    check_resource redis stopped 600
+    pcs resource disable rabbitmq
+    check_resource rabbitmq stopped 600
+    pcs resource disable galera
+    check_resource galera stopped 600
+    pcs resource disable openstack-cinder-volume
+    check_resource openstack-cinder-volume stopped 600
+    # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
+    #   https://bugzilla.redhat.com/show_bug.cgi?id=1330688
+    for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
+      pcs resource disable $vip
+      check_resource $vip stopped 60
     done
+    pcs cluster stop --all
 fi
 
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
 
-if [[ -n $(is_bootstrap_node) ]]; then
-    tstart=$(date +%s)
-    while ! clustercheck; do
-        sleep 5
-        tnow=$(date +%s)
-        if (( tnow-tstart > galera_sync_timeout )) ; then
-            echo_error "ERROR galera sync timed out"
-            exit 1
-        fi
-    done
+# Swift isn't controlled by pacemaker
+systemctl_swift stop
 
-    # Run all the db syncs
-    # TODO: check if this can be triggered in puppet and removed from here
-    ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf
-    cinder-manage db sync
-    glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
-    heat-manage --config-file /etc/heat/heat.conf db_sync
-    keystone-manage db_sync
-    neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
-    nova-manage db sync
-    nova-manage api_db sync
-    nova-manage db online_data_migrations
-    sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
+tstart=$(date +%s)
+while systemctl is-active pacemaker; do
+    sleep 5
+    tnow=$(date +%s)
+    if (( tnow-tstart > cluster_sync_timeout )) ; then
+        echo_error "ERROR: cluster shutdown timed out"
+        exit 1
+    fi
+done
+
+# The reason we do an sql dump *and* we move the old dir out of
+# the way is because it gives us an extra level of safety in case
+# something goes wrong during the upgrade. Once the restore is
+# successful we go ahead and remove it. If the directory exists
+# we bail out as it means the upgrade process had issues in the last
+# run.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+    if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
+        echo_error "ERROR: mysql backup dir already exist"
+        exit 1
+    fi
+    mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+    echo "Manual upgrade of openvswitch - restart in postun detected"
+    mkdir OVS_UPGRADE || true
+    pushd OVS_UPGRADE
+    echo "Attempting to downloading latest openvswitch with yumdownloader"
+    yumdownloader --resolve openvswitch
+    echo "Updating openvswitch with nopostun option"
+    rpm -U --replacepkgs --nopostun ./*.rpm
+    popd
+else
+    echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
 fi
+
+yum -y install python-zaqarclient  # needed for os-collect-config
+yum -y -q update
+
+# We need to ensure at least those two configuration settings, otherwise
+# mariadb 10.1+ won't activate galera replication.
+# wsrep_cluster_address must only be set though, its value does not
+# matter because it's overriden by the galera resource agent.
+cat >> /etc/my.cnf.d/galera.cnf <<EOF
+[mysqld]
+wsrep_on = ON
+wsrep_cluster_address = gcomm://localhost
+EOF
+
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+    # Scripts run via heat have no HOME variable set and this confuses
+    # mysqladmin
+    export HOME=/root
+
+    mkdir /var/lib/mysql || /bin/true
+    chown mysql:mysql /var/lib/mysql
+    chmod 0755 /var/lib/mysql
+    restorecon -R /var/lib/mysql/
+    mysql_install_db --datadir=/var/lib/mysql --user=mysql
+    chown -R mysql:mysql /var/lib/mysql/
+
+    if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+        mysqld_safe --wsrep-new-cluster &
+        # We have a populated /root/.my.cnf with root/password here so
+        # we need to temporarily rename it because the newly created
+        # db is empty and no root password is set
+        mv /root/.my.cnf /root/.my.cnf.temporary
+        timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
+        mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
+        mv /root/.my.cnf.temporary /root/.my.cnf
+        mysqladmin -u root shutdown
+        # The import was successful so we may remove the folder
+        rm -r "$MYSQL_BACKUP_DIR"
+    fi
+fi
+
+# If we reached here without error we can safely blow away the origin
+# mysql dir from every controller
+
+# TODO: What if the upgrade fails on the bootstrap node, but not on
+# this controller.  Data may be lost.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+    rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
+
+# Let's reset the stonith back to true if it was true, before starting the cluster
+if [[ -n $(is_bootstrap_node) ]]; then
+    if [ -f /var/tmp/stonith-true ]; then
+        pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
+    fi
+    rm -f /var/tmp/stonith-true
+fi
+
+# Pin messages sent to compute nodes to kilo, these will be upgraded later
+crudini  --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
+# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
+crudini  --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
+# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
+crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
+# LP: 1615035, required only for M/N upgrade.
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
+# LP: 1627450, required only for M/N upgrade
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
+
+crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
+
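The loop that waits for each migrated service to stop is a bounded poll on systemctl is-active, used instead of check_resource_systemd because of LP 1627254. The same pattern as a reusable helper, for illustration only (function and unit names are not part of the change):

    # Bounded wait for a systemd unit to leave the "active" state, the same
    # pattern used above for the migrated services.
    wait_for_unit_stopped() {
        local unit=$1 timeout=${2:-600} check_interval=3
        local tend=$(( $(date +%s) + timeout ))
        while (( $(date +%s) < tend )); do
            if [ "$(systemctl is-active "$unit")" = "active" ]; then
                echo "$unit still active, sleeping $check_interval seconds."
                sleep "$check_interval"
            else
                # inactive, unknown or failed all count as "not running" here
                break
            fi
        done
    }

    wait_for_unit_stopped openstack-sahara-api 600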
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
index d2cb955..3706151 100755 (executable)
@@ -2,16 +2,67 @@
 
 set -eu
 
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
+cluster_form_timeout=600
+cluster_settle_timeout=1800
+galera_sync_timeout=600
+
+if [[ -n $(is_bootstrap_node) ]]; then
+    pcs cluster start --all
+
+    tstart=$(date +%s)
+    while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
+        sleep 5
+        tnow=$(date +%s)
+        if (( tnow-tstart > cluster_form_timeout )) ; then
+            echo_error "ERROR: timed out forming the cluster"
+            exit 1
+        fi
+    done
+
+    if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
+        echo_error "ERROR: timed out waiting for cluster to finish transition"
+        exit 1
+    fi
+
+    for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
+      pcs resource enable $vip
+      check_resource_pacemaker $vip started 60
+    done
+fi
+
+start_or_enable_service galera
+check_resource galera started 600
 start_or_enable_service redis
 check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
+# We need mongod which is now a systemd service up and running before calling
+# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
+# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
+# we should be good.
+# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
+systemctl start mongod
+check_resource mongod started 600
 
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
+if [[ -n $(is_bootstrap_node) ]]; then
+    tstart=$(date +%s)
+    while ! clustercheck; do
+        sleep 5
+        tnow=$(date +%s)
+        if (( tnow-tstart > galera_sync_timeout )) ; then
+            echo_error "ERROR galera sync timed out"
+            exit 1
+        fi
+    done
 
-# Swift isn't controled by pacemaker
-systemctl_swift start
+    # Run all the db syncs
+    # TODO: check if this can be triggered in puppet and removed from here
+    ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf
+    cinder-manage db sync
+    glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
+    heat-manage --config-file /etc/heat/heat.conf db_sync
+    keystone-manage db_sync
+    neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+    nova-manage db sync
+    nova-manage api_db sync
+    nova-manage db online_data_migrations
+    sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
+fi
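The cluster restart moved into this step waits first for every node to join and then for pacemaker to settle before re-enabling the VIPs. The pattern, condensed from the hunk above with a plain echo in place of echo_error:

    # Bring the cluster up, wait for every node to join, then let pacemaker
    # settle; timeouts mirror the values introduced in this step.
    cluster_form_timeout=600
    cluster_settle_timeout=1800

    pcs cluster start --all

    tstart=$(date +%s)
    while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
        sleep 5
        if (( $(date +%s) - tstart > cluster_form_timeout )); then
            echo "ERROR: timed out forming the cluster" >&2
            exit 1
        fi
    done

    if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
        echo "ERROR: timed out waiting for cluster to finish transition" >&2
        exit 1
    fi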
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
index fa95f1f..d2cb955 100755 (executable)
@@ -2,7 +2,16 @@
 
 set -eu
 
-if [[ -n $(is_bootstrap_node) ]]; then
-  # run gnocchi upgrade
-  gnocchi-upgrade
-fi
+start_or_enable_service rabbitmq
+check_resource rabbitmq started 600
+start_or_enable_service redis
+check_resource redis started 600
+start_or_enable_service openstack-cinder-volume
+check_resource openstack-cinder-volume started 600
+
+# start httpd so keystone is available for gnocchi
+# upgrade to run.
+systemctl start httpd
+
+# Swift isn't controled by pacemaker
+systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
index d569084..fa95f1f 100755 (executable)
@@ -2,14 +2,7 @@
 
 set -eu
 
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
-    services=${services%%openstack-sahara*}
+if [[ -n $(is_bootstrap_node) ]]; then
+  # run gnocchi upgrade
+  gnocchi-upgrade
 fi
-for service in $services; do
-    manage_systemd_service start "${service%%-clone}"
-    check_resource_systemd "${service%%-clone}" started 600
-done
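gnocchi-upgrade is gated on the bootstrap node so the database migration runs once per cluster. The is_bootstrap_node helper comes from pacemaker_common_functions.sh (not shown in this diff); judging by the hiera/facter comparison the other steps use inline, it amounts to roughly the following sketch:

    # Illustrative stand-in for is_bootstrap_node, modelled on the hiera/facter
    # comparison the other steps use directly; the real helper lives in
    # pacemaker_common_functions.sh.
    is_bootstrap_node() {
        if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
            echo true
        fi
    }

    if [[ -n $(is_bootstrap_node) ]]; then
        gnocchi-upgrade   # run the database migration exactly once per cluster
    fi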
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
new file mode 100755 (executable)
index 0000000..d569084
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -eu
+
+# We need to start the systemd services we explicitely stopped at step _1.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+services=$(services_to_migrate)
+if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
+    services=${services%%openstack-sahara*}
+fi
+for service in $services; do
+    manage_systemd_service start "${service%%-clone}"
+    check_resource_systemd "${service%%-clone}" started 600
+done
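The ${service%%-clone} expansion strips pacemaker's clone suffix so that manage_systemd_service and check_resource_systemd receive the plain unit name. A quick illustration (unit names are only examples):

    # "%%-clone" removes the trailing "-clone" from the resource name, leaving
    # plain systemd unit names untouched.
    service=openstack-sahara-engine-clone
    echo "${service%%-clone}"    # -> openstack-sahara-engine

    service=openstack-nova-api
    echo "${service%%-clone}"    # -> openstack-nova-api (no suffix, unchanged)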
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index e13aada..b0418a5 100644 (file)
@@ -113,7 +113,20 @@ resources:
       config:
         list_join:
         - ''
-        - - get_file: pacemaker_common_functions.sh
+        - - str_replace:
+              template: |
+                #!/bin/bash
+                upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+              params:
+                UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+          - str_replace:
+              template: |
+                #!/bin/bash
+                mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
+              params:
+                MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
+          - get_file: pacemaker_common_functions.sh
+          - get_file: major_upgrade_check.sh
           - get_file: major_upgrade_pacemaker_migrations.sh
           - get_file: major_upgrade_controller_pacemaker_2.sh
 
@@ -164,6 +177,25 @@ resources:
       input_values: {get_param: input_values}
 
   ControllerPacemakerUpgradeConfig_Step5:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        list_join:
+        - ''
+        - - get_file: pacemaker_common_functions.sh
+          - get_file: major_upgrade_pacemaker_migrations.sh
+          - get_file: major_upgrade_controller_pacemaker_5.sh
+
+  ControllerPacemakerUpgradeDeployment_Step5:
+    type: OS::Heat::SoftwareDeploymentGroup
+    depends_on: ControllerPacemakerUpgradeDeployment_Step4
+    properties:
+      servers:  {get_param: [servers, Controller]}
+      config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
+      input_values: {get_param: input_values}
+
+  ControllerPacemakerUpgradeConfig_Step6:
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
@@ -178,12 +210,12 @@ resources:
                 KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
           - get_file: pacemaker_common_functions.sh
           - get_file: major_upgrade_pacemaker_migrations.sh
-          - get_file: major_upgrade_controller_pacemaker_5.sh
+          - get_file: major_upgrade_controller_pacemaker_6.sh
 
-  ControllerPacemakerUpgradeDeployment_Step5:
+  ControllerPacemakerUpgradeDeployment_Step6:
     type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: ControllerPacemakerUpgradeDeployment_Step4
+    depends_on: ControllerPacemakerUpgradeDeployment_Step5
     properties:
       servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
+      config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
       input_values: {get_param: input_values}
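Because list_join simply concatenates the str_replace fragments and the get_file contents, the script each controller runs at step 2 begins with the substituted shell variables followed by the function libraries and the step body. Roughly, with placeholder parameter values:

    #!/bin/bash
    upgrade_level_nova_compute='auto'      # substituted from UpgradeLevelNovaCompute
    #!/bin/bash
    mariadb_do_major_upgrade='auto'        # substituted from MySqlMajorUpgrade
    # ...contents of pacemaker_common_functions.sh...
    # ...contents of major_upgrade_check.sh...
    # ...contents of major_upgrade_pacemaker_migrations.sh...
    # ...contents of major_upgrade_controller_pacemaker_2.sh...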
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index 3da7efe..8500bce 100755 (executable)
@@ -4,11 +4,14 @@ set -eux
 
 # Run if pacemaker is running, we're the bootstrap node,
 # and we're updating the deployment (not creating).
-if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
+
+RESTART_FOLDER="/var/lib/tripleo/pacemaker-restarts"
+
+if [[ -d "$RESTART_FOLDER" && -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
 
     TIMEOUT=600
-    SERVICES_TO_RESTART="$(ls /var/lib/tripleo/pacemaker-restarts)"
     PCS_STATUS_OUTPUT="$(pcs status)"
+    SERVICES_TO_RESTART="$(ls $RESTART_FOLDER)"
 
     for service in $SERVICES_TO_RESTART; do
         if ! echo "$PCS_STATUS_OUTPUT" | grep $service; then
@@ -20,6 +23,12 @@ if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
     for service in $SERVICES_TO_RESTART; do
         echo "Restarting $service..."
         pcs resource restart --wait=$TIMEOUT $service
-        rm -f /var/lib/tripleo/pacemaker-restarts/$service
+        rm -f "$RESTART_FOLDER"/$service
     done
+
+fi
+
+haproxy_status=$(systemctl is-active haproxy)
+if [ "$haproxy_status" = "active" ]; then
+    systemctl reload haproxy
 fi
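The restart queue consumed above is nothing more than flag files named after pacemaker resources, and the script now also reloads haproxy when it runs outside pacemaker. A sketch of how a request is queued and then serviced (the resource name is illustrative):

    RESTART_FOLDER=/var/lib/tripleo/pacemaker-restarts

    # Queue a restart request: one empty file per pacemaker resource name.
    mkdir -p "$RESTART_FOLDER"
    touch "$RESTART_FOLDER/haproxy-clone"

    # What the script above then does with it on the bootstrap node:
    pcs resource restart --wait=600 haproxy-clone
    rm -f "$RESTART_FOLDER/haproxy-clone"

    # Independently of pacemaker, reload haproxy when it runs as a plain
    # systemd unit.
    if [ "$(systemctl is-active haproxy)" = "active" ]; then
        systemctl reload haproxy
    fi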
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index f06f51e..3e20117 100644 (file)
@@ -57,6 +57,9 @@ resource_registry:
   OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
   OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
 
+  OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
+  OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
+
   # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
   # phase, e.g when puppet is applied, but after the pre_deploy phase.  Useful when
   # configuration with knowledge of all nodes in the cluster is required vs single
@@ -181,12 +184,12 @@ resource_registry:
   # Services that are disabled by default (use relevant environment files):
   OS::TripleO::Services::FluentdClient: OS::Heat::None
   OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml
-  OS::Tripleo::Services::ManilaApi: OS::Heat::None
-  OS::Tripleo::Services::ManilaScheduler: OS::Heat::None
-  OS::Tripleo::Services::ManilaShare: OS::Heat::None
-  OS::Tripleo::Services::ManilaBackendGeneric: OS::Heat::None
-  OS::Tripleo::Services::ManilaBackendNetapp: OS::Heat::None
-  OS::Tripleo::Services::ManilaBackendCephFs: OS::Heat::None
+  OS::TripleO::Services::ManilaApi: OS::Heat::None
+  OS::TripleO::Services::ManilaScheduler: OS::Heat::None
+  OS::TripleO::Services::ManilaShare: OS::Heat::None
+  OS::TripleO::Services::ManilaBackendGeneric: OS::Heat::None
+  OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None
+  OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None
   OS::TripleO::Services::BarbicanApi: OS::Heat::None
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index 47c73f8..64bed27 100644 (file)
@@ -231,8 +231,19 @@ resources:
       config: {get_attr: [allNodesConfig, config_id]}
       servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
       input_values:
-        bootstrap_nodeid: {get_attr: [{{role.name}}, resource.0.hostname]}
-        bootstrap_nodeid_ip: {get_attr: [{{role.name}}, resource.0.ip_address]}
+        # Note we have to use yaql to look up the first hostname/ip in the
+        # list because heat path based attributes operate on the attribute
+        # inside the ResourceGroup, not the exposed list ref discussion in
+        # https://bugs.launchpad.net/heat/+bug/1640488
+        # The coalesce is needed because $.data is None during heat validation
+        bootstrap_nodeid:
+          yaql:
+            expression: coalesce($.data, []).first(null)
+            data: {get_attr: [{{role.name}}, hostname]}
+        bootstrap_nodeid_ip:
+          yaql:
+            expression: coalesce($.data, []).first(null)
+            data: {get_attr: [{{role.name}}, ip_address]}
 
   {{role.name}}AllNodesValidationDeployment:
     type: OS::Heat::StructuredDeployments
@@ -555,60 +566,6 @@ outputs:
   KeystoneAdminVip:
     description: Keystone Admin VIP endpoint
     value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
-  PublicVip:
-    description: Controller VIP for public API endpoints
-    value: {get_attr: [VipMap, net_ip_map, external]}
-  AodhInternalVip:
-    description: VIP for Aodh API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]}
-  BarbicanInternalVip:
-    description: VIP for Barbican API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, BarbicanApiNetwork]}]}
-  CeilometerInternalVip:
-    description: VIP for Ceilometer API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
-  CephRgwInternalVip:
-    description: VIP for Ceph RGW internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephRgwNetwork]}]}
-  CinderInternalVip:
-    description: VIP for Cinder API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
-  GlanceInternalVip:
-    description: VIP for Glance API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]}
-  GnocchiInternalVip:
-    description: VIP for Gnocchi API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]}
-  MistralInternalVip:
-    description: VIP for Mistral API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MistralApiNetwork]}]}
-  HeatInternalVip:
-    description: VIP for Heat API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]}
-  IronicInternalVip:
-    description: VIP for Ironic API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]}
-  KeystoneInternalVip:
-    description: VIP for Keystone API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
-  ManilaInternalVip:
-    description: VIP for Manila API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]}
-  NeutronInternalVip:
-    description: VIP for Neutron API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]}
-  NovaInternalVip:
-    description: VIP for Nova API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]}
-  OpenDaylightInternalVip:
-    description: VIP for OpenDaylight API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]}
-  SaharaInternalVip:
-    description: VIP for Sahara API internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]}
-  SwiftInternalVip:
-    description: VIP for Swift Proxy internal endpoint
-    value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]}
   EndpointMap:
     description: |
       Mapping of the resources with the needed info for their endpoints.
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
index 22fde9a..8bcbbf4 100644 (file)
@@ -8,7 +8,7 @@ trap cleanup EXIT
 
 if [ -n "$artifact_urls" ]; then
   for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
-    curl -o $TMP_DATA/file_data "$artifact_urls"
+    curl --globoff -o $TMP_DATA/file_data "$artifact_urls"
     if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
       yum install -y $TMP_DATA/file_data
     elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
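--globoff makes curl treat [ ] and { } in the URL literally instead of as globbing ranges, which is what breaks artifact URLs that contain a bracketed IPv6 host. For example (URL is illustrative):

    # Without --globoff curl interprets the brackets as a glob range and
    # errors out; with it the literal IPv6 host is used.
    curl --globoff -o /tmp/file_data "http://[2001:db8::1]:8080/v1/AUTH_demo/artifacts/puppet-modules.tar.gz"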
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 65c96ac..8218f41 100644 (file)
@@ -47,6 +47,15 @@ resources:
     properties:
       StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
 
+  {% if role.name == 'Controller' %}
+  ControllerPrePuppet:
+    type: OS::TripleO::Tasks::ControllerPrePuppet
+    properties:
+      servers: {get_param: [servers, Controller]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+  {% endif %}
+
   # Step through a series of configuration steps
   {{role.name}}Deployment_Step1:
     type: OS::Heat::StructuredDeploymentGroup
@@ -136,4 +145,16 @@ resources:
     type: OS::TripleO::NodeExtraConfigPost
     properties:
         servers: {get_param: [servers, {{role.name}}]}
+
+  {% if role.name == 'Controller' %}
+  ControllerPostPuppet:
+    depends_on:
+      - ControllerExtraConfigPost
+    type: OS::TripleO::Tasks::ControllerPostPuppet
+    properties:
+      servers: {get_param: [servers, Controller]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+  {% endif %}
+
 {% endfor %}
diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml
index 7d75074..9120687 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
 
 description: >
   Ceph External service.
@@ -27,9 +27,20 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
+  GlanceBackend:
+    default: swift
+    description: The short name of the Glance backend to use. Should be one
+      of swift, rbd, or file
+    type: string
+    constraints:
+    - allowed_values: ['swift', 'file', 'rbd']
   GnocchiRbdPoolName:
     default: metrics
     type: string
+  NovaEnableRbdBackend:
+    default: false
+    description: Whether to enable or not the Rbd backend for Nova
+    type: boolean
   NovaRbdPoolName:
     default: vms
     type: string
@@ -51,6 +62,16 @@ parameters:
     default: 'overcloud-ceph-external'
     type: string
 
+conditions:
+  glance_multiple_locations:
+    and:
+    - equals:
+      - get_param: GlanceBackend
+      - rbd
+    - equals:
+      - get_param: NovaEnableRbdBackend
+      - true
+
 outputs:
   role_data:
     description: Role data for the Ceph External service.
@@ -79,6 +100,7 @@ outputs:
               GLANCE_POOL: {get_param: GlanceRbdPoolName}
               GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
       service_config_settings:
-        get_attr: [CephBase, role_data, service_config_settings]
+        glance_api:
+          glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
       step_config: |
         include ::tripleo::profile::base::ceph::client
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index 18a4b78..89c1a5e 100644 (file)
@@ -55,15 +55,9 @@ outputs:
           - tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey}
             tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken}
             tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
-            ceph::profile::params::frontend_type: 'civetweb'
-            ceph_rgw_civetweb_bind_address: {get_param: [ServiceNetMap, CephRgwNetwork]}
-            ceph::profile::params::rgw_frontends:
-              list_join:
-               - ''
-               - - 'civetweb port='
-                 - '%{hiera("ceph_rgw_civetweb_bind_address")}'
-                 - ':'
-                 - {get_param: [EndpointMap, CephRgwInternal, port]}
+            tripleo::profile::base::ceph::rgw::civetweb_bind_ip: {get_param: [ServiceNetMap, CephRgwNetwork]}
+            tripleo::profile::base::ceph::rgw::civetweb_bind_port: {get_param: [EndpointMap, CephRgwInternal, port]}
+            ceph::params::user_radosgw: ceph
             tripleo.ceph_rgw.firewall_rules:
               '122 ceph rgw':
                 dport: {get_param: [EndpointMap, CephRgwInternal, port]}
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index fe48667..803d8b8 100644 (file)
@@ -43,6 +43,9 @@ parameters:
     type: string
     description: Set the number of workers for cinder::wsgi::apache
     default: '"%{::os_workers}"'
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
 conditions:
   cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]}
@@ -55,6 +58,7 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
   CinderBase:
     type: ./cinder-base.yaml
@@ -94,21 +98,26 @@ outputs:
                 dport:
                   - 8776
                   - 13776
+            cinder::api::bind_host:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]}
+            cinder::wsgi::apache::ssl: {get_param: EnableInternalTLS}
+            cinder::api::service_name: 'httpd'
             # NOTE: bind IP is found in Heat replacing the network name with the local node IP
             # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
             # internal_api_uri -> [IP]
             # internal_api_subnet - > IP/CIDR
-            cinder::api::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
-            cinder::api::service_name: 'httpd'
-            cinder::wsgi::apache::ssl: false
             cinder::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
             cinder::wsgi::apache::servername:
               str_replace:
                 template:
                   '"%{::fqdn_$NETWORK}"'
                 params:
-                  $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+                  $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]}
           -
             if:
             - cinder_workers_zero
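Switching bind_host and servername to the "%{::fqdn_$NETWORK}" hiera interpolation keys the Apache vhost on the node's internal-API FQDN, which is what certificate validation needs once EnableInternalTLS is set. A quick post-deploy sanity check on a controller; the vhost file name follows puppetlabs-apache's 10-<name>.conf convention and is an assumption:

    # After the puppet run, confirm the cinder API vhost advertises the
    # internal-API FQDN and that the API port from the firewall rule is bound.
    grep -ri servername /etc/httpd/conf.d/*cinder*.conf
    ss -tlnp | grep ':8776'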
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index e339776..ac15de4 100644 (file)
@@ -41,6 +41,9 @@ parameters:
     default:
       tag: openstack.gnocchi.api
       path: /var/log/gnocchi/app.log
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
 resources:
 
@@ -57,6 +60,7 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
 outputs:
   role_data:
@@ -83,7 +87,7 @@ outputs:
             gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
             gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
             gnocchi::keystone::authtoken::project_name: 'service'
-            gnocchi::wsgi::apache::ssl: false
+            gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
             gnocchi::wsgi::apache::servername:
               str_replace:
                 template:
@@ -98,7 +102,12 @@ outputs:
             # internal_api_uri -> [IP]
             # internal_api_subnet - > IP/CIDR
             gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
-            gnocchi::api::host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
+            gnocchi::api::host:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
 
             gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
             gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 1a86ec7..12d4a6a 100644 (file)
@@ -76,9 +76,11 @@ outputs:
         include ::tripleo::profile::base::heat::api_cfn
       service_config_settings:
         keystone:
-          heat::keystone::auth_cfn::tenant: 'service'
-          heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
-          heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
-          heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
-          heat::keystone::auth_cfn::password: {get_param: HeatPassword}
-          heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
+          map_merge:
+            - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+            - heat::keystone::auth_cfn::tenant: 'service'
+              heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
+              heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
+              heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
+              heat::keystone::auth_cfn::password: {get_param: HeatPassword}
+              heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index 2ea96fc..b0cd16d 100644 (file)
@@ -76,9 +76,11 @@ outputs:
         include ::tripleo::profile::base::heat::api
       service_config_settings:
         keystone:
-          heat::keystone::auth::tenant: 'service'
-          heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
-          heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
-          heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
-          heat::keystone::auth::password: {get_param: HeatPassword}
-          heat::keystone::auth::region: {get_param: KeystoneRegion}
+          map_merge:
+            - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+            - heat::keystone::auth::tenant: 'service'
+              heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+              heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+              heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
+              heat::keystone::auth::password: {get_param: HeatPassword}
+              heat::keystone::auth::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index 7eb58f5..a2a65d7 100644 (file)
@@ -77,3 +77,8 @@ outputs:
         heat::cron::purge_deleted::destination: '/dev/null'
         heat::db::database_db_max_retries: -1
         heat::db::database_max_retries: -1
+      service_config_settings:
+        keystone:
+          tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack'
+          tripleo::profile::base::keystone::heat_admin_user: 'heat_stack_domain_admin'
+          tripleo::profile::base::keystone::heat_admin_email: 'heat_stack_domain_admin@localhost'
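The new keystone-side hiera keys make the keystone profile create the heat_stack domain and its domain admin during deployment. Once the overcloud is up this can be confirmed with the standard client, sourcing the overcloud credentials first (assumed):

    source overcloudrc
    openstack domain list | grep heat_stack
    openstack user list --domain heat_stack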
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 1fc88bf..6989871 100644 (file)
@@ -18,6 +18,10 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  KernelPidMax:
+    default: 1048576
+    description: Configures sysctl kernel.pid_max key
+    type: number
 
 outputs:
   role_data:
@@ -49,5 +53,7 @@ outputs:
             value: 0
           net.core.netdev_max_backlog:
             value: 10000
+          kernel.pid_max:
+            value: {get_param: KernelPidMax}
       step_config: |
         include ::tripleo::profile::base::kernel
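KernelPidMax ends up as a sysctl entry managed by ::tripleo::profile::base::kernel; on a deployed node the effective value can be read back directly:

    # Template default is 1048576; any other value points at a
    # parameter_defaults override of KernelPidMax.
    sysctl kernel.pid_max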
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index 5f4ab6b..b4b3d48 100644 (file)
@@ -51,6 +51,11 @@ outputs:
             manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
             manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
             manila::keystone::authtoken::project_name: 'service'
+            tripleo.manila_api.firewall_rules:
+              '150 manila':
+                dport:
+                  - 8786
+                  - 13786
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
             # (eg. for internal_api):
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index a89e3d7..a215755 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
 
 description: >
   OpenStack Neutron L3 agent configured with Puppet
@@ -43,6 +43,10 @@ parameters:
       tag: openstack.neutron.agent.l3
       path: /var/log/neutron/l3-agent.log
 
+conditions:
+
+  external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]}
+
 resources:
 
   NeutronBase:
@@ -63,12 +67,16 @@ outputs:
         - neutron
       config_settings:
         map_merge:
-          - get_attr: [NeutronBase, role_data, config_settings]
+        - get_attr: [NeutronBase, role_data, config_settings]
+        - neutron::agents::l3::router_delete_namespaces: True
+          neutron::agents::l3::agent_mode: {get_param: NeutronL3AgentMode}
+          tripleo.neutron_l3.firewall_rules:
+            '106 neutron_l3 vrrp':
+              proto: vrrp
+        - 
+          if:
+          - external_network_bridge_empty
+          - {}
           - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
-            neutron::agents::l3::router_delete_namespaces: True
-            neutron::agents::l3::agent_mode : {get_param: NeutronL3AgentMode}
-            tripleo.neutron_l3.firewall_rules:
-              '106 neutron_l3 vrrp':
-                proto: vrrp
       step_config: |
         include tripleo::profile::base::neutron::l3
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index 3cc238c..49bd84b 100644 (file)
@@ -51,6 +51,9 @@ parameters:
     default:
       tag: openstack.nova.api
       path: /var/log/nova/nova-api.log
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
 conditions:
   nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
@@ -62,6 +65,7 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
   NovaBase:
     type: ./nova-base.yaml
@@ -101,21 +105,26 @@ outputs:
           nova::api::default_floating_pool: 'public'
           nova::api::sync_db_api: true
           nova::api::enable_proxy_headers_parsing: true
+          nova::api::api_bind_address:
+            str_replace:
+              template:
+                '"%{::fqdn_$NETWORK}"'
+              params:
+                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+          nova::api::service_name: 'httpd'
+          nova::wsgi::apache::ssl: {get_param: EnableInternalTLS}
           # NOTE: bind IP is found in Heat replacing the network name with the local node IP
           # for the given network; replacement examples (eg. for internal_api):
           # internal_api -> IP
           # internal_api_uri -> [IP]
           # internal_api_subnet - > IP/CIDR
-          nova::api::api_bind_address: {get_param: [ServiceNetMap, NovaApiNetwork]}
-          nova::api::service_name: 'httpd'
-          nova::wsgi::apache::ssl: false
           nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
           nova::wsgi::apache::servername:
             str_replace:
               template:
                 '"%{::fqdn_$NETWORK}"'
               params:
-                $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
           nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
           nova::api::instance_name_template: {get_param: InstanceNameTemplate}
           nova_enable_db_purge: {get_param: NovaEnableDBPurge}
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 318c898..253d63e 100644 (file)
@@ -59,6 +59,6 @@ outputs:
         opendaylight::enable_l3: {get_param: OpenDaylightEnableL3}
         opendaylight::extra_features: {get_param: OpenDaylightFeatures}
         opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
-        opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}
+        opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
       step_config: |
         include tripleo::profile::base::neutron::opendaylight
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 268ca24..907ecdd 100644 (file)
@@ -54,5 +54,11 @@ outputs:
             template: MAPPINGS
             params:
               MAPPINGS: {get_param: OpenDaylightProviderMappings}
+        tripleo.opendaylight_ovs.firewall_rules:
+          '118 neutron vxlan networks':
+             proto: 'udp'
+             dport: 4789
+          '136 neutron gre networks':
+             proto: 'gre'
       step_config: |
         include tripleo::profile::base::neutron::plugins::ovs::opendaylight
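The two firewall rules open VXLAN (UDP 4789) and GRE for tenant tunnels terminating on the OVS/OpenDaylight node. A quick spot-check after the puppet run, for illustration:

    # Confirm the VXLAN and GRE rules landed in the node's iptables ruleset.
    iptables -S | grep -E '4789|gre'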
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index 5fc8ed6..4072a15 100644 (file)
@@ -44,6 +44,10 @@ parameters:
     type: string
     default: ''
     description: Set to True to enable debugging on all services.
+  SaharaPlugins:
+    default: ["ambari","cdh","mapr","vanilla","spark","storm"]
+    description: Sahara enabled plugin list
+    type: comma_delimited_list
 
 outputs:
   role_data:
@@ -69,13 +73,7 @@ outputs:
         sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
         sahara::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
         sahara::use_neutron: true
-        sahara::plugins:
-          - ambari
-          - cdh
-          - mapr
-          - vanilla
-          - spark
-          - storm
+        sahara::plugins: {get_param: SaharaPlugins}
         sahara::rpc_backend: rabbit
         sahara::admin_tenant_name: 'service'
         sahara::db::database_db_max_retries: -1
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 176fd23..ffe2d2d 100644 (file)
@@ -54,8 +54,8 @@ outputs:
           data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
       monitoring_subscriptions:
         yaql:
-          expression: list($.data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
-          data: {get_attr: [ServiceChain, role_data]}
+          expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
+          data: {role_data: {get_attr: [ServiceChain, role_data]}}
       logging_sources:
         # Transform the individual logging_source configuration from
         # each service in the chain into a global list, adding some
@@ -78,8 +78,9 @@ outputs:
             sources:
               - {get_attr: [LoggingConfiguration, LoggingDefaultSources]}
               - yaql:
-                  expression: list($.data.where($ != null).select($.get('logging_source')).where($ != null))
-                  data: {get_attr: [ServiceChain, role_data]}
+                  expression: list($.data.role_data.where($ != null).select($.get('logging_source')).where($ != null))
+                  data: {role_data: {get_attr: [ServiceChain, role_data]}}
+
               - {get_attr: [LoggingConfiguration, LoggingExtraSources]}
             default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]}
             pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]}
@@ -93,17 +94,17 @@ outputs:
             groups:
               - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
               - yaql:
-                  expression: list($.data.where($ != null).select($.get('logging_groups')).where($ != null))
-                  data: {get_attr: [ServiceChain, role_data]}
+                  expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null))
+                  data: {role_data: {get_attr: [ServiceChain, role_data]}}
               - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
       config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
       global_config_settings:
         map_merge:
           yaql:
-            expression: list($.data.where($ != null).select($.get('global_config_settings')).where($ != null))
-            data: {get_attr: [ServiceChain, role_data]}
+            expression: list($.data.role_data.where($ != null).select($.get('global_config_settings')).where($ != null))
+            data: {role_data: {get_attr: [ServiceChain, role_data]}}
       service_config_settings:
         yaql:
-          expression: $.data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
-          data: {get_attr: [ServiceChain, role_data]}
+          expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
+          data: {role_data: {get_attr: [ServiceChain, role_data]}}
       step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index ae26544..ba1d99f 100644 (file)
@@ -76,6 +76,7 @@ outputs:
             swift::proxy::workers: {get_param: SwiftWorkers}
             swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
             swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+            swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
             tripleo.swift_proxy.firewall_rules:
               '122 swift proxy':
                 dport:
diff --git a/roles_data.yaml b/roles_data.yaml
index 320bb70..dad62f8 100644 (file)
     - OS::TripleO::Services::GnocchiApi
     - OS::TripleO::Services::GnocchiMetricd
     - OS::TripleO::Services::GnocchiStatsd
-    - OS::Tripleo::Services::ManilaApi
-    - OS::Tripleo::Services::ManilaScheduler
-    - OS::Tripleo::Services::ManilaBackendGeneric
-    - OS::Tripleo::Services::ManilaBackendNetapp
-    - OS::Tripleo::Services::ManilaBackendCephFs
-    - OS::Tripleo::Services::ManilaShare
+    - OS::TripleO::Services::ManilaApi
+    - OS::TripleO::Services::ManilaScheduler
+    - OS::TripleO::Services::ManilaBackendGeneric
+    - OS::TripleO::Services::ManilaBackendNetapp
+    - OS::TripleO::Services::ManilaBackendCephFs
+    - OS::TripleO::Services::ManilaShare
     - OS::TripleO::Services::AodhApi
     - OS::TripleO::Services::AodhEvaluator
     - OS::TripleO::Services::AodhNotifier
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall