Merge "nova-placement: switch auth_uri to keystone versionless endpoint" into stable...
author: Zuul <zuul@review.openstack.org>
Wed, 8 Nov 2017 14:43:30 +0000 (14:43 +0000)
committer: Gerrit Code Review <review@openstack.org>
Wed, 8 Nov 2017 14:43:30 +0000 (14:43 +0000)
36 files changed:
ci/environments/scenario002-multinode-containers.yaml
ci/environments/scenario002-multinode.yaml
ci/environments/scenario007-multinode-containers.yaml
deployed-server/scripts/enable-ssh-admin.sh
docker/services/ceph-ansible/ceph-base.yaml
docker/services/cinder-api.yaml
docker/services/glance-api.yaml
docker/services/heat-api.yaml
docker/services/horizon.yaml
docker/services/keystone.yaml
docker/services/memcached.yaml
docker/services/mistral-api.yaml
docker/services/nova-api.yaml
docker/services/pacemaker/clustercheck.yaml
environments/composable-roles/standalone.yaml
environments/network-isolation-v6.j2.yaml
environments/storage/enable-ceph.yaml
environments/storage/external-ceph.yaml
environments/storage/glance-nfs.yaml
extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
puppet/role.role.j2.yaml
puppet/services/cinder-base.yaml
puppet/services/database/mysql.yaml
puppet/services/glance-api.yaml
puppet/services/memcached.yaml
puppet/services/neutron-lbaas.yaml
puppet/services/nova-compute.yaml
puppet/services/rabbitmq.yaml
releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml [new file with mode: 0644]
releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml [new file with mode: 0644]
releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml [new file with mode: 0644]
sample-env-generator/composable-roles.yaml
tools/yaml-validate.py

index bec5f48..534f829 100644 (file)
@@ -9,7 +9,6 @@ resource_registry:
   OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
   OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
   OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
-  OS::TripleO::Services::MongoDb: ../../docker/services/database/mongodb.yaml
   # Some infra instances don't pass the ping test but are otherwise working.
   # Since the OVB jobs also test this functionality we can shut it off here.
   OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
@@ -58,7 +57,6 @@ parameter_defaults:
     - OS::TripleO::Services::SwiftStorage
     - OS::TripleO::Services::SwiftRingBuilder
     - OS::TripleO::Services::BarbicanApi
-    - OS::TripleO::Services::MongoDb
     - OS::TripleO::Services::Zaqar
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::TripleoPackages
@@ -69,5 +67,7 @@ parameter_defaults:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
   Debug: true
+  ZaqarMessageStore: 'swift'
+  ZaqarManagementStore: 'sqlalchemy'
   SwiftCeilometerPipelineEnabled: false
   NotificationDriver: 'noop'
index 6c7f4eb..2f731ce 100644 (file)
@@ -9,7 +9,6 @@ resource_registry:
   OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
   OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
   OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
-  OS::TripleO::Services::MongoDb: ../../puppet/services/database/mongodb.yaml
   OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
   OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
   OS::TripleO::Services::Keepalived: OS::Heat::None
@@ -63,7 +62,6 @@ parameter_defaults:
     - OS::TripleO::Services::SwiftStorage
     - OS::TripleO::Services::SwiftRingBuilder
     - OS::TripleO::Services::BarbicanApi
-    - OS::TripleO::Services::MongoDb
     - OS::TripleO::Services::Zaqar
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::TripleoPackages
@@ -86,5 +84,7 @@ parameter_defaults:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
   Debug: true
+  ZaqarMessageStore: 'swift'
+  ZaqarManagementStore: 'sqlalchemy'
   SwiftCeilometerPipelineEnabled: false
   NotificationDriver: 'noop'
index bad3e4a..149f2d3 100644 (file)
@@ -7,6 +7,9 @@ resource_registry:
   # Since the OVB jobs also test this functionality we can shut it off here.
   OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
   OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None
+  OS::TripleO::Services::SwiftProxy: OS::Heat::None
+  OS::TripleO::Services::SwiftStorage: OS::Heat::None
+  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
 parameter_defaults:
   ControllerServices:
     - OS::TripleO::Services::Clustercheck
index dcabead..daff390 100755 (executable)
@@ -10,6 +10,7 @@ SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"$HOME/.ssh/id_rsa"}
 # this is the intended variable for overriding
 OVERCLOUD_SSH_KEY=${OVERCLOUD_SSH_KEY:-"$SUBNODES_SSH_KEY"}
 
+SHORT_TERM_KEY_COMMENT="TripleO split stack short term key"
 SLEEP_TIME=5
 
 function overcloud_ssh_hosts_json {
@@ -22,7 +23,7 @@ print(json.dumps(re.split("\s+", sys.stdin.read().strip())))'
 function overcloud_ssh_key_json {
     # we pass the contents to Mistral instead of just path, otherwise
     # the key file would have to be readable for the mistral user
-    cat "$OVERCLOUD_SSH_KEY" | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))'
+    cat "$1" | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))'
 }
 
 function workflow_finished {
@@ -30,6 +31,12 @@ function workflow_finished {
     openstack workflow execution show -f shell $execution_id | grep 'state="SUCCESS"' > /dev/null
 }
 
+function generate_short_term_keys {
+    local tmpdir=$(mktemp -d)
+    ssh-keygen -N '' -t rsa -b 4096 -f "$tmpdir/id_rsa" -C "$SHORT_TERM_KEY_COMMENT" > /dev/null
+    echo "$tmpdir"
+}
+
 if [ -z "$OVERCLOUD_HOSTS" ]; then
     echo 'Please set $OVERCLOUD_HOSTS'
     exit 1
@@ -41,7 +48,20 @@ echo "SSH key file: $OVERCLOUD_SSH_KEY"
 echo "Hosts: $OVERCLOUD_HOSTS"
 echo
 
-EXECUTION_PARAMS="{\"ssh_user\": \"$OVERCLOUD_SSH_USER\", \"ssh_servers\": $(overcloud_ssh_hosts_json), \"ssh_private_key\": $(overcloud_ssh_key_json)}"
+SHORT_TERM_KEY_DIR=$(generate_short_term_keys)
+SHORT_TERM_KEY_PRIVATE="$SHORT_TERM_KEY_DIR/id_rsa"
+SHORT_TERM_KEY_PUBLIC="$SHORT_TERM_KEY_DIR/id_rsa.pub"
+SHORT_TERM_KEY_PUBLIC_CONTENT=$(cat $SHORT_TERM_KEY_PUBLIC)
+
+for HOST in $OVERCLOUD_HOSTS; do
+    echo "Inserting TripleO short term key for $HOST"
+    # prepending an extra newline so that if authorized_keys didn't
+    # end with a newline previously, we don't end up garbling it up
+    ssh -i "$OVERCLOUD_SSH_KEY" -l "$OVERCLOUD_SSH_USER" "$HOST" "echo -e '\n$SHORT_TERM_KEY_PUBLIC_CONTENT' >> \$HOME/.ssh/authorized_keys"
+done
+
+echo "Starting ssh admin enablement workflow"
+EXECUTION_PARAMS="{\"ssh_user\": \"$OVERCLOUD_SSH_USER\", \"ssh_servers\": $(overcloud_ssh_hosts_json), \"ssh_private_key\": $(overcloud_ssh_key_json "$SHORT_TERM_KEY_PRIVATE")}"
 EXECUTION_CREATE_OUTPUT=$(openstack workflow execution create -f shell -d 'deployed server ssh admin creation' tripleo.access.v1.enable_ssh_admin "$EXECUTION_PARAMS")
 echo "$EXECUTION_CREATE_OUTPUT"
 EXECUTION_ID=$(echo "$EXECUTION_CREATE_OUTPUT" | grep '^id=' | awk '-F"' '{ print $2 }')
@@ -56,5 +76,14 @@ while ! workflow_finished $EXECUTION_ID; do
     sleep $SLEEP_TIME
     echo -n .
 done
+echo  # newline after the previous dots
+
+for HOST in $OVERCLOUD_HOSTS; do
+    echo "Removing TripleO short term key from $HOST"
+    ssh -l "$OVERCLOUD_SSH_USER" "$HOST" "sed -i -e '/$SHORT_TERM_KEY_COMMENT/d' \$HOME/.ssh/authorized_keys"
+done
+
+echo "Removing short term keys locally"
+rm -r "$SHORT_TERM_KEY_DIR"
 
 echo "Success."
index 8cc81fb..4674ec1 100644 (file)
@@ -73,15 +73,9 @@ parameters:
     description: >
       It can be used to override settings for one of the predefined pools, or to create
       additional ones. Example:
-      {
-        "volumes": {
-          "size": 5,
-          "pg_num": 128,
-          "pgp_num": 128
-         }
-      }
-    default: {}
-    type: json
+      [{"name": "volumes", "pg_num": 64, "rule_name": ""}]
+    default: []
+    type: comma_delimited_list
   CinderRbdPoolName:
     default: volumes
     type: string
@@ -225,13 +219,7 @@ outputs:
                       - {get_param: NovaRbdPoolName}
                       - {get_param: GlanceRbdPoolName}
                       - {get_param: GnocchiRbdPoolName}
-              - repeat:
-                  template:
-                    name: <%pool%>
-                    pg_num: {get_param: CephPoolDefaultPgNum}
-                    rule_name: ""
-                  for_each:
-                    <%pool%>: {get_param: CephPools}
+              - {get_param: CephPools}
           openstack_keys: &openstack_keys
           - name:
               list_join:
index 25390c6..336b454 100644 (file)
@@ -200,6 +200,7 @@ outputs:
           tags: step2
           service: name=httpd state=stopped enabled=no
         - name: remove old cinder cron jobs
+          tags: step2
           file:
             path: /var/spool/cron/cinder
             state: absent
index e1a3827..b4336be 100644 (file)
@@ -39,6 +39,13 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  GlanceBackend:
+    default: swift
+    description: The short name of the Glance backend to use. Should be one
+      of swift, rbd, cinder, or file
+    type: string
+    constraints:
+    - allowed_values: ['swift', 'file', 'rbd', 'cinder']
   GlanceNfsEnabled:
     default: false
     description: >
@@ -48,11 +55,22 @@ parameters:
     default: false
     description: Remove package if the service is being disabled during upgrade
     type: boolean
+  GlanceNfsShare:
+    default: ''
+    description: >
+      NFS share to mount for image storage (when GlanceNfsEnabled is true)
+    type: string
+  GlanceNfsOptions:
+    default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+    description: >
+      NFS mount options for image storage (when GlanceNfsEnabled is true)
+    type: string
 
 conditions:
 
   internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
   nfs_backend_enabled: {equals: [{get_param: GlanceNfsEnabled}, true]}
+  cinder_backend_enabled: {equals: [{get_param: GlanceBackend}, cinder]}
 
 
 resources:
@@ -108,6 +126,10 @@ outputs:
               dest: "/etc/ceph/"
               merge: true
               preserve_properties: true
+          permissions:
+            - path: /var/lib/glance
+              owner: glance:glance
+              recurse: true
         /var/lib/kolla/config_files/glance_api_tls_proxy.json:
           command: /usr/sbin/httpd -DFOREGROUND
           config_files:
@@ -147,6 +169,12 @@ outputs:
                       - nfs_backend_enabled
                       - /var/lib/glance:/var/lib/glance
                       - ''
+                -
+                  if:
+                    - cinder_backend_enabled
+                    - - /dev:/dev
+                      - /etc/iscsi:/etc/iscsi
+                    - []
             environment:
               - KOLLA_BOOTSTRAP=True
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@@ -157,7 +185,7 @@ outputs:
                 start_order: 2
                 image: *glance_api_image
                 net: host
-                privileged: false
+                privileged: {if: [cinder_backend_enabled, true, false]}
                 restart: always
                 volumes: *glance_volumes
                 environment:
@@ -182,6 +210,15 @@ outputs:
                       - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
                 - {}
       host_prep_tasks:
+        - name: Mount NFS on host
+          vars:
+            nfs_backend_enable: {get_param: GlanceNfsEnabled}
+          mount: name=/var/lib/glance src="{{item.NFS_SHARE}}" fstype=nfs4 opts="{{item.NFS_OPTIONS}}" state=mounted
+          with_items:
+            - NFS_SHARE: {get_param: GlanceNfsShare}
+              NFS_OPTIONS: {get_param: GlanceNfsOptions}
+          when:
+            - nfs_backend_enable
         - name: create persistent logs directory
           file:
             path: "{{ item }}"
index 75d0b8c..dcba519 100644 (file)
@@ -166,6 +166,7 @@ outputs:
           ignore_errors: True
           register: heat_api_enabled
         - name: remove old heat cron jobs
+          tags: step2
           file:
             path: /var/spool/cron/heat
             state: absent
index d6ffb6d..94fd9ee 100644 (file)
@@ -95,6 +95,12 @@ outputs:
             - path: /var/log/horizon/
               owner: apache:apache
               recurse: true
+            # NOTE The upstream Kolla Dockerfile sets /etc/openstack-dashboard/ ownership to
+            # horizon:horizon - the policy.json files need read permissions for the apache user
+            # FIXME We should consider whether this should be fixed in the Kolla Dockerfile instead
+            - path: /etc/openstack-dashboard/
+              owner: apache:apache
+              recurse: true
             # FIXME Apache tries to write a .lock file there
             - path: /usr/share/openstack-dashboard/openstack_dashboard/local/
               owner: apache:apache
@@ -113,7 +119,7 @@ outputs:
             volumes:
               - /var/log/containers/horizon:/var/log/horizon
               - /var/log/containers/httpd/horizon:/var/log/httpd
-              - /var/lib/config-data/horizon/etc/:/etc/
+              - /var/lib/config-data/puppet-generated/horizon/etc/openstack-dashboard:/etc/openstack-dashboard
         step_3:
           horizon:
             image: *horizon_image
index 26cef61..a8ba5bf 100644 (file)
@@ -211,6 +211,7 @@ outputs:
           tags: step2
           service: name=httpd state=stopped enabled=no
         - name: remove old keystone cron jobs
+          tags: step2
           file:
             path: /var/spool/cron/keystone
             state: absent
index c78b85a..67b8424 100644 (file)
@@ -80,8 +80,8 @@ outputs:
             user: root
             volumes:
                - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
-               - /var/log/memcached.log:/var/log/memcached.log
-            command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; chown ${USER} /var/log/memcached.log']
+               - /var/log/containers/memcached:/var/log/
+            command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; touch /var/log/memcached.log && chown ${USER} /var/log/memcached.log']
           memcached:
             start_order: 1
             image: *memcached_image
@@ -93,8 +93,16 @@ outputs:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
                   - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
-            # TODO(bogdando) capture memcached syslog logs from a container
-            command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
+                  - /var/log/containers/memcached:/var/log/
+            # NOTE: We're adding the log redirection here, even though should
+            # already be part of the options. This is because the redirection
+            # via the options is not working and ends up being passed as a
+            # parameter to the memcached command (which it silently ignores).
+            # Thus the need for the explicit redirection here. The redirection
+            # will be removed from the $OPTIONS, which is done via the puppet
+            # module, but we'll only be able to do this once the following pull
+            # request merges: https://github.com/saz/puppet-memcached/pull/88
+            command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS >> /var/log/memcached.log 2>&1']
       upgrade_tasks:
         - name: Stop and disable memcached service
           tags: step2
index 50c8021..1b4b44f 100644 (file)
@@ -36,6 +36,16 @@ parameters:
     default: {}
     description: Parameters specific to the role
     type: json
+  MistralWorkers:
+    default: 1
+    description: The number of workers for the mistral-api.
+    type: number
+  MistralApiPolicies:
+    description: |
+      A hash of policies to configure for Mistral API.
+      e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
 
@@ -45,6 +55,16 @@ resources:
   MySQLClient:
     type: ../../puppet/services/database/mysql-client.yaml
 
+  MistralBase:
+    type: ../../puppet/services/mistral-base.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceData: {get_param: ServiceData}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
   MistralApiBase:
     type: ../../puppet/services/mistral-api.yaml
     properties:
@@ -60,9 +80,23 @@ outputs:
     description: Role data for the Mistral API role.
     value:
       service_name: {get_attr: [MistralApiBase, role_data, service_name]}
+      # FIXME(mandre) restore once mistral-api image has the necessary packages
+      # to run on top of apache
+      # config_settings:
+      #   map_merge:
+      #     - get_attr: [MistralApiBase, role_data, config_settings]
       config_settings:
         map_merge:
-          - get_attr: [MistralApiBase, role_data, config_settings]
+          - get_attr: [MistralBase, role_data, config_settings]
+          - mistral::api::api_workers: {get_param: MistralWorkers}
+            mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+            mistral::policy::policies: {get_param: MistralApiPolicies}
+            tripleo.mistral_api.firewall_rules:
+              '133 mistral':
+                dport:
+                  - 8989
+                  - 13989
+            mistral_wsgi_enabled: false
       logging_source: {get_attr: [MistralApiBase, role_data, logging_source]}
       logging_groups: {get_attr: [MistralApiBase, role_data, logging_groups]}
       step_config: &step_config
index f262bcb..7f1b7a5 100644 (file)
@@ -246,6 +246,7 @@ outputs:
           ignore_errors: True
           when: {get_param: UpgradeRemoveUnusedPackages}
         - name: remove old nova cron jobs
+          tags: step2
           file:
             path: /var/spool/cron/nova
             state: absent
index b5d128d..6db8a21 100644 (file)
@@ -44,8 +44,11 @@ resources:
   ContainersCommon:
     type: ../containers-common.yaml
 
+# We import from the corresponding docker service because otherwise we risk
+# rewriting the tripleo.mysql.firewall_rules key with the baremetal firewall
+# rules (see LP#1728918)
   MysqlPuppetBase:
-    type: ../../../puppet/services/pacemaker/database/mysql.yaml
+    type: ../../../docker/services/pacemaker/database/mysql.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceData: {get_param: ServiceData}
index 3305c9e..c12d72d 100644 (file)
@@ -30,13 +30,13 @@ parameter_defaults:
   # Type: string
   ComputeHostnameFormat: '%stackname%-novacompute-%index%'
 
-  # Number of Controller nodes to deploy
+  # Number of ControllerOpenstack nodes
   # Type: number
-  ControllerCount: 3
+  ControllerOpenstackCount: 3
 
-  # Format for Controller node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+  # Format for ControllerOpenstack node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
   # Type: string
-  ControllerHostnameFormat: '%stackname%-controller-%index%'
+  ControllerOpenstackHostnameFormat: '%stackname%-controller-%index%'
 
   # Number of Database nodes
   # Type: number
@@ -66,9 +66,9 @@ parameter_defaults:
   # Type: string
   OvercloudComputeFlavor: compute
 
-  # Name of the flavor for Controller nodes
+  # Name of the flavor for ControllerOpenstack nodes
   # Type: string
-  OvercloudControllerFlavor: control
+  OvercloudControllerOpenstackFlavor: control
 
   # Name of the flavor for Database nodes
   # Type: string
index 617dfa6..fefa20c 100644 (file)
@@ -55,3 +55,5 @@ parameter_defaults:
   RabbitIPv6: True
   # Enable IPv6 environment for Memcached.
   MemcachedIPv6: True
+  # Enable IPv6 environment for MySQL.
+  MysqlIPv6: True
index 596ec16..c43f2fa 100644 (file)
@@ -21,7 +21,7 @@ parameter_defaults:
   # Type: boolean
   CinderEnableRbdBackend: True
 
-  # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+  # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
   # Type: string
   GlanceBackend: rbd
 
index 0f2d039..dde2c90 100644 (file)
@@ -43,7 +43,7 @@ parameter_defaults:
   # Type: string
   CinderRbdPoolName: volumes
 
-  # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+  # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
   # Type: string
   GlanceBackend: rbd
 
index 3c13930..359401d 100644 (file)
@@ -21,7 +21,7 @@ parameter_defaults:
   # Static parameters - these are values that must be
   # included in the environment but should not be changed.
   # ******************************************************
-  # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+  # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
   # Type: string
   GlanceBackend: file
 
index 2455751..a5eb35c 100644 (file)
@@ -20,7 +20,7 @@ parameter_defaults:
   rhel_reg_user: ""
   rhel_reg_type: ""
   rhel_reg_method: ""
-  rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
+  rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.2-rpms"
   rhel_reg_http_proxy_host: ""
   rhel_reg_http_proxy_port: ""
   rhel_reg_http_proxy_username: ""
index d754aaf..4592473 100644 (file)
@@ -23,6 +23,8 @@ proxy_port=
 proxy_url=
 proxy_username=
 proxy_password=
+curl_opts="--retry-delay 10 --max-time 30 --retry ${retry_max_count} --cacert /etc/rhsm/ca/redhat-uep.pem"
+portal_test_url="https://$(crudini --get /etc/rhsm/rhsm.conf server hostname)/subscription/"
 
 # process variables..
 if [ -n "${REG_AUTO_ATTACH:-}" ]; then
@@ -129,12 +131,14 @@ if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
         # Good both values are not empty
         proxy_url="http://${proxy_host}:${proxy_port}"
         config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}"
-        sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}"
+        sat5_opts="${sat5_opts} --proxy=${proxy_url}"
+        curl_opts="${curl_opts} -x http://${proxy_host}:${proxy_port}"
         echo "RHSM Proxy set to: ${proxy_url}"
         if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
             if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
                 config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}"
                 sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}"
+                curl_opts="${curl_opts} --proxy-user ${proxy_username}:${proxy_password}"
             else
                 echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..."
                 proxy_username= ; proxy_password=
@@ -187,10 +191,10 @@ function retry() {
 }
 
 function detect_satellite_server {
-    if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm | grep "200 OK"; then
+    if curl ${curl_opts} -L -k -s -D - -o /dev/null $REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm | grep "200 OK"; then
         echo Satellite 6 or beyond with Katello API detected at $REG_SAT_URL
         katello_api_enabled=1
-    elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+    elif curl ${curl_opts} -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
         echo Satellite 5 with RHN detected at $REG_SAT_URL
         katello_api_enabled=0
     else
@@ -199,7 +203,13 @@ function detect_satellite_server {
     fi
 }
 
-if [ "x${proxy_url}" != "x" ];then
+if [ "x${proxy_url}" != "x" ]; then
+    # Before everything, we want to make sure the proxy can be reached
+    # Note: no need to manage retries, already done by retry() function.
+    echo "Testing proxy connectivity..."
+    retry bash -c "</dev/tcp/${proxy_host}/${proxy_port}"
+    echo "Proxy ${proxy_url} is reachable!"
+
     # Config subscription-manager for proxy
     subscription-manager config ${config_opts}
 
@@ -222,6 +232,22 @@ fi
 
 case "${REG_METHOD:-}" in
     portal)
+        # First test curl to RHSM through the specified proxy
+
+        if curl ${curl_opts} -L -s -D - -o /dev/null ${portal_test_url}|grep '200 OK'; then
+           if [ "x${proxy_url}" = "x" ]; then
+               echo "Access to RHSM portal OK, continuing..."
+           else
+               echo "Access to RHSM portal through proxy ${proxy_url} OK, continuing..."
+           fi
+        else
+           if [ "x${proxy_url}" = "x" ]; then
+               echo "Unable to access RHSM portal! Please check your parameters."
+           else
+               echo "Unable to access RHSM portal through configured HTTP proxy (${proxy_url}) ! Please check your parameters."
+           fi
+           exit 1
+        fi
         retry subscription-manager register $opts
         if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
             retry subscription-manager attach $attach_opts
@@ -233,7 +259,7 @@ case "${REG_METHOD:-}" in
         detect_satellite_server
         if [ "$katello_api_enabled" = "1" ]; then
             repos="$repos --enable ${satellite_repo}"
-            curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+            curl ${curl_opts} -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
 
             # https://bugs.launchpad.net/tripleo/+bug/1711435
             # Delete the /etc/rhsm/facts directory entirely so that the
@@ -247,7 +273,7 @@ case "${REG_METHOD:-}" in
             rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
             retry subscription-manager register $opts
             retry subscription-manager $repos
-            retry yum install -y katello-agent || true # needed for errata reporting to satellite6
+            yum install -y katello-agent || true # needed for errata reporting to satellite6
             katello-package-upload
 
             # https://bugs.launchpad.net/tripleo/+bug/1711435
@@ -255,7 +281,7 @@ case "${REG_METHOD:-}" in
             mkdir -p /etc/rhsm/facts
         else
             pushd /usr/share/rhn/
-            curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+            curl ${curl_opts} -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
             popd
             retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
         fi
index 93408dd..6e010de 100644 (file)
@@ -24,17 +24,16 @@ resources:
       config:
         datafiles:
           neutron_bigswitch_data:
-            mapped_data:
-              neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
-              neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
-              # NOTE(aschultz): required for the puppet module but we don't
-              # actually want them defined on the compute nodes so we're
-              # relying on the puppet  module's handling of <SERVICE DEFAULT>
-              # to just not set these but still accept that they were defined.
-              # This will should be fixed in puppet-neutron and removed here,
-              # but for backportability, we need to define something.
-              neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
-              neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
+            neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+            neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+            # NOTE(aschultz): required for the puppet module but we don't
+            # actually want them defined on the compute nodes so we're
+            # relying on the puppet  module's handling of <SERVICE DEFAULT>
+            # to just not set these but still accept that they were defined.
+            # This will should be fixed in puppet-neutron and removed here,
+            # but for backportability, we need to define something.
+            neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
+            neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
 
 
   NeutronBigswitchDeployment:
index 71a915d..cda598a 100644 (file)
@@ -50,16 +50,15 @@ resources:
       config:
         datafiles:
           neutron_bigswitch_data:
-            mapped_data:
-              neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
-              neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
-              neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
-              neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
-              neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
-              neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
-              neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
-              neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
-              neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
+            neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+            neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+            neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
+            neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
+            neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
+            neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
+            neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
+            neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
+            neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
 
   NeutronBigswitchDeployment:
     type: OS::Heat::StructuredDeployment
index 15da177..d53afd0 100644 (file)
@@ -565,6 +565,7 @@ resources:
           - bootstrap_node # provided by allNodesConfig
           - all_nodes # provided by allNodesConfig
           - vip_data # provided by allNodesConfig
+          - net_ip_map
           - '"%{::osfamily}"'
           # The following are required for compatibility with the Controller role
           # where some vendor integrations added hieradata via ExtraConfigPre
@@ -578,6 +579,7 @@ resources:
           service_names:
             service_names: {get_param: ServiceNames}
             sensu::subscriptions: {get_param: MonitoringSubscriptions}
+          net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
           service_configs:
             map_replace:
               - {get_param: ServiceConfigSettings}
index 5b2a258..2a8026d 100644 (file)
@@ -89,7 +89,7 @@ parameters:
     type: string
     description: >
         Cron to move deleted instances to another table - User
-    default: 'keystone'
+    default: 'cinder'
   CinderCronDbPurgeAge:
     type: string
     description: >
index abbe7a2..c1f54bb 100644 (file)
@@ -57,6 +57,11 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  MysqlIPv6:
+    default: false
+    description: Enable IPv6 in MySQL
+    type: boolean
+
 
 conditions:
 
@@ -77,6 +82,7 @@ outputs:
             # in tripleo-puppet-elements.
             mysql::server::package_name: 'mariadb-galera-server'
             mysql::server::manage_config_file: true
+            mysql_ipv6: {get_param: MysqlIPv6}
             tripleo.mysql.firewall_rules:
               '104 mysql galera':
                 dport:
@@ -113,30 +119,34 @@ outputs:
               {get_param: [ServiceNetMap, MysqlNetwork]}
             tripleo::profile::base::database::mysql::generate_dropin_file_limit:
               {get_param: MysqlIncreaseFileLimit}
-          - generate_service_certificates: true
-            tripleo::profile::base::database::mysql::certificate_specs:
-              service_certificate: '/etc/pki/tls/certs/mysql.crt'
-              service_key: '/etc/pki/tls/private/mysql.key'
-              hostname:
-                str_replace:
-                  template: "%{hiera('cloud_name_NETWORK')}"
-                  params:
-                    NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
-              dnsnames:
-                - str_replace:
+          - if:
+            - internal_tls_enabled
+            -
+              generate_service_certificates: true
+              tripleo::profile::base::database::mysql::certificate_specs:
+                service_certificate: '/etc/pki/tls/certs/mysql.crt'
+                service_key: '/etc/pki/tls/private/mysql.key'
+                hostname:
+                  str_replace:
                     template: "%{hiera('cloud_name_NETWORK')}"
                     params:
                       NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
-                - str_replace:
-                    template:
-                      "%{hiera('fqdn_$NETWORK')}"
+                dnsnames:
+                  - str_replace:
+                      template: "%{hiera('cloud_name_NETWORK')}"
+                      params:
+                        NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+                  - str_replace:
+                      template:
+                        "%{hiera('fqdn_$NETWORK')}"
+                      params:
+                        $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+                principal:
+                  str_replace:
+                    template: "mysql/%{hiera('cloud_name_NETWORK')}"
                     params:
-                      $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
-              principal:
-                str_replace:
-                  template: "mysql/%{hiera('cloud_name_NETWORK')}"
-                  params:
-                    NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+                      NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+            - {}
       step_config: |
         include ::tripleo::profile::base::database::mysql
       metadata_settings:
index 8ec3546..1baf120 100644 (file)
@@ -78,10 +78,10 @@ parameters:
   GlanceBackend:
     default: swift
     description: The short name of the Glance backend to use. Should be one
-      of swift, rbd, or file
+      of swift, rbd, cinder, or file
     type: string
     constraints:
-    - allowed_values: ['swift', 'file', 'rbd']
+    - allowed_values: ['swift', 'file', 'rbd', 'cinder']
   GlanceNfsEnabled:
     default: false
     description: >
index 2bc08fd..30ea4e1 100644 (file)
@@ -54,6 +54,7 @@ outputs:
         # internal_api_subnet - > IP/CIDR
         memcached::listen_ip: {get_param: [ServiceNetMap, MemcachedNetwork]}
         memcached::max_memory: {get_param: MemcachedMaxMemory}
+        memcached::verbosity: 'v'
         tripleo.memcached.firewall_rules:
           '121 memcached':
             dport: 11211
index ec477dd..a2c1a2a 100644 (file)
@@ -73,3 +73,6 @@ outputs:
       service_config_settings:
         neutron_api:
           neutron::server::service_providers: {get_param: NeutronServiceProviders}
+        horizon:
+          horizon::neutron_options:
+            enable_lb: True
index 9e5ba12..5326a25 100644 (file)
@@ -210,7 +210,7 @@ outputs:
         collectd:
           tripleo.collectd.plugins.nova_compute:
             - virt
-          collectd::plugins::virt::connection: "qemu:///system"
+          collectd::plugin::virt::connection: 'qemu:///system'
       upgrade_tasks:
         - name: Stop nova-compute service
           tags: step1
index a1a6020..879af2a 100644 (file)
@@ -41,7 +41,7 @@ parameters:
   RabbitFDLimit:
     default: 65536
     description: Configures RabbitMQ FD limit
-    type: string
+    type: number
   RabbitIPv6:
     default: false
     description: Enable IPv6 in RabbitMQ
diff --git a/releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml b/releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml
new file mode 100644 (file)
index 0000000..63e6f21
--- /dev/null
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    The format to use for the CephPools parameter needs to be updated into the
+    form expected by ceph-ansible. For example, for a new pool named `mypool`
+    it should change from:
+    { "mypool": { "size": 3, "pg_num": 128, "pgp_num": 128 } }
+    into:
+    [ { "name": "mypool", "pg_num": 128, "rule_name": "" } ]
+    The first is a map where each key is a pool name and its value the pool
+    properties, the second is a list where each item describes all properties
+    of a pool, including its name.
+other:
+  - |
+    With the migration from puppet-ceph to ceph-ansible for the deployment
+    of Ceph, the format of CephPools parameter changes because the two tools
+    use a different format to represent the list of additional pools to create.
diff --git a/releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml b/releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml
new file mode 100644 (file)
index 0000000..626ecba
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    When using an RHSM proxy, TripleO will now verify that the proxy can be
+    reached; otherwise, we will stop early and not try to subscribe nodes.
diff --git a/releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml b/releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml
new file mode 100644 (file)
index 0000000..eb3ab5f
--- /dev/null
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    When deploying with RHSM, sat-tools 6.2 will be installed instead of 6.1.
+    The new version is supported by RHEL 7.4 and provides the katello-agent package.
index 91d6060..2c929a4 100644 (file)
@@ -89,10 +89,8 @@ environments:
     files:
       overcloud.yaml:
         parameters:
-          - ControllerHostnameFormat
           - ComputeHostnameFormat
           - CephStorageHostnameFormat
-          - ControllerCount
           - ComputeCount
           - CephStorageCount
       puppet/services/time/ntp.yaml:
@@ -100,19 +98,21 @@ environments:
           - NtpServer
       sample-env-generator/composable-roles.yaml:
         parameters:
+          - ControllerOpenstackHostnameFormat
           - DnsServers
+          - ControllerOpenstackCount
           - DatabaseCount
           - MessagingCount
           - NetworkerCount
-          - OvercloudControllerFlavor
+          - OvercloudControllerOpenstackFlavor
           - OvercloudComputeFlavor
           - OvercloudCephStorageFlavor
           - OvercloudDatabaseFlavor
           - OvercloudMessagingFlavor
           - OvercloudNetworkerFlavor
     sample_values:
-      ControllerCount: 3
-      OvercloudControllerFlavor: control
+      ControllerOpenstackCount: 3
+      OvercloudControllerOpenstackFlavor: control
       ComputeCount: 1
       OvercloudComputeFlavor: compute
       CephStorageCount: 1
@@ -135,6 +135,10 @@ parameters:
     description: DNS servers to use for the Overcloud
     type: comma_delimited_list
   # Dynamic vars based on roles
+  ControllerOpenstackCount:
+    default: 0
+    description: Number of ControllerOpenstack nodes
+    type: number
   DatabaseCount:
     default: 0
     description: Number of Database nodes
@@ -147,10 +151,21 @@ parameters:
     default: 0
     description: Number of Networker nodes
     type: number
+  ControllerOpenstackHostnameFormat:
+    type: string
+    description: >
+      Format for ControllerOpenstack node hostnames.
+      Note %index% is translated into the index of the node, e.g. 0/1/2 etc.,
+      and %stackname% is replaced with the stack name, e.g. overcloud
+    default: "%stackname%-controller-%index%"
   OvercloudControllerFlavor:
     default: control
     description: Name of the flavor for Controller nodes
     type: string
+  OvercloudControllerOpenstackFlavor:
+    default: control
+    description: Name of the flavor for ControllerOpenstack nodes
+    type: string
   OvercloudComputeFlavor:
     default: compute
     description: Name of the flavor for Compute nodes
index c322962..76f856d 100755 (executable)
@@ -46,7 +46,10 @@ OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags', 'volumes' ]
 # consistency across files on.  This should only contain parameters whose
 # definition we cannot change for backwards compatibility reasons.  New
 # parameters to the templates should not be added to this list.
-PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
+                                                 'type',
+                                                 'default'],
+                                   'ManagementNetCidr': ['default'],
                                    'ManagementAllocationPools': ['default'],
                                    'ExternalNetCidr': ['default'],
                                    'ExternalAllocationPools': ['default'],