#!/bin/bash -e
# shellcheck disable=SC2034,SC2154,SC1090,SC1091,SC2155
##############################################################################
-# Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
$(basename "$0") -l lab-name -p pod-name -s deploy-scenario \\
[-b Lab Config Base URI] \\
[-S storage-dir] [-L /path/to/log/file.tar.gz] \\
- [-f[f]] [-F] [-e | -E[E]] [-d] [-D]
+ [-f[f]] [-F] [-e | -E[E]] [-d] [-D] [-N]
$(notify "OPTIONS:" 2)
-b Base-uri for the stack-configuration structure
-s Deploy-scenario short-name
-S Storage dir for VM images
-L Deployment log path and file name
+ -N Experimental: Do not virtualize control plane (novcp)
$(notify_i "Description:" 2)
Deploys the Fuel@OPNFV stack on the indicated lab resource.
-L Deployment log path and file name, e.g. -L /home/jenkins/job.log.tar.gz
-l Lab name as defined in the configuration directory, e.g. lf
-p POD name as defined in the configuration directory, e.g. pod2
+-N Experimental: Instead of virtualizing the control plane (VCP), deploy the
+   control plane directly on baremetal nodes
-P Skip installing dependency distro packages on current host
This flag should only be used if you have intentionally kept back older
packages whose upgrade would be undesirable on the current system.
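+
+$(notify_i "Example (lab/POD/scenario names below are illustrative):" 2)
+  $(basename "$0") -l lf -p pod2 -s os-nosdn-nofeature-ha -N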
INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
ERASE_ENV=${ERASE_ENV:-0}
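+# MCP_VCP=1 (default) virtualizes the control plane; '-N' sets it to 0 (novcp)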
+MCP_VCP=${MCP_VCP:-1}
source "${DEPLOY_DIR}/globals.sh"
source "${DEPLOY_DIR}/lib.sh"
# BEGIN of main
#
set +x
-while getopts "b:dDfEFl:L:p:Ps:S:he" OPTION
+while getopts "b:dDfEFl:L:Np:Ps:S:he" OPTION
do
case $OPTION in
b)
L)
DEPLOY_LOG="${OPTARG}"
;;
+ N)
+ MCP_VCP=0
+ ;;
p)
TARGET_POD=${OPTARG}
if [[ "${TARGET_POD}" =~ virtual ]]; then
export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")"
# Expand jinja2 templates based on PDF data and env vars
-export MCP_JUMP_ARCH=$(uname -i)
+export MCP_VCP MCP_JUMP_ARCH=$(uname -i)
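+# MCP_VCP is consumed by the j2 templates below as 'conf.MCP_VCP'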
do_templates_scenario "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
"${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
do_templates_cluster "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
reclass:
node:
- compute_params: &compute_params
- dpdk:
+ common: &compute_params_common
compute_hugepages_size: 2M
compute_hugepages_count: 2048
compute_hugepages_mount: /mnt/hugepages_2M
+ dpdk:
+ <<: *compute_params_common
compute_dpdk_driver: uio
compute_ovs_pmd_cpu_mask: "0x6"
compute_ovs_dpdk_socket_mem: "1024"
-os-nosdn-nofeature-novcp-ha.yaml
+*-ha.yaml
+++ /dev/null
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-cluster:
- domain: mcp-pike-ovs-ha.local
- states:
- - maas
- - baremetal_init
- - virtual_control_plane
- - openstack_ha
- - networks
-virtual:
- nodes:
- - cfg01
- - mas01
- cfg01:
- vcpus: 4
- ram: 6144
- mas01:
- vcpus: 4
- ram: 6144
{%- endfor %}
---
cluster:
- domain: mcp-pike-ovs-novcp-ha.local
+ domain: mcp-pike-ovs-ha.local
states:
{%- if cluster.has_baremetal_nodes %}
- maas
- baremetal_init
+{%- endif %}
+{%- if conf.MCP_VCP %}
+ - virtual_control_plane
{%- endif %}
- openstack_ha
- networks
kvm03:
vcpus: 4
ram: 14336
+ cmp001:
+ vcpus: 4
+ ram: 8192
+ cmp002:
+ vcpus: 4
+ ram: 8192
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-cluster:
- domain: mcp-pike-ovs-dpdk-ha.local
- states:
- - maas
- - baremetal_init
- - virtual_control_plane
- - dpdk
- - openstack_ha
- - networks
-virtual:
- nodes:
- - cfg01
- - mas01
- cfg01:
- vcpus: 4
- ram: 6144
- mas01:
- vcpus: 4
- ram: 6144
--- /dev/null
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
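+{#- Flag whether the PDF defines any baremetal nodes (gates maas/baremetal_init below) #}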
+{%- set cluster = {'has_baremetal_nodes': False} %}
+{%- for node in conf.nodes %}
+  {%- if not cluster.has_baremetal_nodes and node.node.type == 'baremetal' %}
+ {%- do cluster.update({'has_baremetal_nodes': True}) %}
+ {%- endif %}
+{%- endfor %}
+---
+cluster:
+ domain: mcp-pike-ovs-dpdk-ha.local
+ states:
+{%- if cluster.has_baremetal_nodes %}
+ - maas
+ - baremetal_init
+{%- endif %}
+{%- if conf.MCP_VCP %}
+ - virtual_control_plane
+{%- endif %}
+ - dpdk
+ - openstack_ha
+ - networks
+virtual:
+ nodes:
+ - cfg01
+{%- if cluster.has_baremetal_nodes %}
+ - mas01
+{%- endif %}
+{#- Most likely, controllers will always have the same type and number (3) #}
+{%- if conf.nodes[nm.ctl01.idx].node.type == 'virtual' %}
+ - kvm01
+ - kvm02
+ - kvm03
+{%- endif %}
+{#- Later, we might have mixed computes here, for hybrid multi-arch testing #}
+{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
+ - cmp001
+ - cmp002
+{%- endif %}
+ cfg01:
+ vcpus: 4
+ ram: 6144
+  # The values below are only used for nodes defined in virtual.nodes above
+ mas01:
+ vcpus: 4
+ ram: 6144
+ # NOTE: We might need to add more RAM here
+ kvm01:
+ vcpus: 4
+ ram: 14336
+ kvm02:
+ vcpus: 4
+ ram: 14336
+ kvm03:
+ vcpus: 4
+ ram: 14336
+ cmp001:
+ vcpus: 4
+ ram: 8192
+ cmp002:
+ vcpus: 4
+ ram: 8192
+++ /dev/null
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-cluster:
- domain: mcp-pike-odl-ha.local
- states:
- - maas
- - baremetal_init
- - virtual_control_plane
- - opendaylight
- - openstack_ha
- - networks
-virtual:
- nodes:
- - cfg01
- - mas01
- cfg01:
- vcpus: 4
- ram: 6144
- mas01:
- vcpus: 4
- ram: 6144
--- /dev/null
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- set cluster = {'has_baremetal_nodes': False} %}
+{%- for node in conf.nodes %}
+  {%- if not cluster.has_baremetal_nodes and node.node.type == 'baremetal' %}
+ {%- do cluster.update({'has_baremetal_nodes': True}) %}
+ {%- endif %}
+{%- endfor %}
+---
+cluster:
+ domain: mcp-pike-odl-ha.local
+ states:
+{%- if cluster.has_baremetal_nodes %}
+ - maas
+ - baremetal_init
+{%- endif %}
+{%- if conf.MCP_VCP %}
+ - virtual_control_plane
+{%- endif %}
+ - opendaylight
+ - openstack_ha
+ - networks
+virtual:
+ nodes:
+ - cfg01
+{%- if cluster.has_baremetal_nodes %}
+ - mas01
+{%- endif %}
+{#- Most likely, controllers will always have the same type and number (3) #}
+{%- if conf.nodes[nm.ctl01.idx].node.type == 'virtual' %}
+ - kvm01
+ - kvm02
+ - kvm03
+{%- endif %}
+{#- Later, we might have mixed computes here, for hybrid multi-arch testing #}
+{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
+ - cmp001
+ - cmp002
+{%- endif %}
+ cfg01:
+ vcpus: 4
+ ram: 6144
+  # The values below are only used for nodes defined in virtual.nodes above
+ mas01:
+ vcpus: 4
+ ram: 6144
+ # NOTE: We might need to add more RAM here
+ kvm01:
+ vcpus: 4
+ ram: 14336
+ kvm02:
+ vcpus: 4
+ ram: 14336
+ kvm03:
+ vcpus: 4
+ ram: 14336
+ cmp001:
+ vcpus: 4
+ ram: 8192
+ cmp002:
+ vcpus: 4
+ ram: 8192
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-cluster:
- domain: mcp-pike-ovn-ha.local
- states:
- - maas
- - baremetal_init
- - virtual_control_plane
- - openstack_ha
- - networks
-virtual:
- nodes:
- - cfg01
- - mas01
- cfg01:
- vcpus: 4
- ram: 6144
- mas01:
- vcpus: 4
- ram: 6144
--- /dev/null
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- set cluster = {'has_baremetal_nodes': False} %}
+{%- for node in conf.nodes %}
+  {%- if not cluster.has_baremetal_nodes and node.node.type == 'baremetal' %}
+ {%- do cluster.update({'has_baremetal_nodes': True}) %}
+ {%- endif %}
+{%- endfor %}
+---
+cluster:
+ domain: mcp-pike-ovn-ha.local
+ states:
+{%- if cluster.has_baremetal_nodes %}
+ - maas
+ - baremetal_init
+{%- endif %}
+{%- if conf.MCP_VCP %}
+ - virtual_control_plane
+{%- endif %}
+ - openstack_ha
+ - networks
+virtual:
+ nodes:
+ - cfg01
+{%- if cluster.has_baremetal_nodes %}
+ - mas01
+{%- endif %}
+{#- Most likely, controllers will always have the same type and number (3) #}
+{%- if conf.nodes[nm.ctl01.idx].node.type == 'virtual' %}
+ - kvm01
+ - kvm02
+ - kvm03
+{%- endif %}
+{#- Later, we might have mixed computes here, for hybrid multi-arch testing #}
+{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
+ - cmp001
+ - cmp002
+{%- endif %}
+ cfg01:
+ vcpus: 4
+ ram: 6144
+  # The values below are only used for nodes defined in virtual.nodes above
+ mas01:
+ vcpus: 4
+ ram: 6144
+ # NOTE: We might need to add more RAM here
+ kvm01:
+ vcpus: 4
+ ram: 14336
+ kvm02:
+ vcpus: 4
+ ram: 14336
+ kvm03:
+ vcpus: 4
+ ram: 14336
+ cmp001:
+ vcpus: 4
+ ram: 8192
+ cmp002:
+ vcpus: 4
+ ram: 8192
**/*_pdf.yml
+mcp-pike-common-ha/infra/init.yml
+mcp-pike-common-ha/openstack_control.yml
+mcp-pike-common-ha/openstack_init.yml
+mcp-pike-common-ha/openstack_interface_vcp_biport.yml
+mcp-pike-common-ha/openstack_interface_vcp_triport.yml
+mcp-pike-common-ha/openstack_proxy.yml
+mcp-pike-ovs-ha/infra/init_vcp.yml
+mcp-pike-ovs-ha/infra/kvm.yml
+mcp-pike-ovs-dpdk-ha/infra/init_vcp.yml
+mcp-pike-ovs-dpdk-ha/infra/kvm.yml
+mcp-pike-odl-ha/infra/init_vcp.yml
+mcp-pike-odl-ha/infra/kvm.yml
+mcp-pike-ovn-ha/infra/init_vcp.yml
+mcp-pike-ovn-ha/infra/kvm.yml
+mcp-pike-odl-ha/opendaylight/control.yml
+mcp-pike-odl-ha/openstack/init.yml
- cluster.mcp-pike-common-ha.openstack_control_init
params:
linux_system_codename: xenial
+ # NOTE: When VCP is present, external_address is not used
+ external_address: ${_param:openstack_proxy_node01_address}
openstack_control_node02:
params:
linux_system_codename: xenial
+ external_address: 0.0.0.0
openstack_control_node03:
params:
linux_system_codename: xenial
+ external_address: ${_param:openstack_proxy_node02_address}
openstack_database_node01:
classes:
- cluster.mcp-pike-common-ha.openstack_database_init
infra_maas_node01_deploy_address: ${_param:opnfv_infra_maas_node01_deploy_address}
infra_kvm_address: ${_param:opnfv_infra_kvm_address}
+{%- if conf.MCP_VCP %}
infra_kvm_node01_address: ${_param:opnfv_infra_kvm_node01_address}
infra_kvm_node02_address: ${_param:opnfv_infra_kvm_node02_address}
infra_kvm_node03_address: ${_param:opnfv_infra_kvm_node03_address}
+{%- else %}
+ # For NOVCP, we override kvm addresses to overlap with ctl
+ infra_kvm_node01_address: ${_param:openstack_control_node01_address}
+ infra_kvm_node02_address: ${_param:openstack_control_node02_address}
+ infra_kvm_node03_address: ${_param:openstack_control_node03_address}
+{%- endif %}
infra_maas_node01_hostname: mas01
infra_kvm_node01_hostname: kvm01
- system.glusterfs.client.cluster
- system.nova.compute.cluster
- system.nova.compute.nfv.hugepages
- - system.nova.compute.nfv.cpu_pinning
- system.neutron.gateway.cluster
- system.cinder.volume.single
- system.cinder.volume.backend.lvm
{%- set nics = { nm.cmp001.nic_admin: True, nm.cmp001.nic_mgmt: True } %}
{%- set vlans = { nm.vlan_admin: nm.cmp001.nic_admin, nm.vlan_mgmt: nm.cmp001.nic_mgmt } %}
---
+{%- if not conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
+classes:
+ - system.nova.compute.nfv.cpu_pinning
+{%- endif %}
parameters:
_param:
# Should later be determined via PDF/IDF, AArch64 has ESP on /dev/sda1
-{%- if conf.nodes[nm.cmp001.idx].node.arch == 'aarch64' or
- conf.nodes[nm.cmp001.idx].disks.0.disk_capacity | storage_size_num | float > 2000000000000 %}
+{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
+ ~cinder_lvm_devices: ['/dev/vdb']
+{%- elif conf.nodes[nm.cmp001.idx].node.arch == 'aarch64' or
+ conf.nodes[nm.cmp001.idx].disks.0.disk_capacity | storage_size_num | float > 2000000000000 %}
~cinder_lvm_devices: ['/dev/sda2']
{%- else %}
~cinder_lvm_devices: ['/dev/sda1']
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - system.linux.system.repo.glusterfs
- - system.ceilometer.client
- - system.memcached.server.single
- - system.keystone.server.cluster
- - system.keystone.server.wsgi
- - system.glance.control.cluster
- - system.nova.control.cluster
- - system.cinder.control.cluster
- - system.cinder.control.backend.lvm
- - system.heat.server.cluster
- - system.designate.server.cluster
- - system.designate.server.backend.bind
- - system.bind.server.single
- - system.haproxy.proxy.listen.openstack.nova-placement
- - system.haproxy.proxy.listen.openstack.glare
- - system.glusterfs.client.cluster
- - system.glusterfs.client.volume.glance
- - system.glusterfs.client.volume.keystone
-parameters:
- _param:
- keepalived_vip_interface: ${_param:single_nic}
- keepalived_vip_virtual_router_id: 50
- cluster_vip_address: ${_param:openstack_control_address}
- cluster_local_address: ${_param:single_address}
- cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
- cluster_node01_address: ${_param:openstack_control_node01_address}
- cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
- cluster_node02_address: ${_param:openstack_control_node02_address}
- cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
- cluster_node03_address: ${_param:openstack_control_node03_address}
- nova_vncproxy_url: https://${_param:cluster_public_host}:6080
- glusterfs_version: '3.13'
- heat:
- server:
- metadata:
- host: ${_param:openstack_proxy_control_address}
- port: 8000
- protocol: http
- waitcondition:
- host: ${_param:openstack_proxy_control_address}
- port: 8000
- protocol: http
- watch:
- host: ${_param:openstack_proxy_control_address}
- port: 8003
- protocol: http
- nova:
- controller:
- pkgs:
- - nova-api
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
- - python-novaclient
- neutron:
- server:
- vlan_aware_vms: true
- keystone:
- server:
- cacert: /etc/ssl/certs/mcp_os_cacert
- bind:
- server:
- control:
- mgmt:
- enabled: true
- bind:
- address: ${_param:single_address}
- port: 953
- allow:
- - ${_param:openstack_control_node01_address}
- - ${_param:openstack_control_node02_address}
- - ${_param:openstack_control_node03_address}
- keys:
- - designate
- designate:
- server:
- pools:
- default:
- description: 'test pool'
- targets:
- default:
- description: 'test target1'
- default1:
- type: ${_param:designate_pool_target_type}
- description: 'test target2'
- masters: ${_param:designate_pool_target_masters}
- options:
- host: ${_param:openstack_control_node02_address}
- port: 53
- rndc_host: ${_param:openstack_control_node02_address}
- rndc_port: 953
- rndc_key_file: /etc/designate/rndc.key
- default2:
- type: ${_param:designate_pool_target_type}
- description: 'test target3'
- masters: ${_param:designate_pool_target_masters}
- options:
- host: ${_param:openstack_control_node03_address}
- port: 53
- rndc_host: ${_param:openstack_control_node03_address}
- rndc_port: 953
- rndc_key_file: /etc/designate/rndc.key
- system.glusterfs.client.cluster
- system.glusterfs.client.volume.glance
- system.glusterfs.client.volume.keystone
+{%- if not conf.MCP_VCP %}
# sync from kvm
- service.keepalived.cluster.single
- system.glusterfs.server.volume.glance
# - system.salt.control.cluster.stacklight_log_cluster
# - system.salt.control.cluster.stacklight_telemetry_cluster
- cluster.mcp-pike-common-ha.infra.kvm_pdf
- - cluster.mcp-pike-common-ha.include.proxy
+ - cluster.mcp-pike-common-ha.include.maas_proxy
+ - cluster.mcp-pike-common-ha.include.lab_proxy_pdf
+{%- endif %}
parameters:
_param:
+{%- if not conf.MCP_VCP %}
linux_system_codename: xenial # sync from kvm
# For NOVCP, we switch keepalived VIPs, to keep cluster_vip_address in ctl
single_nic: br-ctl # for keepalive_vip_interface interpolation
- keepalived_vip_interface: ${_param:single_nic} # sync from kvm
- keepalived_vip_virtual_router_id: 50
+ control_nic: ~ # Dummy value to keep reclass 1.5.2 happy
keepalived_openstack_web_public_vip_address: ${_param:openstack_proxy_address}
keepalived_openstack_web_public_vip_interface: br-ex
+{%- endif %}
+ keepalived_vip_interface: ${_param:single_nic}
+ keepalived_vip_virtual_router_id: 50
cluster_vip_address: ${_param:openstack_control_address}
cluster_local_address: ${_param:single_address}
cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
cluster_node03_address: ${_param:openstack_control_node03_address}
nova_vncproxy_url: https://${_param:cluster_public_host}:6080
glusterfs_version: '3.13'
+{%- if conf.MCP_VCP %}
+ heat:
+ server:
+ metadata:
+ host: ${_param:openstack_proxy_control_address}
+ port: 8000
+ protocol: http
+ waitcondition:
+ host: ${_param:openstack_proxy_control_address}
+ port: 8000
+ protocol: http
+ watch:
+ host: ${_param:openstack_proxy_control_address}
+ port: 8003
+ protocol: http
+ nova:
+ controller:
+ pkgs:
+ - nova-api
+ - nova-conductor
+ - nova-consoleauth
+ - nova-novncproxy
+ - nova-scheduler
+ - python-novaclient
+{%- else %}
libvirt:
server:
service: libvirtd
unix_sock_group: libvirt
linux:
network:
- remove_iface_files:
- - '/etc/network/interfaces.d/50-cloud-init.cfg'
# Add public IPs here as overrides, no need to fork another kvm_pdf.j2
interface:
br-ex:
address: ${_param:external_address}
proto: static
+ apache:
+ server:
+ bind:
+ ~ports: ~
+ ~modules:
+ - rewrite
+ - wsgi
+ # sync from common-ha kvm role
+ glusterfs:
+ server:
+ service: glusterd
+ volumes:
+ nova_instances:
+ storage: /srv/glusterfs/nova_instances
+ replica: 3
+ bricks:
+ - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
+ - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
+ - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
+ options:
+ cluster.readdir-optimize: 'True'
+ nfs.disable: 'True'
+ network.remote-dio: 'True'
+ cluster.favorite-child-policy: mtime
+ diagnostics.client-log-level: WARNING
+ diagnostics.brick-log-level: WARNING
+{%- endif %}
neutron:
server:
vlan_aware_vms: true
rndc_host: ${_param:openstack_control_node03_address}
rndc_port: 953
rndc_key_file: /etc/designate/rndc.key
- # sync from common-ha kvm role
- glusterfs:
- server:
- service: glusterd
- volumes:
- nova_instances:
- storage: /srv/glusterfs/nova_instances
- replica: 3
- bricks:
- - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
- - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
- - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
- options:
- cluster.readdir-optimize: 'True'
- nfs.disable: 'True'
- network.remote-dio: 'True'
- cluster.favorite-child-policy: mtime
- diagnostics.client-log-level: WARNING
- diagnostics.brick-log-level: WARNING
openstack_version: pike
# openstack service addresses
+{%- if conf.MCP_VCP %}
openstack_proxy_control_address: ${_param:opnfv_openstack_proxy_control_address}
openstack_proxy_node01_control_address: ${_param:opnfv_openstack_proxy_node01_control_address}
openstack_proxy_node02_control_address: ${_param:opnfv_openstack_proxy_node02_control_address}
+{%- else %}
+ openstack_proxy_control_address: ${_param:opnfv_openstack_control_address}
+ openstack_proxy_node01_control_address: ${_param:opnfv_openstack_control_node01_address}
+ openstack_proxy_node02_control_address: ${_param:opnfv_openstack_control_node03_address}
+{%- endif %}
+
openstack_proxy_address: ${_param:opnfv_openstack_proxy_address}
openstack_proxy_node01_address: ${_param:opnfv_openstack_proxy_node01_address}
openstack_proxy_node02_address: ${_param:opnfv_openstack_proxy_node02_address}
+
openstack_control_address: ${_param:opnfv_openstack_control_address}
openstack_control_node01_address: ${_param:opnfv_openstack_control_node01_address}
openstack_control_node02_address: ${_param:opnfv_openstack_control_node02_address}
openstack_control_node03_address: ${_param:opnfv_openstack_control_node03_address}
+
+{%- if conf.MCP_VCP %}
openstack_database_address: ${_param:opnfv_openstack_database_address}
openstack_database_node01_address: ${_param:opnfv_openstack_database_node01_address}
openstack_database_node02_address: ${_param:opnfv_openstack_database_node02_address}
openstack_database_node03_address: ${_param:opnfv_openstack_database_node03_address}
+
openstack_message_queue_address: ${_param:opnfv_openstack_message_queue_address}
openstack_message_queue_node01_address: ${_param:opnfv_openstack_message_queue_node01_address}
openstack_message_queue_node02_address: ${_param:opnfv_openstack_message_queue_node02_address}
openstack_message_queue_node03_address: ${_param:opnfv_openstack_message_queue_node03_address}
-
- openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_hostname: mdb03
-
openstack_telemetry_address: ${_param:opnfv_openstack_telemetry_address}
openstack_telemetry_node01_address: ${_param:opnfv_openstack_telemetry_node01_address}
openstack_telemetry_node02_address: ${_param:opnfv_openstack_telemetry_node02_address}
openstack_telemetry_node03_address: ${_param:opnfv_openstack_telemetry_node03_address}
+{%- else %}
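+  # NOVCP: database, message queue and telemetry addresses alias the control nodes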
+ openstack_database_address: ${_param:openstack_control_address}
+ openstack_database_node01_address: ${_param:openstack_control_node01_address}
+ openstack_database_node02_address: ${_param:openstack_control_node02_address}
+ openstack_database_node03_address: ${_param:openstack_control_node03_address}
+
+ openstack_message_queue_address: ${_param:openstack_control_address}
+ openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
+ openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
+ openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
+
+ openstack_telemetry_address: ${_param:openstack_control_address}
+ openstack_telemetry_node01_address: ${_param:openstack_control_node01_address}
+ openstack_telemetry_node02_address: ${_param:openstack_control_node02_address}
+ openstack_telemetry_node03_address: ${_param:openstack_control_node03_address}
+{%- endif %}
# OpenStack Compute
openstack_compute_node01_single_address: ${_param:opnfv_openstack_compute_node01_single_address}
openstack_compute_node02_external_address: ${_param:opnfv_openstack_compute_node02_external_address}
# openstack service hostnames
+{%- if conf.MCP_VCP %}
openstack_proxy_hostname: prx
openstack_proxy_node01_hostname: prx01
openstack_proxy_node02_hostname: prx02
openstack_message_queue_node01_hostname: msg01
openstack_message_queue_node02_hostname: msg02
openstack_message_queue_node03_hostname: msg03
+ openstack_telemetry_hostname: mdb
+ openstack_telemetry_node01_hostname: mdb01
+ openstack_telemetry_node02_hostname: mdb02
+ openstack_telemetry_node03_hostname: mdb03
+{%- else %}
+ openstack_proxy_hostname: ${_param:openstack_control_hostname}
+ openstack_proxy_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_proxy_node02_hostname: ${_param:openstack_control_node03_hostname}
+ openstack_control_hostname: kvm
+ openstack_control_node01_hostname: kvm01
+ openstack_control_node02_hostname: kvm02
+ openstack_control_node03_hostname: kvm03
+ openstack_database_hostname: ${_param:openstack_control_hostname}
+ openstack_database_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_database_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_database_node03_hostname: ${_param:openstack_control_node03_hostname}
+ openstack_message_queue_hostname: ${_param:openstack_control_hostname}
+ openstack_message_queue_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_message_queue_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_message_queue_node03_hostname: ${_param:openstack_control_node03_hostname}
+ openstack_telemetry_hostname: ${_param:openstack_control_hostname}
+ openstack_telemetry_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_telemetry_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_telemetry_node03_hostname: ${_param:openstack_control_node03_hostname}
+{%- endif %}
# openstack compute
openstack_compute_node01_hostname: cmp001
openstack_compute_node02_hostname: cmp002
+ # opendaylight options
+{%- if conf.MCP_VCP %}
+ opendaylight_server_node01_hostname: odl01
+ opendaylight_server_node01_single_address: ${_param:opnfv_opendaylight_server_node01_single_address}
+{%- else %}
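+  # NOVCP: co-locate OpenDaylight on the second control node (openstack_control_node02)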
+ opendaylight_control_hostname: ${_param:openstack_control_node02_hostname}
+ opendaylight_server_node01_hostname: ${_param:opendaylight_control_hostname}
+ opendaylight_server_node01_single_address: ${_param:opnfv_openstack_control_node02_address}
+{%- endif %}
+
openstack_region: RegionOne
admin_email: root@localhost
# Neutron osv/nodvr
nova_service_host: ${_param:openstack_control_address}
neutron_version: ${_param:openstack_version}
neutron_service_host: ${_param:openstack_control_address}
+{%- if conf.MCP_VCP %}
glusterfs_service_host: ${_param:infra_kvm_address}
+{%- else %}
+ glusterfs_service_host: ${_param:openstack_control_address}
+{%- endif %}
mysql_admin_user: root
mysql_admin_password: opnfv_secret
mysql_cinder_password: opnfv_secret
net.ipv4.tcp_fin_timeout: 30
network:
host:
+{%- if conf.MCP_VCP %}
prx:
address: ${_param:openstack_proxy_control_address}
names:
names:
- ${_param:openstack_telemetry_node03_hostname}
- ${_param:openstack_telemetry_node03_hostname}.${_param:cluster_domain}
+{%- else %}
+ kvm:
+ address: ${_param:openstack_control_address}
+ names:
+ - ${_param:openstack_control_hostname}
+ - ${_param:openstack_control_hostname}.${_param:cluster_domain}
+ kvm01:
+ address: ${_param:openstack_control_node01_address}
+ names:
+ - ${_param:openstack_control_node01_hostname}
+ - ${_param:openstack_control_node01_hostname}.${_param:cluster_domain}
+ kvm02:
+ address: ${_param:openstack_control_node02_address}
+ names:
+ - ${_param:openstack_control_node02_hostname}
+ - ${_param:openstack_control_node02_hostname}.${_param:cluster_domain}
+ kvm03:
+ address: ${_param:openstack_control_node03_address}
+ names:
+ - ${_param:openstack_control_node03_hostname}
+ - ${_param:openstack_control_node03_hostname}.${_param:cluster_domain}
+{%- endif %}
cmp001:
address: ${_param:openstack_compute_node01_control_address}
names:
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-parameters:
- _param:
-
- openstack_version: pike
-
- # openstack service addresses
- openstack_proxy_control_address: ${_param:opnfv_openstack_control_address}
- openstack_proxy_node01_control_address: ${_param:opnfv_openstack_control_node01_address}
- openstack_proxy_node02_control_address: ${_param:opnfv_openstack_control_node03_address}
-
- openstack_proxy_address: ${_param:opnfv_openstack_proxy_address}
- openstack_proxy_node01_address: ${_param:opnfv_openstack_proxy_node01_address}
- openstack_proxy_node02_address: ${_param:opnfv_openstack_proxy_node02_address}
-
- openstack_control_address: ${_param:opnfv_openstack_control_address}
- openstack_control_node01_address: ${_param:opnfv_openstack_control_node01_address}
- openstack_control_node02_address: ${_param:opnfv_openstack_control_node02_address}
- openstack_control_node03_address: ${_param:opnfv_openstack_control_node03_address}
-
- openstack_database_address: ${_param:openstack_control_address}
- openstack_database_node01_address: ${_param:openstack_control_node01_address}
- openstack_database_node02_address: ${_param:openstack_control_node02_address}
- openstack_database_node03_address: ${_param:openstack_control_node03_address}
-
- openstack_message_queue_address: ${_param:openstack_control_address}
- openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
- openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
- openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
-
- openstack_telemetry_address: ${_param:openstack_control_address}
- openstack_telemetry_node01_address: ${_param:openstack_control_node01_address}
- openstack_telemetry_node02_address: ${_param:openstack_control_node02_address}
- openstack_telemetry_node03_address: ${_param:openstack_control_node03_address}
-
- # OpenStack Compute
- openstack_compute_node01_single_address: ${_param:opnfv_openstack_compute_node01_single_address}
- openstack_compute_node02_single_address: ${_param:opnfv_openstack_compute_node02_single_address}
- openstack_compute_node03_single_address: ${_param:opnfv_openstack_compute_node03_single_address}
- openstack_compute_node01_control_address: ${_param:opnfv_openstack_compute_node01_control_address}
- openstack_compute_node02_control_address: ${_param:opnfv_openstack_compute_node02_control_address}
- openstack_compute_node03_control_address: ${_param:opnfv_openstack_compute_node03_control_address}
- openstack_compute_node01_tenant_address: ${_param:opnfv_openstack_compute_node01_tenant_address}
- openstack_compute_node02_tenant_address: ${_param:opnfv_openstack_compute_node02_tenant_address}
- openstack_compute_node03_tenant_address: ${_param:opnfv_openstack_compute_node03_tenant_address}
- openstack_compute_node01_external_address: ${_param:opnfv_openstack_compute_node01_external_address}
- openstack_compute_node02_external_address: ${_param:opnfv_openstack_compute_node02_external_address}
-
- # openstack service hostnames
- openstack_proxy_hostname: ${_param:openstack_control_hostname}
- openstack_proxy_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_proxy_node02_hostname: ${_param:openstack_control_node03_hostname}
- openstack_control_hostname: kvm
- openstack_control_node01_hostname: kvm01
- openstack_control_node02_hostname: kvm02
- openstack_control_node03_hostname: kvm03
- openstack_database_hostname: ${_param:openstack_control_hostname}
- openstack_database_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_database_node02_hostname: ${_param:openstack_control_node02_hostname}
- openstack_database_node03_hostname: ${_param:openstack_control_node03_hostname}
- openstack_message_queue_hostname: ${_param:openstack_control_hostname}
- openstack_message_queue_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_message_queue_node02_hostname: ${_param:openstack_control_node02_hostname}
- openstack_message_queue_node03_hostname: ${_param:openstack_control_node03_hostname}
- openstack_telemetry_hostname: ${_param:openstack_control_hostname}
- openstack_telemetry_node01_hostname: ${_param:openstack_control_node01_hostname}
- openstack_telemetry_node02_hostname: ${_param:openstack_control_node02_hostname}
- openstack_telemetry_node03_hostname: ${_param:openstack_control_node03_hostname}
-
- # openstack compute
- openstack_compute_node01_hostname: cmp001
- openstack_compute_node02_hostname: cmp002
-
- openstack_region: RegionOne
- admin_email: root@localhost
- # Neutron osv/nodvr
- neutron_control_dvr: 'False'
- neutron_l3_ha: 'True'
- neutron_global_physnet_mtu: 1500
- neutron_external_mtu: 1500
- neutron_gateway_dvr: 'False'
- neutron_gateway_agent_mode: legacy
- neutron_compute_dvr: 'False'
- neutron_compute_agent_mode: legacy
- neutron_compute_external_access: 'True'
- galera_server_cluster_name: openstack_cluster
- galera_server_maintenance_password: opnfv_secret
- galera_server_admin_password: opnfv_secret
- rabbitmq_secret_key: opnfv_secret
- rabbitmq_admin_password: opnfv_secret
- rabbitmq_openstack_password: opnfv_secret
- glance_version: ${_param:openstack_version}
- glance_service_host: ${_param:openstack_control_address}
- keystone_version: ${_param:openstack_version}
- keystone_service_host: ${_param:openstack_control_address}
- heat_version: ${_param:openstack_version}
- heat_service_host: ${_param:openstack_control_address}
- heat_domain_admin_password: opnfv_secret
- cinder_version: ${_param:openstack_version}
- cinder_service_host: ${_param:openstack_control_address}
- ceilometer_version: ${_param:openstack_version}
- ceilometer_service_host: ${_param:openstack_telemetry_address}
- ceilometer_influxdb_password: opnfv_secret
- nova_version: ${_param:openstack_version}
- nova_service_host: ${_param:openstack_control_address}
- neutron_version: ${_param:openstack_version}
- neutron_service_host: ${_param:openstack_control_address}
- glusterfs_service_host: ${_param:openstack_control_address}
- mysql_admin_user: root
- mysql_admin_password: opnfv_secret
- mysql_cinder_password: opnfv_secret
- mysql_ceilometer_password: opnfv_secret
- mysql_glance_password: opnfv_secret
- mysql_grafana_password: opnfv_secret
- mysql_heat_password: opnfv_secret
- mysql_keystone_password: opnfv_secret
- mysql_neutron_password: opnfv_secret
- mysql_nova_password: opnfv_secret
- mysql_aodh_password: opnfv_secret
- mysql_designate_password: opnfv_secret
- aodh_version: ${_param:openstack_version}
- keystone_aodh_password: opnfv_secret
- keystone_service_token: opnfv_secret
- keystone_admin_password: opnfv_secret
- keystone_ceilometer_password: opnfv_secret
- keystone_cinder_password: opnfv_secret
- keystone_glance_password: opnfv_secret
- keystone_heat_password: opnfv_secret
- keystone_keystone_password: opnfv_secret
- keystone_neutron_password: opnfv_secret
- keystone_nova_password: opnfv_secret
- keystone_designate_password: opnfv_secret
- ceilometer_secret_key: opnfv_secret
- horizon_version: ${_param:openstack_version}
- horizon_secret_key: opaesee8Que2yahJoh9fo0eefo1Aeyo6ahyei8zeiboh3aeth5loth7ieNa5xi5e
- horizon_identity_host: ${_param:openstack_control_address}
- horizon_identity_encryption: none
- horizon_identity_version: 3
- mongodb_server_replica_set: ceilometer
- mongodb_ceilometer_password: opnfv_secret
- mongodb_admin_password: opnfv_secret
- mongodb_shared_key: eoTh1AwahlahqueingeejooLughah4tei9feing0eeVaephooDi2li1TaeV1ooth
- metadata_password: opnfv_secret
- openstack_telemetry_keepalived_password: opnfv_secret
- aodh_service_host: ${_param:openstack_telemetry_address}
- designate_service_host: ${_param:openstack_control_address}
- designate_bind9_rndc_key: 4pc+X4PDqb2q+5o72dISm72LM1Ds9X2EYZjqg+nmsS7FhdTwzFFY8l/iEDmHxnyjkA33EQC8H+z0fLLBunoitw==
- designate_domain_id: 5186883b-91fb-4891-bd49-e6769234a8fc
- designate_pool_ns_records:
- - hostname: 'ns1.example.org.'
- priority: 10
- designate_pool_nameservers:
- - host: ${_param:openstack_control_node01_address}
- port: 53
- - host: ${_param:openstack_control_node02_address}
- port: 53
- - host: ${_param:openstack_control_node03_address}
- port: 53
- designate_pool_target_type: bind9
- designate_pool_target_masters:
- - host: ${_param:openstack_control_node01_address}
- port: 5354
- - host: ${_param:openstack_control_node02_address}
- port: 5354
- - host: ${_param:openstack_control_node03_address}
- port: 5354
- designate_pool_target_options:
- host: ${_param:openstack_control_node01_address}
- port: 53
- rndc_host: ${_param:openstack_control_node01_address}
- rndc_port: 953
- rndc_key_file: /etc/designate/rndc.key
- designate_version: ${_param:openstack_version}
- # Billing
- # keystone_billometer_password: opnfv_secret
- # keystone_billometer_address: ${_param:billometer_service_host}
- # billometer_service_host: ${_param:openstack_billing_address}
- # billometer_version: ${_param:openstack_version}
- # billometer_secret_key: opnfv_secretpasswordpasswordpassword
- # billometer_identity_password: ${_param:keystone_billometer_password}
- # billometer_identity_host: ${_param:openstack_control_address}
- # billometer_identity_token: ${_param:keystone_service_token}
- linux:
- system:
- repo:
- uca:
- source: "deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial-updates/${_param:openstack_version} main"
- architectures: amd64
- key_id: EC4926EA
- key_server: keyserver.ubuntu.com
- kernel:
- sysctl:
- net.ipv4.tcp_congestion_control: yeah
- net.ipv4.tcp_slow_start_after_idle: 0
- net.ipv4.tcp_fin_timeout: 30
- network:
- host:
- kvm:
- address: ${_param:openstack_control_address}
- names:
- - ${_param:openstack_control_hostname}
- - ${_param:openstack_control_hostname}.${_param:cluster_domain}
- kvm01:
- address: ${_param:openstack_control_node01_address}
- names:
- - ${_param:openstack_control_node01_hostname}
- - ${_param:openstack_control_node01_hostname}.${_param:cluster_domain}
- kvm02:
- address: ${_param:openstack_control_node02_address}
- names:
- - ${_param:openstack_control_node02_hostname}
- - ${_param:openstack_control_node02_hostname}.${_param:cluster_domain}
- kvm03:
- address: ${_param:openstack_control_node03_address}
- names:
- - ${_param:openstack_control_node03_hostname}
- - ${_param:openstack_control_node03_hostname}.${_param:cluster_domain}
- cmp001:
- address: ${_param:openstack_compute_node01_control_address}
- names:
- - ${_param:openstack_compute_node01_hostname}
- - ${_param:openstack_compute_node01_hostname}.${_param:cluster_domain}
- cmp002:
- address: ${_param:openstack_compute_node02_control_address}
- names:
- - ${_param:openstack_compute_node02_hostname}
- - ${_param:openstack_compute_node02_hostname}.${_param:cluster_domain}
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
classes:
- cluster.mcp-pike-common-ha.include.maas_proxy
- cluster.mcp-pike-common-ha.include.lab_proxy_pdf
name: ${_param:single_nic}
address: ${_param:single_address}
netmask: ${_param:opnfv_net_public_mask}
+{%- endif %}
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
classes:
- cluster.mcp-pike-common-ha.include.maas_proxy
- cluster.mcp-pike-common-ha.include.lab_proxy_pdf
name: ${_param:control_nic}
address: ${_param:control_address}
netmask: 255.255.255.0
+{%- endif %}
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - system.nginx.server.single
- - system.nginx.server.proxy.openstack_api
- - system.nginx.server.proxy.openstack_vnc
- - system.nginx.server.proxy.openstack_web
- - system.nginx.server.proxy.openstack.aodh
- - system.nginx.server.proxy.openstack.ceilometer
- - system.horizon.server.single
- - system.salt.minion.cert.proxy
- - system.sphinx.server.doc.reclass
- - service.keepalived.cluster.single
- - system.keepalived.cluster.instance.openstack_web_public_vip
-parameters:
- _param:
- cluster_vip_address: ${_param:openstack_proxy_address}
- keepalived_openstack_web_public_vip_address: ${_param:cluster_vip_address}
- keepalived_openstack_web_public_vip_interface: ${_param:single_nic}
- keepalived_vip_address: ${_param:openstack_proxy_control_address}
- keepalived_vip_interface: ${_param:control_nic}
- keepalived_vip_virtual_router_id: 240
- nginx_proxy_ssl:
- enabled: true
- authority: ${_param:salt_minion_ca_authority}
- engine: salt
- mode: secure
- salt_minion_ca_host: cfg01.${_param:cluster_domain}
- linux:
- system:
- package:
- libapache2-mod-wsgi:
- version: latest
- salt:
- minion:
- cert:
- proxy:
- alternative_names: "IP:${_param:openstack_proxy_address}"
- key_usage: 'digitalSignature, keyEncipherment'
- keepalived:
- cluster:
- vrrp_scripts:
- check_pidof:
- args: 'nginx'
##############################################################################
---
classes:
- - cluster.mcp-pike-common-ha.openstack_proxy
+ - system.nginx.server.single
+ - system.nginx.server.proxy.openstack_api
+ - system.nginx.server.proxy.openstack_vnc
+ - system.nginx.server.proxy.openstack_web
+ - system.nginx.server.proxy.openstack.aodh
+ - system.nginx.server.proxy.openstack.ceilometer
+ - system.horizon.server.single
+ - system.salt.minion.cert.proxy
+ - system.sphinx.server.doc.reclass
+ - service.keepalived.cluster.single
- system.keepalived.cluster.instance.openstack_web_public_vip
parameters:
+ _param:
+ cluster_vip_address: ${_param:openstack_proxy_address}
+ keepalived_openstack_web_public_vip_address: ${_param:cluster_vip_address}
+ keepalived_openstack_web_public_vip_interface: ${_param:single_nic}
+ keepalived_vip_address: ${_param:openstack_proxy_control_address}
+ keepalived_vip_interface: ${_param:control_nic}
+ keepalived_vip_virtual_router_id: 240
+ nginx_proxy_ssl:
+ enabled: true
+ authority: ${_param:salt_minion_ca_authority}
+ engine: salt
+ mode: secure
+ salt_minion_ca_host: cfg01.${_param:cluster_domain}
linux:
+ system:
+ package:
+ libapache2-mod-wsgi:
+ version: latest
+{%- if not conf.MCP_VCP %}
# Set up routes similar to prx*ovs-ha
network:
interface:
<<: *nginx_openstack_proxy_address
nginx_static_reclass_doc:
<<: *nginx_openstack_proxy_address
+{%- endif %}
+ salt:
+ minion:
+ cert:
+ proxy:
+ alternative_names: "IP:${_param:openstack_proxy_address}"
+ key_usage: 'digitalSignature, keyEncipherment'
+ keepalived:
+ cluster:
+ vrrp_scripts:
+ check_pidof:
+ args: 'nginx'
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
classes:
- - cluster.mcp-pike-common-ha.openstack_message_queue
+ - cluster.mcp-pike-odl-ha.infra
+{%- endif %}
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
+# NOTE(armband): we don't want to pull in salt.control for novcp
classes:
- cluster.mcp-pike-common-ha.infra.kvm
- cluster.mcp-pike-odl-ha.infra
provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
image: ${_param:salt_control_xenial_image}
size: opendaylight.server
+{%- endif %}
classes:
- service.opendaylight.server.single
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
+{%- if conf.MCP_VCP %}
- cluster.mcp-pike-odl-ha
+{%- endif %}
parameters:
_param:
linux_system_codename: xenial
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_database
- - cluster.mcp-pike-odl-ha.infra
+ - cluster.mcp-pike-odl-ha.infra.init_vcp
parameters:
_param:
# opendaylight options
- opendaylight_server_node01_single_address: ${_param:opnfv_opendaylight_server_node01_single_address}
opendaylight_service_host: ${_param:opendaylight_server_node01_single_address}
- opendaylight_server_node01_hostname: odl01
neutron_tenant_network_types: "flat,vxlan"
+{%- if conf.MCP_VCP %}
linux:
network:
host:
names:
- ${_param:opendaylight_server_node01_hostname}
- ${_param:opendaylight_server_node01_hostname}.${_param:cluster_domain}
+{%- endif %}
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_message_queue
- - cluster.mcp-pike-odl-ha.infra
+ - cluster.mcp-pike-odl-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_triport
- cluster.mcp-pike-common-ha.openstack_proxy
- - cluster.mcp-pike-odl-ha.infra
+ - cluster.mcp-pike-odl-ha.infra.init_vcp
parameters:
nginx:
server:
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_telemetry
- - cluster.mcp-pike-odl-ha.infra
+ - cluster.mcp-pike-odl-ha.infra.init_vcp
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
classes:
- - cluster.mcp-pike-common-ha.infra.kvm
- cluster.mcp-pike-ovn-ha.infra
+{%- endif %}
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
# NOTE(armband): we don't want to pull in salt.control for novcp
-# classes:
-# - cluster.mcp-pike-common-ha.infra.kvm
-# - cluster.mcp-pike-ovs-novcp-ha.infra
+classes:
+ - cluster.mcp-pike-common-ha.infra.kvm
+ - cluster.mcp-pike-ovn-ha.infra
+{%- endif %}
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_database
- - cluster.mcp-pike-ovn-ha.infra
+  - cluster.mcp-pike-ovn-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_message_queue
- - cluster.mcp-pike-ovn-ha.infra
+  - cluster.mcp-pike-ovn-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_triport
- cluster.mcp-pike-common-ha.openstack_proxy
- - cluster.mcp-pike-ovn-ha.infra
+  - cluster.mcp-pike-ovn-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_telemetry
- - cluster.mcp-pike-ovn-ha.infra
+  - cluster.mcp-pike-ovn-ha.infra.init_vcp
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
classes:
- - cluster.mcp-pike-common-ha.infra.kvm
- cluster.mcp-pike-ovs-dpdk-ha.infra
+{%- endif %}
--- /dev/null
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+{%- if conf.MCP_VCP %}
+# NOTE(armband): we don't want to pull in salt.control for novcp
+classes:
+ - cluster.mcp-pike-common-ha.infra.kvm
+ - cluster.mcp-pike-ovs-dpdk-ha.infra
+{%- endif %}
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_database
- - cluster.mcp-pike-ovs-dpdk-ha.infra
+ - cluster.mcp-pike-ovs-dpdk-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_message_queue
- - cluster.mcp-pike-ovs-dpdk-ha.infra
+ - cluster.mcp-pike-ovs-dpdk-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_triport
- cluster.mcp-pike-common-ha.openstack_proxy
- - cluster.mcp-pike-ovs-dpdk-ha.infra
+ - cluster.mcp-pike-ovs-dpdk-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_telemetry
- - cluster.mcp-pike-ovs-dpdk-ha.infra
+ - cluster.mcp-pike-ovs-dpdk-ha.infra.init_vcp
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+{%- if conf.MCP_VCP %}
classes:
- - cluster.mcp-pike-common-ha.infra.kvm
- cluster.mcp-pike-ovs-ha.infra
+{%- endif %}
--- /dev/null
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+{%- if conf.MCP_VCP %}
+# NOTE(armband): we don't want to pull in salt.control for novcp
+classes:
+ - cluster.mcp-pike-common-ha.infra.kvm
+ - cluster.mcp-pike-ovs-ha.infra
+{%- endif %}
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_database
- - cluster.mcp-pike-ovs-ha.infra
+ - cluster.mcp-pike-ovs-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_message_queue
- - cluster.mcp-pike-ovs-ha.infra
+ - cluster.mcp-pike-ovs-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_triport
- cluster.mcp-pike-common-ha.openstack_proxy
- - cluster.mcp-pike-ovs-ha.infra
+ - cluster.mcp-pike-ovs-ha.infra.init_vcp
classes:
- cluster.mcp-pike-common-ha.openstack_interface_vcp_biport
- cluster.mcp-pike-common-ha.openstack_telemetry
- - cluster.mcp-pike-ovs-ha.infra
+ - cluster.mcp-pike-ovs-ha.infra.init_vcp
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-common-ha.infra.config
- - cluster.mcp-pike-ovs-novcp-ha.infra
- - cluster.all-mcp-arch-common.infra.config_pdf
-parameters:
- reclass:
- storage:
- node:
- openstack_control_node01: # openstack_proxy_node01
- params:
- external_address: ${_param:openstack_proxy_node01_address}
- openstack_control_node02: # no proxy role
- params:
- external_address: 0.0.0.0
- openstack_control_node03: # openstack_proxy_node02
- params:
- external_address: ${_param:openstack_proxy_node02_address}
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-common-ha.infra
- - cluster.mcp-pike-ovs-novcp-ha.openstack
-parameters:
- _param:
- cluster_name: mcp-pike-ovs-novcp-ha
- # For NOVCP, we override kvm addresses to overlap with ctl
- infra_kvm_node01_address: ${_param:openstack_control_node01_address}
- infra_kvm_node02_address: ${_param:openstack_control_node02_address}
- infra_kvm_node03_address: ${_param:openstack_control_node03_address}
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-common-ha.infra.maas
- - cluster.mcp-pike-ovs-novcp-ha.infra
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.all-mcp-arch-common
- - cluster.mcp-pike-ovs-novcp-ha.infra
- - cluster.mcp-pike-ovs-novcp-ha.openstack
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-common-ha.openstack_compute
- - cluster.mcp-pike-ovs-novcp-ha.openstack.compute_pdf
- - cluster.mcp-pike-ovs-novcp-ha.infra
-parameters:
- nova:
- compute:
- libvirt_service: libvirtd
- libvirt_bin: /etc/default/libvirtd
+++ /dev/null
-../../mcp-pike-ovs-ha/openstack/compute_pdf.yml.j2
\ No newline at end of file
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - system.neutron.control.openvswitch.cluster
- - cluster.mcp-pike-common-ha.openstack_control_novcp
- - cluster.mcp-pike-ovs-novcp-ha
-parameters:
- apache:
- server:
- modules:
- # NOTE(armband): We first override mods to ~, then to this to drop ssl
- - rewrite
- - wsgi
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-common-ha.openstack_database
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-common-ha.openstack_init_novcp
-parameters:
- _param:
- neutron_tenant_network_types: "flat,vxlan"
- apache:
- server:
- # NOTE(armband): override these to ~ first, so we can later drop ssl/443
- bind:
- ports: ~
- modules: ~
+++ /dev/null
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-common-ha.openstack_telemetry
##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
+++ /dev/null
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-pike-ovs-novcp-ha.infra.config
-parameters:
- _param:
- linux_system_codename: xenial
- reclass_data_revision: master
- linux:
- system:
- name: cfg01
- domain: mcp-pike-ovs-novcp-ha.local
wait_for 3.0 'salt -C "E@^(${NODE_MASK}).*" state.sls linux.system,linux.storage'
wait_for 2.0 'salt -C "E@^(${NODE_MASK}).*" state.sls linux.network' || true
+ salt -C "E@^(${NODE_MASK}).*" state.sls opnfv.route_wrapper
salt -C "E@^(${NODE_MASK}).*" system.reboot
wait_for 90.0 'salt -C "E@^(${NODE_MASK}).*" test.ping'
wait_for 3.0 'salt -C "E@^(${NODE_MASK}).*" pkg.upgrade refresh=False dist_upgrade=True'
{%- macro serialize_apt_pkg() -%}
{%- set arr = [] -%}
{%- set sections = [arch.common] -%}
- {%- if 'virtual_control_plane' in conf.cluster.states -%}
+ {%- if conf.MCP_VCP -%}
{%- do sections.append(arch.control) -%}
{%- endif -%}
{%- for c in sections -%}