#!/bin/bash -e
-# shellcheck disable=SC2034,SC2154,SC1090,SC1091
+# shellcheck disable=SC2034,SC2154,SC1090,SC1091,SC2155
##############################################################################
# Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
# jonas.bjurel@ericsson.com
source "${DEPLOY_DIR}/globals.sh"
source "${DEPLOY_DIR}/lib.sh"
+source "${DEPLOY_DIR}/lib_template.sh"
#
# END of variables to customize
# Clone git submodules and apply our patches
make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
-# Expand scenario files, pod_config based on PDF
-SCENARIO_DIR="$(readlink -f "../config/scenario")"
-do_templates "${REPO_ROOT_PATH}" "${STORAGE_DIR}" "${TARGET_LAB}" \
- "${TARGET_POD}" "${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
-
# Check scenario file existence
-if [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml" ]; then
+SCENARIO_DIR="$(readlink -f "../config/scenario")"
+if [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml" ] && \
+ [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml.j2" ]; then
notify_e "[ERROR] Scenario definition file is missing!"
fi
-# Check defaults file existence
-if [ ! -f "${SCENARIO_DIR}/defaults-$(uname -i).yaml" ]; then
- notify_e "[ERROR] Scenario defaults file is missing!"
-fi
-
-# Get scenario data and (jumpserver) arch defaults
-eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")"
-eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml")"
-export CLUSTER_DOMAIN=${cluster_domain}
-
# The SSH key might not exist yet ...
generate_ssh_key
export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")"
# Expand jinja2 templates based on PDF data and env vars
-do_templates "${REPO_ROOT_PATH}" "${STORAGE_DIR}" "${TARGET_LAB}" \
- "${TARGET_POD}" "${BASE_CONFIG_URI}"
-
-# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
-for node in "${virtual_nodes[@]}"; do
- virtual_custom_ram="virtual_${node}_ram"
- virtual_custom_vcpus="virtual_${node}_vcpus"
- virtual_nodes_data+="${node},"
- virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram},"
- virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|"
-done
-virtual_nodes_data=${virtual_nodes_data%|}
-
-# Serialize repos, packages to (pre-)install/remove for:
-# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
-# - virtualized control plane VM base image (only when VCP is used)
-base_image_flavors=common
-if [[ "${cluster_states[*]}" =~ virtual_control ]]; then
- base_image_flavors+=" control"
-fi
-for sc in ${base_image_flavors}; do
- for va in apt_keys apt_repos pkg_install pkg_remove; do
- key=virtual_${sc}_${va}
- eval "${key}=\${${key}[@]// /|}"
- eval "${key}=\${${key}// /,}"
- virtual_repos_pkgs+="${!key}^"
- done
-done
-virtual_repos_pkgs=${virtual_repos_pkgs%^}
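+# MCP_JUMP_ARCH selects the jumpserver arch section (x86_64 / aarch64) in the
+# consolidated defaults.yaml when the j2 templates below are expanded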
+export MCP_JUMP_ARCH=$(uname -i)
+do_templates_scenario "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
+ "${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
+do_templates_cluster "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
+ "${REPO_ROOT_PATH}" \
+ "${SCENARIO_DIR}/defaults.yaml" \
+ "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml"
# Determine additional data (e.g. jump bridge names) based on XDF
source "${DEPLOY_DIR}/xdf_data.sh"
-notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
# Jumpserver prerequisites check
+notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
jumpserver_check_requirements "${virtual_nodes[*]}" "${OPNFV_BRIDGES[@]}"
# Infra setup
+++ /dev/null
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-virtual:
- default:
- vcpus: 2
- ram: 4096
- common:
- apt:
- keys:
- - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11/SALTSTACK-GPG-KEY.pub
- repos:
- # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
- - saltstack 500 deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11 xenial main
- pkg:
- install:
- - salt-minion
- control:
- pkg:
- install:
- - cloud-init
##############################################################################
-# Copyright (c) 2017 Mirantis Inc. and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img
-virtual:
+x86_64:
+ base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ default:
+ vcpus: 2
+ ram: 4096
+ common:
+ apt:
+ keys:
+ - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11/SALTSTACK-GPG-KEY.pub
+ repos:
+ # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
+ - saltstack 500 deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11 xenial main
+ pkg:
+ install:
+ - salt-minion
+ control:
+ apt: ~
+ pkg:
+ install:
+ - cloud-init
+aarch64:
+ base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img
default:
vcpus: 6
ram: 4096
- linux-headers-generic-hwe-16.04-edge
- salt-minion
control:
+ apt: ~
pkg:
install:
- cloud-init
#!/bin/bash -e
# shellcheck disable=SC2155,SC1001,SC2015,SC2128
##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
echo "${value}"
fi
}
-
-function do_templates() {
- local git_repo_root=$1; shift
- local image_dir=$1; shift
- local target_lab=$1; shift
- local target_pod=$1; shift
- local lab_config_uri=$1; shift
- local scenario_dir=${1:-}
-
- RECLASS_CLUSTER_DIR=$(cd "${git_repo_root}/mcp/reclass/classes/cluster"; pwd)
- PHAROS_GEN_CFG="./pharos/config/utils/generate_config.py"
- PHAROS_IA=$(readlink -f "./pharos/config/installers/fuel/pod_config.yml.j2")
- PHAROS_VALIDATE_SCHEMA_SCRIPT="./pharos/config/utils/validate_schema.py"
- PHAROS_SCHEMA_PDF="./pharos/config/pdf/pod1.schema.yaml"
- PHAROS_SCHEMA_IDF="./pharos/config/pdf/idf-pod1.schema.yaml"
- BASE_CONFIG_PDF="${lab_config_uri}/labs/${target_lab}/${target_pod}.yaml"
- BASE_CONFIG_IDF="${lab_config_uri}/labs/${target_lab}/idf-${target_pod}.yaml"
- LOCAL_PDF="${image_dir}/$(basename "${BASE_CONFIG_PDF}")"
- LOCAL_IDF="${image_dir}/$(basename "${BASE_CONFIG_IDF}")"
-
- # Two-stage expansion, first stage handles pod_config and scenarios only
- if [ -n "${scenario_dir}" ]; then
- # Make sample PDF/IDF available via default lab-config (pharos submodule)
- ln -sf "$(readlink -f "../config/labs/local")" "./pharos/labs/"
-
- # Expand scenario file and main reclass input (pod_config.yaml) based on PDF
- if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
- notify_e "[ERROR] Could not retrieve PDF (Pod Descriptor File)!"
- elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
- notify_e "[ERROR] Could not retrieve IDF (Installer Descriptor File)!"
- fi
- # Check first if configuration files are valid
- if [[ ! "$target_pod" =~ "virtual" ]]; then
- if ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_PDF}" \
- -s "${PHAROS_SCHEMA_PDF}"; then
- notify_e "[ERROR] PDF does not match yaml schema!"
- elif ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_IDF}" \
- -s "${PHAROS_SCHEMA_IDF}"; then
- notify_e "[ERROR] IDF does not match yaml schema!"
- fi
- fi
- if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \
- -j "${PHAROS_IA}" -v > "${image_dir}/pod_config.yml"; then
- notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!"
- fi
- template_dirs="${scenario_dir}"
- template_err_str='Could not convert j2 scenario definitions!'
- else
- # Expand reclass and virsh network templates based on PDF + IDF
- printenv | \
- awk '/^(SALT|MCP|MAAS|CLUSTER).*=/ { gsub(/=/,": "); print }' >> "${LOCAL_PDF}"
- template_dirs="${RECLASS_CLUSTER_DIR} $(readlink -f virsh_net) $(readlink -f ./*j2)"
- template_err_str='Could not convert PDF to network definitions!'
- fi
- # shellcheck disable=SC2086
- j2args=$(find $template_dirs -name '*.j2' -exec echo -j {} \;)
- # shellcheck disable=SC2086
- if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
- -i "$(dirname "${PHAROS_IA}")"; then
- notify_e "[ERROR] ${template_err_str}"
- fi
-}
--- /dev/null
+#!/bin/bash -e
+# shellcheck disable=SC2155,SC1001,SC2015,SC2128
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Library of shell functions dedicated to j2 template handling
+#
+
+PHAROS_GEN_CFG='./pharos/config/utils/generate_config.py'
+PHAROS_IA='./pharos/config/installers/fuel/pod_config.yml.j2'
+PHAROS_VALIDATE_SCHEMA_SCRIPT='./pharos/config/utils/validate_schema.py'
+PHAROS_SCHEMA_PDF='./pharos/config/pdf/pod1.schema.yaml'
+PHAROS_SCHEMA_IDF='./pharos/config/pdf/idf-pod1.schema.yaml'
+
+# First of two expansion stages: handles only pod_config and the scenario files
+function do_templates_scenario {
+ local image_dir=$1; shift
+ local target_lab=$1; shift
+ local target_pod=$1; shift
+ local lab_config_uri=$1; shift
+ local scenario_dir=$1
+
+ BASE_CONFIG_PDF="${lab_config_uri}/labs/${target_lab}/${target_pod}.yaml"
+ BASE_CONFIG_IDF="${lab_config_uri}/labs/${target_lab}/idf-${target_pod}.yaml"
+ LOCAL_PDF="${image_dir}/$(basename "${BASE_CONFIG_PDF}")"
+ LOCAL_IDF="${image_dir}/$(basename "${BASE_CONFIG_IDF}")"
+
+ # Make sample PDF/IDF available via default lab-config (pharos submodule)
+ ln -sf "$(readlink -f "../config/labs/local")" "./pharos/labs/"
+
+ # Expand scenario file and main reclass input (pod_config.yaml) based on PDF
+ if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
+ notify_e "[ERROR] Could not retrieve PDF (Pod Descriptor File)!"
+ elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
+ notify_e "[ERROR] Could not retrieve IDF (Installer Descriptor File)!"
+ fi
+ # Check first if configuration files are valid
+ if [[ ! "$target_pod" =~ "virtual" ]]; then
+ if ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_PDF}" \
+ -s "${PHAROS_SCHEMA_PDF}"; then
+ notify_e "[ERROR] PDF does not match yaml schema!"
+ elif ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_IDF}" \
+ -s "${PHAROS_SCHEMA_IDF}"; then
+ notify_e "[ERROR] IDF does not match yaml schema!"
+ fi
+ fi
+ if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \
+ -j "${PHAROS_IA}" -v > "${image_dir}/pod_config.yml"; then
+ notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!"
+ fi
+ printenv | \
+ awk '/^(SALT|MCP|MAAS).*=/ { gsub(/=/,": "); print }' >> "${LOCAL_PDF}"
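+ # e.g. a hypothetical 'SALT_MASTER=192.168.10.100' in the environment is
+ # appended to the local PDF as 'SALT_MASTER: 192.168.10.100'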
+ j2args=$(find "${scenario_dir}" -name '*.j2' -exec echo -j {} \;)
+ # shellcheck disable=SC2086
+ if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
+ -i "$(dirname "$(readlink -f "${PHAROS_IA}")")"; then
+ notify_e '[ERROR] Could not convert j2 scenario definitions!'
+ fi
+}
+
+# Second expansion stage: expand reclass and virsh network templates based on
+# PDF + IDF, scenario data and exported env vars
+function do_templates_cluster {
+ local image_dir=$1; shift
+ local target_lab=$1; shift
+ local target_pod=$1; shift
+ local git_repo_root=$1; shift
+ local extra_yaml=("$@")
+
+ RECLASS_CLUSTER_DIR=$(cd "${git_repo_root}/mcp/reclass/classes/cluster"; pwd)
+ LOCAL_PDF="${image_dir}/${target_pod}.yaml"
+
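+ # Concatenate any extra input YAML (scenario defaults and definition) on top
+ # of the local PDF, stripping each file's '---' document marker so the
+ # result stays a single YAML document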
+ for _yaml in "${extra_yaml[@]}"; do
+ awk '/^---$/{f=1;next;}f' "${_yaml}" >> "${LOCAL_PDF}"
+ done
+ # shellcheck disable=SC2046
+ j2args=$(find "${RECLASS_CLUSTER_DIR}" "$(readlink -f virsh_net)" $(readlink -f ./*j2) \
+ -name '*.j2' -exec echo -j {} \;)
+ # shellcheck disable=SC2086
+ if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
+ -i "$(dirname "$(readlink -f "${PHAROS_IA}")")"; then
+ notify_e '[ERROR] Could not convert PDF to network definitions!'
+ fi
+}
##############################################################################
rm /etc/salt/minion_id
rm -f /etc/salt/pki/minion/minion_master.pub
-echo "id: $(hostname).{{ conf.CLUSTER_DOMAIN }}" > /etc/salt/minion
+echo "id: $(hostname).{{ conf.cluster.domain }}" > /etc/salt/minion
{#- should be in sync with 'opnfv_infra_config_pxe_address' in 'pharos/config/installers/fuel/pod_config.yml.j2' #}
echo "master: {{ conf.idf.net_config.admin.network | ipaddr_index(2) }}" >> /etc/salt/minion
service salt-minion restart
#!/bin/bash
##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
##############################################################################
rm /etc/salt/minion_id
rm -f /etc/salt/pki/minion/minion_master.pub
-echo "id: $(hostname).{{ conf.CLUSTER_DOMAIN }}" > /etc/salt/minion
+echo "id: $(hostname).{{ conf.cluster.domain }}" > /etc/salt/minion
echo "master: {{ conf.SALT_MASTER }}" >> /etc/salt/minion
service salt-minion restart
#!/bin/bash -e
+# shellcheck disable=SC2034
##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# Data derived from XDF (PDF/IDF/SDF/etc), used as input in deploy.sh
#
-# Determine bridge names based on IDF, where all bridges are now mandatory
+{%- set arch = conf[conf.MCP_JUMP_ARCH] -%}
+
+{%- macro bash_arr(_l) -%}
+ ({%- for n in _l -%}'{{ n }}' {% endfor -%})
+{%- endmacro -%}
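+{#- e.g. a node list of ['cfg01', 'mas01'] (illustrative names) expands to ('cfg01' 'mas01' ) -#}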
+
+{#- Pack list as `sep`-separated string, replacing spaces with '|' -#}
+{%- macro pack(x = [], sep = ',') -%}
+ {{ x | join(sep) | replace(' ', '|') }}
+{%- endmacro -%}
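+{#- e.g. pack(['x y', 'z']) yields 'x|y,z' (illustrative values) -#}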
+
+{#- Pack all vnode data as string -#}
+{%- macro serialize_vnodes() -%}
+ {%- set V = conf.virtual -%}
+ {%- set arr = [] -%}
+ {%- for n in V.nodes -%}
+ {%- if n not in V -%}{%- do V.update({n: {}}) -%}{%- endif -%}
+ {%- do arr.append(pack([n, V[n].ram or arch.default.ram,
+ V[n].vcpus or arch.default.vcpus])) -%}
+ {%- endfor -%}
+ '{{ pack(arr, '|') }}'
+{%- endmacro -%}
+
+{#- Pack apt_pkg data as string -#}
+{%- macro serialize_apt_pkg() -%}
+ {%- set arr = [] -%}
+ {%- set sections = [arch.common] -%}
+ {%- if 'virtual_control_plane' in conf.cluster.states -%}
+ {%- do sections.append(arch.control) -%}
+ {%- endif -%}
+ {%- for c in sections -%}
+ {%- do arr.append(pack([pack(c.apt['keys']), pack(c.apt.repos),
+ pack(c.pkg.install), pack(c.pkg.remove)], '^')) -%}
+ {%- endfor -%}
+ '{{ pack(arr, '^') }}'
+{%- endmacro -%}
+
{%- set bridges = conf.idf.fuel.jumphost.bridges %}
+# Determine bridge names based on IDF, where all bridges are now mandatory
OPNFV_BRIDGES=(
'{{ bridges.admin or "pxebr" }}'
'{{ bridges.mgmt or "mgmt" }}'
'{{ bridges.private or "internal" }}'
'{{ bridges.public or "public" }}'
)
+
+export CLUSTER_DOMAIN={{ conf.cluster.domain }}
+cluster_states={{ bash_arr(conf.cluster.states) }}
+virtual_nodes={{ bash_arr(conf.virtual.nodes) }}
+base_image={{ arch.base_image }}
+
+# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
+virtual_nodes_data={{ serialize_vnodes() }}
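+# e.g. two default-sized x86_64 vnodes named 'cfg01', 'mas01' (illustrative)
+# would serialize as 'cfg01,4096,2|mas01,4096,2'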
+
+# Serialize repos, packages to (pre-)install/remove for:
+# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
+# - virtualized control plane VM base image (only when VCP is used)
+virtual_repos_pkgs={{ serialize_apt_pkg() }}
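+# Format: '<keys>^<repos>^<install>^<remove>' per image flavor (common, plus
+# control when the virtual control plane is used), flavors also separated by
+# '^'; list items are ','-separated, with spaces replaced by '|'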