PDF parsing support
author     Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Sat, 16 Sep 2017 19:13:11 +0000 (21:13 +0200)
committer  Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Wed, 27 Sep 2017 12:40:39 +0000 (14:40 +0200)
- add a new git submodule pointing to OPNFV Pharos;
- use Pharos 'generate_config.py' to parse the PDF via the Fuel
  installer adapter and generate <pod_config.yml> dynamically;
- build <pod_config.yml> outside the current git repo and sync it
  separately, to prevent leaking sensitive data;
- add a <pod1.yaml> PDF sample based on LF-POD2, to be used with
  'ci/deploy.sh -l local -p pod1';

Change-Id: I4e1b95f180bcd5ade5d86f516628eb8edbe64b1c
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
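
The conversion step described above boils down to one script call; a minimal
sketch (flags as used by ci/deploy.sh below, <pdf> standing in for a locally
fetched POD Descriptor File):

  # Sketch only: convert a Pharos PDF into Fuel reclass model input
  cd mcp/scripts
  ./pharos/config/utils/generate_config.py -y <pdf> \
    -j ./pharos/config/installers/fuel/pod_config.yml.j2 > pod_config.yml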
.gitmodules
ci/deploy.sh
mcp/config/labs/local/pod1.yaml [new file with mode: 0644]
mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/.gitignore
mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/pod_config.yml
mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/pod_config.yml.example [new file with mode: 0644]
mcp/scripts/pharos [new submodule]
mcp/scripts/salt.sh

diff --git a/.gitmodules b/.gitmodules
index d3ffb55..565943d 100644 (file)
@@ -7,3 +7,7 @@
        path = mcp/deploy/scripts
        url = https://github.com/salt-formulas/salt-formulas-scripts
        branch = master
+[submodule "pharos"]
+       path = mcp/scripts/pharos
+       url = https://github.com/opnfv/pharos
+       branch = master
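
Once this entry lands, the new submodule can be fetched with the standard git
workflow, e.g.:

  # Initialize and fetch the newly added Pharos submodule
  git submodule update --init mcp/scripts/pharos

deploy.sh below takes care of this via 'make ... patches-import', so the
manual step is only a fallback.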
diff --git a/ci/deploy.sh b/ci/deploy.sh
index f6af1b5..dc5f774 100755 (executable)
@@ -61,8 +61,11 @@ It depends on the OPNFV official configuration directory/file structure
 and provides a fairly simple mechanism to execute a deployment.
 
 $(notify "Input parameters to the build script are:" 2)
--b Base URI to the configuration directory (needs to be provided in a URI
-   style, it can be a local resource: file:// or a remote resource http(s)://)
+-b Base URI to the configuration directory (needs to be provided in URI style,
+   it can be a local resource: file:// or a remote resource http(s)://).
+   A POD Descriptor File (PDF) should be available at:
+   <base-uri>/labs/<lab-name>/<pod-name>.yaml
+   The default is './mcp/config'.
 -B Bridges to be used by deploy script. It can be specified several times,
    or as a comma separated list of bridges, or both: -B br1 -B br2,br3
   First occurrence sets the PXE Bridge, next Mgmt, then Internal and Public.
@@ -77,7 +80,7 @@ $(notify "Input parameters to the build script are:" 2)
 -h Print this message and exit
 -L Deployment log path and name, e.g. -L /home/jenkins/job.log.tar.gz
 -l Lab name as defined in the configuration directory, e.g. lf
--p POD name as defined in the configuration directory, e.g. pod-1
+-p POD name as defined in the configuration directory, e.g. pod2
 -s Deployment-scenario, this points to a short deployment scenario name, which
    has to be defined in config directory (e.g. os-odl-nofeature-ha).
 -S Storage dir for VM images, default is mcp/deploy/images
@@ -97,7 +100,7 @@ $(notify "[NOTE] sudo & virsh privileges are needed for this script to run" 3)
 Example:
 
 $(notify "sudo $(basename "$0") \\
-  -b file:///home/jenkins/lab-config \\
+  -b file:///home/jenkins/securedlab \\
   -l lf -p pod2 \\
   -s os-odl-nofeature-ha" 2)
 EOF
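
For the example invocation above, the PDF lookup resolves against the base
URI as follows (illustration only):

  # -b file:///home/jenkins/securedlab -l lf -p pod2 makes deploy.sh
  # fetch the POD Descriptor File from:
  #   file:///home/jenkins/securedlab/labs/lf/pod2.yaml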
@@ -132,13 +135,14 @@ clean() {
 ##############################################################################
 # BEGIN of variables to customize
 #
-SCRIPT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")
-DEPLOY_DIR=$(cd "${SCRIPT_PATH}/../mcp/scripts"; pwd)
-STORAGE_DIR=$(cd "${SCRIPT_PATH}/../mcp/deploy/images"; pwd)
-RECLASS_CLUSTER_DIR=$(cd "${SCRIPT_PATH}/../mcp/reclass/classes/cluster"; pwd)
+REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
+DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
+STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
+RECLASS_CLUSTER_DIR=$(cd "${REPO_ROOT_PATH}/mcp/reclass/classes/cluster"; pwd)
 DEPLOY_TYPE='baremetal'
 OPNFV_BRIDGES=('pxebr' 'mgmt' 'internal' 'public')
 URI_REGEXP='(file|https?|ftp)://.*'
+BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/config"
 
 export SSH_KEY=${SSH_KEY:-"/var/lib/opnfv/mcp.rsa"}
 export SALT_MASTER=${INSTALLER_IP:-10.20.0.2}
@@ -300,6 +304,28 @@ if [ "$(uname -i)" = "aarch64" ]; then
   [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken vgabios
 fi
 
+# Clone git submodules and apply our patches
+make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
+
+# Convert Pharos-compatible POD Descriptor File (PDF) to reclass model input
+PHAROS_GEN_CONFIG_SCRIPT="./pharos/config/utils/generate_config.py"
+PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2"
+BASE_CONFIG_PDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}.yaml"
+LOCAL_PDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_PDF}")"
+LOCAL_PDF_RECLASS="${STORAGE_DIR}/pod_config.yml"
+if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
+    if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
+        notify "[ERROR] Could not retrieve PDF (Pod Descriptor File)!\n" 1>&2
+        exit 1
+    else
+        notify "[WARN] Could not retrieve PDF (Pod Descriptor File)!\n" 3
+    fi
+elif ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
+    -j "${PHAROS_INSTALLER_ADAPTER}" > "${LOCAL_PDF_RECLASS}"; then
+    notify "[ERROR] Could not convert PDF to reclass model input!\n" 1>&2
+    exit 1
+fi
+
 # Check scenario file existence
 SCENARIO_DIR="../config/scenario"
 if [ ! -f  "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
@@ -342,12 +368,12 @@ generate_ssh_key
 prepare_vms virtual_nodes "${base_image}" "${STORAGE_DIR}"
 create_networks OPNFV_BRIDGES
 create_vms virtual_nodes virtual_nodes_ram virtual_nodes_vcpus \
-  OPNFV_BRIDGES "${STORAGE_DIR}"
+    OPNFV_BRIDGES "${STORAGE_DIR}"
 update_mcpcontrol_network
 start_vms virtual_nodes
 check_connection
 
-./salt.sh
+./salt.sh "${LOCAL_PDF_RECLASS}"
 
 # Openstack cluster setup
 for state in "${cluster_states[@]}"; do
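
Put together, a local run against the bundled sample ('-l local -p pod1',
default base URI) expands the new block roughly as follows; the repo root
path is an assumption for illustration:

  # Assuming the repo is checked out at /opt/fuel (illustrative path):
  curl --create-dirs -o /opt/fuel/mcp/deploy/images/pod1.yaml \
    file:///opt/fuel/mcp/config/labs/local/pod1.yaml
  ./pharos/config/utils/generate_config.py \
    -y /opt/fuel/mcp/deploy/images/pod1.yaml \
    -j ./pharos/config/installers/fuel/pod_config.yml.j2 \
    > /opt/fuel/mcp/deploy/images/pod_config.yml
  ./salt.sh /opt/fuel/mcp/deploy/images/pod_config.yml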
diff --git a/mcp/config/labs/local/pod1.yaml b/mcp/config/labs/local/pod1.yaml
new file mode 100644 (file)
index 0000000..702a533
--- /dev/null
@@ -0,0 +1,226 @@
+---
+### LF POD 2 descriptor file ###
+
+details:
+  pod_owner: Trevor Bramwell
+  contact: tbramwell@linuxfoundation.org
+  lab: LF Pharos Lab
+  location: Portland
+  type: production
+  link: https://wiki.opnfv.org/display/pharos/LF+POD+2
+##############################################################################
+net_config:
+  # NOTE: Network names are likely to change after the PDF spec is updated
+  oob:
+    interface: 0
+    ip-range: 172.30.8.65-172.30.8.75
+    vlan: 410
+  admin:
+    interface: 0
+    vlan: native
+    network: 192.168.11.0  # Untagged, 'PXE/Admin' on wiki, different IP
+    mask: 24
+  mgmt:
+    interface: 0
+    vlan: 300
+    network: 10.167.4.0    # Tagged, 'vlan 300' on wiki
+    mask: 24
+  storage:
+    interface: 3
+    vlan: 301
+    network: 10.2.0.0      # Tagged, not the same as 'storage' on wiki
+    mask: 24
+  private:
+    interface: 1
+    vlan: 1000
+    network: 10.1.0.0      # Tagged, not the same as 'private' on wiki
+    mask: 24
+  public:
+    interface: 2
+    vlan: native
+    network: 172.30.10.0   # Untagged, 'public' on wiki
+    mask: 24
+    gateway: 172.30.10.1
+    dns:
+      - 8.8.8.8
+      - 8.8.4.4
+##############################################################################
+jumphost:
+  name: pod2-jump
+  node:
+    type: baremetal
+    vendor: Cisco Systems Inc
+    model: UCSB-B200-M4
+    arch: x86_64
+    cpus: 2
+    cpu_cflags: haswell
+    cores: 8
+    memory: 128G
+  disks: &disks
+    - name: 'disk1'
+      disk_capacity: 2400G
+      disk_type: hdd
+      disk_interface: sas
+      disk_rotation:
+  os: centos-7
+  remote_params: &remote_params
+    type: ipmi
+    versions:
+      - 2.0
+    user: admin
+    pass: octopus
+  remote_management:
+    <<: *remote_params
+    address: 172.30.8.83
+    mac_address: "a8:9d:21:c9:c4:9e"
+  interfaces:
+    - mac_address: "00:25:b5:a0:00:1a"
+      speed: 40gb
+      features: 'dpdk|sriov'
+    - mac_address: "00:25:b5:a0:00:1b"
+      speed: 40gb
+      features: 'dpdk|sriov'
+    - mac_address: "00:25:b5:a0:00:1c"
+      speed: 40gb
+      features: 'dpdk|sriov'
+    - mac_address: "00:25:b5:a0:00:1d"
+      speed: 40gb
+      features: 'dpdk|sriov'
+  fixed_ips:
+    admin: 192.168.11.1
+    mgmt: 10.167.4.1
+    public: 172.30.10.72
+##############################################################################
+nodes:
+  - name: pod2-node1
+    node: &nodeparams
+      type: baremetal
+      vendor: Cisco Systems Inc
+      model: UCSB-B200-M4
+      arch: x86_64
+      cpus: 2
+      cpu_cflags: haswell
+      cores: 8
+      memory: 32G
+    disks: *disks
+    remote_management:
+      <<: *remote_params
+      address: 172.30.8.75
+      mac_address: "a8:9d:21:c9:8b:56"
+    interfaces:
+      - mac_address: "00:25:b5:a0:00:2a"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:2b"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:2c"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:2d"
+        speed: 40gb
+        features: 'dpdk|sriov'
+    fixed_ips:
+      admin: 192.168.11.2
+      mgmt: 10.167.4.2
+      public: 172.30.10.2
+  ############################################################################
+  - name: pod2-node2
+    node: *nodeparams
+    disks: *disks
+    remote_management:
+      <<: *remote_params
+      address: 172.30.8.65
+      mac_address: "a8:9d:21:c9:4d:26"
+    interfaces:
+      - mac_address: "00:25:b5:a0:00:3a"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:3b"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:3c"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:3d"
+        speed: 40gb
+        features: 'dpdk|sriov'
+    fixed_ips:
+      admin: 192.168.11.3
+      mgmt: 10.167.4.3
+      public: 172.30.10.3
+  ############################################################################
+  - name: pod2-node3
+    node: *nodeparams
+    disks: *disks
+    remote_management:
+      <<: *remote_params
+      address: 172.30.8.74
+      mac_address: "a8:9d:21:c9:3a:92"
+    interfaces:
+      - mac_address: "00:25:b5:a0:00:4a"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:4b"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:4c"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:4d"
+        speed: 40gb
+        features: 'dpdk|sriov'
+    fixed_ips:
+      admin: 192.168.11.4
+      mgmt: 10.167.4.4
+      public: 172.30.10.4
+  ############################################################################
+  - name: pod2-node4
+    node: *nodeparams
+    disks: *disks
+    remote_management:
+      <<: *remote_params
+      address: 172.30.8.73
+      mac_address: "74:a2:e6:a4:14:9c"
+    interfaces:
+      - mac_address: "00:25:b5:a0:00:5a"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:5b"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:5c"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:5d"
+        speed: 40gb
+        features: 'dpdk|sriov'
+    fixed_ips:
+      admin: 192.168.11.5
+      mgmt: 10.167.4.5
+      public: 172.30.10.5
+  ############################################################################
+  - name: pod2-node5
+    node: *nodeparams
+    disks: *disks
+    remote_management:
+      <<: *remote_params
+      address: 172.30.8.72
+      mac_address: "a8:9d:21:a0:15:9c"
+    interfaces:
+      - mac_address: "00:25:b5:a0:00:6a"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:6b"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:6c"
+        speed: 40gb
+        features: 'dpdk|sriov'
+      - mac_address: "00:25:b5:a0:00:6d"
+        speed: 40gb
+        features: 'dpdk|sriov'
+    fixed_ips:
+      admin: 192.168.11.6
+      mgmt: 10.167.4.6
+      public: 172.30.10.6
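
The sample leans on YAML anchors and merge keys to avoid repetition; each
node's remote_management block, for instance, effectively expands to:

  # Effective value of 'remote_management' for pod2-node1, after the
  # '<<: *remote_params' merge pulls in the anchored mapping:
  remote_management:
    type: ipmi
    versions:
      - 2.0
    user: admin
    pass: octopus
    address: 172.30.8.75
    mac_address: "a8:9d:21:c9:8b:56"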
diff --git a/mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/pod_config.yml b/mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/pod_config.yml
index ff2991a..22c6ce6 100644 (file)
@@ -1,94 +1,5 @@
 ---
 parameters:
   _param:
-    # infra service addresses
-    opnfv_infra_config_address: 10.167.4.100
-    opnfv_infra_config_deploy_address: 10.20.0.2
-    opnfv_infra_maas_node01_address: 10.167.4.3
-    opnfv_infra_maas_node01_deploy_address: 192.168.11.3
-    opnfv_infra_compute_node01_address: 10.167.4.141
-    opnfv_infra_compute_node02_address: 10.167.4.142
-    opnfv_infra_compute_node03_address: 10.167.4.143
-    opnfv_infra_kvm_address: 10.167.4.140
-    opnfv_infra_kvm_node01_address: 10.167.4.141
-    opnfv_infra_kvm_node02_address: 10.167.4.142
-    opnfv_infra_kvm_node03_address: 10.167.4.143
-
-    opnfv_openstack_gateway_node01_address: 10.167.4.124
-    opnfv_openstack_gateway_node02_address: 10.167.4.125
-    opnfv_openstack_gateway_node03_address: 10.167.4.126
-    opnfv_openstack_gateway_node01_tenant_address: 10.1.0.6
-    opnfv_openstack_gateway_node02_tenant_address: 10.1.0.7
-    opnfv_openstack_gateway_node03_tenant_address: 10.1.0.9
-    opnfv_openstack_proxy_address: 10.167.4.80
-    opnfv_openstack_proxy_node01_address: 10.167.4.81
-    opnfv_openstack_proxy_node02_address: 10.167.4.82
-    opnfv_openstack_control_address: 10.167.4.10
-    opnfv_openstack_control_node01_address: 10.167.4.11
-    opnfv_openstack_control_node02_address: 10.167.4.12
-    opnfv_openstack_control_node03_address: 10.167.4.13
-    opnfv_openstack_database_address: 10.167.4.50
-    opnfv_openstack_database_node01_address: 10.167.4.51
-    opnfv_openstack_database_node02_address: 10.167.4.52
-    opnfv_openstack_database_node03_address: 10.167.4.53
-    opnfv_openstack_message_queue_address: 10.167.4.40
-    opnfv_openstack_message_queue_node01_address: 10.167.4.41
-    opnfv_openstack_message_queue_node02_address: 10.167.4.42
-    opnfv_openstack_message_queue_node03_address: 10.167.4.43
-    opnfv_openstack_telemetry_address: 10.167.4.75
-    opnfv_openstack_telemetry_node01_address: 10.167.4.76
-    opnfv_openstack_telemetry_node02_address: 10.167.4.77
-    opnfv_openstack_telemetry_node03_address: 10.167.4.78
-    opnfv_openstack_compute_node01_single_address: 10.167.4.101
-    opnfv_openstack_compute_node02_single_address: 10.167.4.102
-    opnfv_openstack_compute_node03_single_address: 10.167.4.103
-    opnfv_openstack_compute_node01_control_address: 10.167.4.101
-    opnfv_openstack_compute_node02_control_address: 10.167.4.102
-    opnfv_openstack_compute_node03_control_address: 10.167.4.103
-    opnfv_openstack_compute_node01_tenant_address: 10.1.0.101
-    opnfv_openstack_compute_node02_tenant_address: 10.1.0.102
-    opnfv_openstack_compute_node03_tenant_address: 10.1.0.103
-    opnfv_openstack_compute_node01_external_address: 172.30.10.2
-    opnfv_openstack_compute_node02_external_address: 172.30.10.3
-
-    opnfv_opendaylight_server_node01_single_address: 10.167.4.111
-
-    opnfv_name_servers: ['8.8.8.8', '8.8.4.4']
-    opnfv_dns_server01: '8.8.8.8'
-    opnfv_net_mgmt_vlan: 300
-    opnfv_net_tenant_vlan: 1000
-
-    opnfv_maas_node01_architecture: amd64/generic
-    opnfv_maas_node01_power_address: 172.30.8.75
-    opnfv_maas_node01_power_type: ipmi
-    opnfv_maas_node01_power_user: admin
-    opnfv_maas_node01_power_password: octopus
-    opnfv_maas_node01_interface_mac: "00:25:b5:a0:00:2a"
-
-    opnfv_maas_node02_architecture: amd64/generic
-    opnfv_maas_node02_power_address: 172.30.8.65
-    opnfv_maas_node02_power_type: ipmi
-    opnfv_maas_node02_power_user: admin
-    opnfv_maas_node02_power_password: octopus
-    opnfv_maas_node02_interface_mac: "00:25:b5:a0:00:3a"
-
-    opnfv_maas_node03_architecture: amd64/generic
-    opnfv_maas_node03_power_address: 172.30.8.74
-    opnfv_maas_node03_power_type: ipmi
-    opnfv_maas_node03_power_user: admin
-    opnfv_maas_node03_power_password: octopus
-    opnfv_maas_node03_interface_mac: "00:25:b5:a0:00:4a"
-
-    opnfv_maas_node04_architecture: amd64/generic
-    opnfv_maas_node04_power_address: 172.30.8.73
-    opnfv_maas_node04_power_type: ipmi
-    opnfv_maas_node04_power_user: admin
-    opnfv_maas_node04_power_password: octopus
-    opnfv_maas_node04_interface_mac: "00:25:b5:a0:00:5a"
-
-    opnfv_maas_node05_architecture: amd64/generic
-    opnfv_maas_node05_power_address: 172.30.8.72
-    opnfv_maas_node05_power_type: ipmi
-    opnfv_maas_node05_power_user: admin
-    opnfv_maas_node05_power_password: octopus
-    opnfv_maas_node05_interface_mac: "00:25:b5:a0:00:6a"
+    # NOTE: This file is overwritten at runtime by parsing the PDF.
+    opnfv_use_pod_descriptor_file: true
diff --git a/mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/pod_config.yml.example b/mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/pod_config.yml.example
new file mode 100644 (file)
index 0000000..e77ad07
--- /dev/null
@@ -0,0 +1,93 @@
+---
+parameters:
+  _param:
+
+    opnfv_infra_config_address: 10.167.4.100
+    opnfv_infra_maas_node01_address: 10.167.4.3
+    opnfv_infra_maas_node01_deploy_address: 192.168.11.3
+    opnfv_infra_compute_node01_address: 10.167.4.141
+    opnfv_infra_compute_node02_address: 10.167.4.142
+    opnfv_infra_compute_node03_address: 10.167.4.143
+    opnfv_infra_kvm_address: 10.167.4.140
+    opnfv_infra_kvm_node01_address: 10.167.4.141
+    opnfv_infra_kvm_node02_address: 10.167.4.142
+    opnfv_infra_kvm_node03_address: 10.167.4.143
+
+    opnfv_openstack_gateway_node01_address: 10.167.4.124
+    opnfv_openstack_gateway_node02_address: 10.167.4.125
+    opnfv_openstack_gateway_node03_address: 10.167.4.126
+    opnfv_openstack_gateway_node01_tenant_address: 10.1.0.6
+    opnfv_openstack_gateway_node02_tenant_address: 10.1.0.7
+    opnfv_openstack_gateway_node03_tenant_address: 10.1.0.9
+    opnfv_openstack_proxy_address: 10.167.4.80
+    opnfv_openstack_proxy_node01_address: 10.167.4.81
+    opnfv_openstack_proxy_node02_address: 10.167.4.82
+    opnfv_openstack_control_address: 10.167.4.10
+    opnfv_openstack_control_node01_address: 10.167.4.11
+    opnfv_openstack_control_node02_address: 10.167.4.12
+    opnfv_openstack_control_node03_address: 10.167.4.13
+    opnfv_openstack_database_address: 10.167.4.50
+    opnfv_openstack_database_node01_address: 10.167.4.51
+    opnfv_openstack_database_node02_address: 10.167.4.52
+    opnfv_openstack_database_node03_address: 10.167.4.53
+    opnfv_openstack_message_queue_address: 10.167.4.40
+    opnfv_openstack_message_queue_node01_address: 10.167.4.41
+    opnfv_openstack_message_queue_node02_address: 10.167.4.42
+    opnfv_openstack_message_queue_node03_address: 10.167.4.43
+    opnfv_openstack_telemetry_address: 10.167.4.75
+    opnfv_openstack_telemetry_node01_address: 10.167.4.76
+    opnfv_openstack_telemetry_node02_address: 10.167.4.77
+    opnfv_openstack_telemetry_node03_address: 10.167.4.78
+    opnfv_openstack_compute_node01_single_address: 10.167.4.101
+    opnfv_openstack_compute_node02_single_address: 10.167.4.102
+    opnfv_openstack_compute_node03_single_address: 10.167.4.103
+    opnfv_openstack_compute_node01_control_address: 10.167.4.101
+    opnfv_openstack_compute_node02_control_address: 10.167.4.102
+    opnfv_openstack_compute_node03_control_address: 10.167.4.103
+    opnfv_openstack_compute_node01_tenant_address: 10.1.0.101
+    opnfv_openstack_compute_node02_tenant_address: 10.1.0.102
+    opnfv_openstack_compute_node03_tenant_address: 10.1.0.103
+    opnfv_openstack_compute_node01_external_address: 172.30.10.2
+    opnfv_openstack_compute_node02_external_address: 172.30.10.3
+
+    opnfv_opendaylight_server_node01_single_address: 10.167.4.111
+
+    opnfv_name_servers: ['8.8.8.8', '8.8.4.4']
+    opnfv_dns_server01: '8.8.8.8'
+    opnfv_net_mgmt_vlan: 300
+    opnfv_net_tenant_vlan: 1000
+
+    opnfv_maas_node01_architecture: 'amd64/generic'
+    opnfv_maas_node01_power_address: 172.30.8.75
+    opnfv_maas_node01_power_type: ipmi
+    opnfv_maas_node01_power_user: admin
+    opnfv_maas_node01_power_password: octopus
+    opnfv_maas_node01_interface_mac: '00:25:b5:a0:00:2a'
+
+    opnfv_maas_node02_architecture: 'amd64/generic'
+    opnfv_maas_node02_power_address: 172.30.8.65
+    opnfv_maas_node02_power_type: ipmi
+    opnfv_maas_node02_power_user: admin
+    opnfv_maas_node02_power_password: octopus
+    opnfv_maas_node02_interface_mac: '00:25:b5:a0:00:3a'
+
+    opnfv_maas_node03_architecture: 'amd64/generic'
+    opnfv_maas_node03_power_address: 172.30.8.74
+    opnfv_maas_node03_power_type: ipmi
+    opnfv_maas_node03_power_user: admin
+    opnfv_maas_node03_power_password: octopus
+    opnfv_maas_node03_interface_mac: '00:25:b5:a0:00:4a'
+
+    opnfv_maas_node04_architecture: 'amd64/generic'
+    opnfv_maas_node04_power_address: 172.30.8.73
+    opnfv_maas_node04_power_type: ipmi
+    opnfv_maas_node04_power_user: admin
+    opnfv_maas_node04_power_password: octopus
+    opnfv_maas_node04_interface_mac: '00:25:b5:a0:00:5a'
+
+    opnfv_maas_node05_architecture: 'amd64/generic'
+    opnfv_maas_node05_power_address: 172.30.8.72
+    opnfv_maas_node05_power_type: ipmi
+    opnfv_maas_node05_power_user: admin
+    opnfv_maas_node05_power_password: octopus
+    opnfv_maas_node05_interface_mac: '00:25:b5:a0:00:6a'
diff --git a/mcp/scripts/pharos b/mcp/scripts/pharos
new file mode 160000 (submodule)
index 0000000..c4b4629
--- /dev/null
@@ -0,0 +1 @@
+Subproject commit c4b4629b59923e049500fc776b8a251d4cd4c3b9
diff --git a/mcp/scripts/salt.sh b/mcp/scripts/salt.sh
index 4effa6b..081513c 100755 (executable)
 F_GIT_ROOT=$(git rev-parse --show-toplevel)
 OPNFV_TMP_DIR="/home/${SALT_MASTER_USER}/fuel"
 OPNFV_FUEL_DIR="/root/fuel"
-
-# patch reclass-system-salt-model locally before copying it over
-make -C "${F_GIT_ROOT}/mcp/patches" deepclean patches-import
+OPNFV_RDIR="reclass/classes/cluster/all-mcp-ocata-common"
+LOCAL_PDF_RECLASS=$1
 
 # push the current git repo (including submodules) to cfg01 first, at ~ubuntu/fuel
 # later we move it to ~root/fuel and delete the temporary clone
 rsync -Erl --delete -e "ssh ${SSH_OPTS}" \
   --exclude-from="${F_GIT_ROOT}/.gitignore" \
   "${F_GIT_ROOT}/" "${SSH_SALT}:$(basename "${OPNFV_TMP_DIR}")/"
+if [ -n "${LOCAL_PDF_RECLASS}" ] && [ -f "${LOCAL_PDF_RECLASS}" ]; then
+  rsync -e "ssh ${SSH_OPTS}" "${LOCAL_PDF_RECLASS}" \
+    "${SSH_SALT}:$(basename "${OPNFV_TMP_DIR}")/mcp/${OPNFV_RDIR}/opnfv/"
+fi
 
 # ssh to cfg01
 # shellcheck disable=SC2086,2087
@@ -36,8 +39,7 @@ ssh ${SSH_OPTS} "${SSH_SALT}" bash -s << SALT_INSTALL_END
   mv ${OPNFV_TMP_DIR} ${OPNFV_FUEL_DIR} && chown -R root.root ${OPNFV_FUEL_DIR}
   ln -s ${OPNFV_FUEL_DIR}/mcp/reclass /srv/salt/reclass
   ln -s ${OPNFV_FUEL_DIR}/mcp/deploy/scripts /srv/salt/scripts
-  cd /srv/salt/reclass/classes/cluster/all-mcp-ocata-common && \
-    ln -s "\$(uname -i)" arch
+  cd /srv/salt/${OPNFV_RDIR} && ln -s "\$(uname -i)" arch
 
   cp -r ${OPNFV_FUEL_DIR}/mcp/metadata/service /usr/share/salt-formulas/reclass
   cd /srv/salt/reclass/classes/service && \
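
With these changes, salt.sh takes the generated reclass input as its first
argument; schematically, the hand-off from deploy.sh looks like:

  # deploy.sh invokes (see above):
  ./salt.sh "${LOCAL_PDF_RECLASS}"
  # salt.sh then rsyncs that file to cfg01, next to the stub
  # pod_config.yml, before the tree is moved to /root/fuel:
  #   ~ubuntu/fuel/mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/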