Merge "[baremetal] Containerize MaaS"
author    Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Tue, 19 Feb 2019 15:17:25 +0000 (15:17 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Tue, 19 Feb 2019 15:17:25 +0000 (15:17 +0000)
35 files changed:
ci/deploy.sh
docs/release/installation/img/fuel_baremetal_ha.png [changed mode: 0644->0755]
docs/release/installation/img/fuel_baremetal_noha.png [changed mode: 0644->0755]
docs/release/installation/img/fuel_hybrid_noha.png [changed mode: 0644->0755]
docs/release/installation/img/fuel_virtual_noha.png [changed mode: 0644->0755]
docs/release/installation/installation.instruction.rst
docs/release/userguide/userguide.rst
mcp/config/scenario/defaults.yaml.j2
mcp/config/states/maas
mcp/config/states/virtual_init
mcp/reclass/classes/cluster/.gitignore
mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2
mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2
mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2
mcp/reclass/classes/cluster/mcp-common-noha/infra/init.yml.j2
mcp/reclass/classes/cluster/mcp-common-noha/init_options.yml [moved from mcp/reclass/classes/cluster/mcp-common-noha/init_options.yml.j2 with 90% similarity]
mcp/reclass/classes/cluster/mcp-fdio-ha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-fdio-noha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2
mcp/reclass/classes/cluster/mcp-odl-noha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-ovn-ha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-ovn-noha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-ovs-ha/infra/maas.yml
mcp/reclass/classes/cluster/mcp-ovs-noha/infra/maas.yml
mcp/scripts/.gitignore
mcp/scripts/docker-compose/docker-compose.yaml.j2
mcp/scripts/docker-compose/files/entrypoint.sh
mcp/scripts/docker-compose/files/entrypoint_maas.sh.j2 [new file with mode: 0644]
mcp/scripts/lib_jump_common.sh
mcp/scripts/lib_jump_deploy.sh
mcp/scripts/user-data.sh.j2
mcp/scripts/virsh_net/net_mcpcontrol.xml.j2 [deleted file]
mcp/scripts/xdf_data.sh.j2

index 789721f..629f66b 100755 (executable)
@@ -295,7 +295,8 @@ source "${DEPLOY_DIR}/xdf_data.sh"
 
 # Jumpserver prerequisites check
 notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
-jumpserver_check_requirements "${virtual_nodes[*]}" "${OPNFV_BRIDGES[@]}"
+jumpserver_check_requirements "${cluster_states[*]}" "${virtual_nodes[*]}" \
+                              "${OPNFV_BRIDGES[@]}"
 
 # Infra setup
 if [ ${DRY_RUN} -eq 1 ]; then
@@ -311,7 +312,6 @@ else
     do_sysctl_cfg
     do_udev_cfg
     create_vms "${MCP_STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}"
-    update_mcpcontrol_network
     start_vms "${virtual_nodes[@]}"
 
     # https://github.com/docker/libnetwork/issues/1743
old mode 100644 (file)
new mode 100755 (executable)
index f2ed610..af5f00f
Binary files a/docs/release/installation/img/fuel_baremetal_ha.png and b/docs/release/installation/img/fuel_baremetal_ha.png differ
old mode 100644 (file)
new mode 100755 (executable)
index 5a3b429..4b5aef0
Binary files a/docs/release/installation/img/fuel_baremetal_noha.png and b/docs/release/installation/img/fuel_baremetal_noha.png differ
old mode 100644 (file)
new mode 100755 (executable)
index 51449a7..f2debfe
Binary files a/docs/release/installation/img/fuel_hybrid_noha.png and b/docs/release/installation/img/fuel_hybrid_noha.png differ
old mode 100644 (file)
new mode 100755 (executable)
index 7d05a9d..710988a
Binary files a/docs/release/installation/img/fuel_virtual_noha.png and b/docs/release/installation/img/fuel_virtual_noha.png differ
index b0efd57..46a4350 100644 (file)
@@ -108,7 +108,7 @@ installation of ``Gambia`` using Fuel:
 |                  |                                                      |
 +==================+======================================================+
 | **1 Jumpserver** | A physical node (also called Foundation Node) that   |
-|                  | hosts the Salt Master container and MaaS VM          |
+|                  | hosts the Salt Master and MaaS containers            |
 +------------------+------------------------------------------------------+
 | **# of nodes**   | Minimum 5                                            |
 |                  |                                                      |
@@ -170,7 +170,7 @@ installation of ``Gambia`` using Fuel:
 |                  |                                                      |
 +==================+======================================================+
 | **1 Jumpserver** | A physical node (also called Foundation Node) that   |
-|                  | hosts the Salt Master container, MaaS VM and         |
+|                  | hosts the Salt Master and MaaS containers, and       |
 |                  | each of the virtual nodes defined in ``PDF``         |
 +------------------+------------------------------------------------------+
 | **# of nodes**   | .. NOTE::                                            |
@@ -424,6 +424,14 @@ Changes ``deploy.sh`` Will Perform to Jumpserver OS
     The install script will alter Jumpserver sysconf and disable
     ``net.bridge.bridge-nf-call``.
 
+.. WARNING::
+
+    On Jumpservers running Ubuntu with AppArmor enabled, when deploying
+    on baremetal nodes (i.e. when MaaS is used), the install script
+    will disable the AppArmor profiles that conflict with the MaaS
+    services running inside the container, e.g. ``ntpd``, ``named``,
+    ``dhcpd``, ``tcpdump``.
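
A quick way to verify on the jumpserver that these profiles were indeed
unloaded (illustrative; ``aa-status`` is shipped by the Ubuntu ``apparmor``
package):

    jenkins@jumpserver:~$ sudo aa-status | grep -E 'ntpd|named|dhcpd|tcpdump'
    jenkins@jumpserver:~$  # no output is expected once the profiles are disabled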
+
 .. WARNING::
 
     The install script will automatically install and/or upgrade the
@@ -729,7 +737,7 @@ Sample ``public`` network configuration block:
                   private: 'trunk'
                   public: 'trunk'
                 trunks:
-                  # mgmt network is not decapsulated for jumpserver infra VMs,
+                  # mgmt network is not decapsulated for jumpserver infra nodes,
                   # to align with the VLAN configuration of baremetal nodes.
                   mgmt: True
 
@@ -991,15 +999,15 @@ A simplified overview of the steps ``deploy.sh`` will automatically perform is:
 
 - create a Salt Master Docker container on the jumpserver, which will drive
   the rest of the installation;
-- ``baremetal`` or ``hybrid`` only: create a ``MaaS`` infrastructure node VM,
+- ``baremetal`` or ``hybrid`` only: create a ``MaaS`` infrastructure container,
   which will be leveraged using Salt to handle OS provisioning on the
   ``baremetal`` nodes;
 - leverage Salt to install & configure OpenStack;
 
 .. NOTE::
 
-    A virtual network ``mcpcontrol`` is always created for initial connection
-    of the VMs on Jumphost.
+    A Docker network ``mcpcontrol`` is always created for initial connection
+    of the infrastructure containers (``cfg01``, ``mas01``) on Jumphost.
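
For reference, once ``docker-compose`` brings the stack up, this network is
visible to Docker (names and IDs below are illustrative; the exact name is
derived from the compose project):

    jenkins@jumpserver:~$ docker network ls | grep mcpcontrol
    b153b28969c5  docker-compose_mcpcontrol  bridge  local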
 
 .. WARNING::
 
@@ -1096,7 +1104,7 @@ each on a separate Jumphost node, both behind the same ``TOR`` switch:
    +-------------+------------------------------------------------------------+
    | ``cfg01``   | Salt Master Docker container                               |
    +-------------+------------------------------------------------------------+
-   | ``mas01``   | MaaS Node VM                                               |
+   | ``mas01``   | MaaS Node Docker container                                 |
    +-------------+------------------------------------------------------------+
    | ``ctl01``   | Baremetal controller node                                  |
    +-------------+------------------------------------------------------------+
@@ -1125,7 +1133,7 @@ each on a separate Jumphost node, both behind the same ``TOR`` switch:
    +---------------------------+----------------------------------------------+
    | ``cfg01``                 | Salt Master Docker container                 |
    +---------------------------+----------------------------------------------+
-   | ``mas01``                 | MaaS Node VM                                 |
+   | ``mas01``                 | MaaS Node Docker container                   |
    +---------------------------+----------------------------------------------+
    | ``kvm01``,                | Baremetals which hold the VMs with           |
    | ``kvm02``,                | controller functions                         |
@@ -1186,7 +1194,7 @@ each on a separate Jumphost node, both behind the same ``TOR`` switch:
    +-------------+------------------------------------------------------------+
    | ``cfg01``   | Salt Master Docker container                               |
    +-------------+------------------------------------------------------------+
-   | ``mas01``   | MaaS Node VM                                               |
+   | ``mas01``   | MaaS Node Docker container                                 |
    +-------------+------------------------------------------------------------+
    | ``ctl01``   | Controller VM                                              |
    +-------------+------------------------------------------------------------+
@@ -1324,10 +1332,10 @@ sequentially by the deploy script:
 +===========================+=================================================+
 | ``virtual_init``          | ``cfg01``: reclass node generation              |
 |                           |                                                 |
-|                           | ``jumpserver`` VMs (e.g. ``mas01``): basic OS   |
+|                           | ``jumpserver`` VMs (if present): basic OS       |
 |                           | config                                          |
 +---------------------------+-------------------------------------------------+
-| ``maas``                  | ``mas01``: OS, MaaS installation,               |
+| ``maas``                  | ``mas01``: OS, MaaS configuration,              |
 |                           | ``baremetal`` node commissioning and deploy     |
 |                           |                                                 |
 |                           | .. NOTE::                                       |
index 25b5e13..50acf6f 100644 (file)
@@ -29,7 +29,8 @@ Fuel uses several networks to deploy and administer the cloud:
 | **PXE/admin**    | Used for booting the nodes via PXE and/or Salt           |
 |                  | control network                                          |
 +------------------+----------------------------------------------------------+
-| **mcpcontrol**   | Used to provision the infrastructure hosts (Salt & MaaS) |
+| **mcpcontrol**   | Docker network used to provision the infrastructure      |
+|                  | hosts (Salt & MaaS)                                      |
 +------------------+----------------------------------------------------------+
 | **management**   | Used for internal communication between                  |
 |                  | OpenStack components                                     |
@@ -45,20 +46,21 @@ Fuel uses several networks to deploy and administer the cloud:
 These networks - except ``mcpcontrol`` - can be Linux bridges configured
 before the deploy on the Jumpserver.
 If they don't exist at deploy time, they will be created by the scripts as
-``libvirt`` managed networks.
+``libvirt`` managed networks (except ``mcpcontrol``, which will be handled by
+Docker using the ``bridge`` driver).
 
 Network ``mcpcontrol``
 ~~~~~~~~~~~~~~~~~~~~~~
 
-``mcpcontrol`` is a virtual network, managed by libvirt. Its only purpose is to
+``mcpcontrol`` is a virtual network, managed by Docker. Its only purpose is to
 provide a simple method of assigning an arbitrary ``INSTALLER_IP`` to the Salt
 master node (``cfg01``), to maintain backwards compatibility with old OPNFV
 Fuel behavior. Normally, end-users only need to change the ``INSTALLER_IP`` if
 the default CIDR (``10.20.0.0/24``) overlaps with existing lab networks.
 
-``mcpcontrol`` has both NAT and DHCP enabled, so the Salt master (``cfg01``)
-and the MaaS VM (``mas01``, when present) get assigned predefined IPs (``.2``,
-``.3``, while the jumpserver bridge port gets ``.1``).
+``mcpcontrol`` uses the Docker ``bridge`` driver, so the Salt master
+(``cfg01``) and the MaaS container (``mas01``, when present) get assigned
+predefined IPs (``.2`` and ``.3`` respectively, while the jumpserver gets
+``.1``).
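
The Docker-side equivalent of what ``docker-compose`` sets up can be sketched
as follows (illustrative only, assuming the default ``10.20.0.0/24`` CIDR; the
real network is defined in ``docker-compose.yaml``):

    docker network create --driver bridge \
        --opt com.docker.network.driver.mtu=9000 \
        --subnet 10.20.0.0/24 \
        mcpcontrol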
 
 +------------------+---------------------------+-----------------------------+
 | Host             | Offset in IP range        | Default address             |
@@ -346,6 +348,18 @@ To login as ``ubuntu`` user, use the RSA private key ``/var/lib/opnfv/mcp.rsa``:
     jenkins@jumpserver:~$ docker exec -it fuel bash
     root@cfg01:~$
 
+Accessing the MaaS Node (``mas01``)
+===================================
+
+Starting with the ``Hunter`` release, the MaaS node (``mas01``) is
+containerized and no longer runs an ``sshd`` server. To access it (from
+``jumpserver`` only):
+
+.. code-block:: console
+
+    jenkins@jumpserver:~$ docker exec -it maas bash
+    root@mas01:~$
+
 Accessing Cluster Nodes
 =======================
 
@@ -382,19 +396,10 @@ Accessing the ``MaaS`` Dashboard
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 ``MaaS`` web-based dashboard is available at
-``http://<mas01 IP address>:5240/MAAS``, e.g.
-``http://172.16.10.12:5240/MAAS``.
+``http://<jumpserver IP address>:5240/MAAS``.
 
 The administrator credentials are ``opnfv``/``opnfv_secret``.
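
Since the ``mas01`` container publishes port ``5240`` on the jumpserver,
dashboard reachability can be checked with a plain HTTP request (illustrative):

    jenkins@jumpserver:~$ curl -sI http://localhost:5240/MAAS
    # expect an HTTP 200 or a redirect to the dashboard login page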
 
-.. NOTE::
-
-    ``mas01`` VM does not automatically get assigned an IP address in the
-    public network segment. If ``MaaS`` dashboard should be accesiable from
-    the public network, such an address can be manually added to the last
-    VM NIC interface in ``mas01`` (which is already connected to the public
-    network bridge).
-
 Ensure Commission/Deploy Timeouts Are Not Too Small
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -446,30 +451,31 @@ Check Network Connectivity Between Nodes on the Jumpserver
 ``cfg01`` is a Docker container running on the ``jumpserver``, connected to
 Docker networks (created by docker-compose automatically on container up),
 which in turn are connected using veth pairs to their ``libvirt`` managed
-counterparts.
+counterparts (or manually created bridges).
 
-For example, the ``mcpcontrol`` network(s) should look like below.
+For example, the ``mgmt`` network(s) should look like the following for a
+``virtual`` deployment.
 
 .. code-block:: console
 
-    jenkins@jumpserver:~$ brctl show mcpcontrol
+    jenkins@jumpserver:~$ brctl show mgmt
     bridge name   bridge id           STP enabled   interfaces
-    mcpcontrol    8000.525400064f77   yes           mcpcontrol-nic
-                                                    veth_mcp0
+    mgmt          8000.525400064f77   yes           mgmt-nic
+                                                    veth_mcp2
                                                     vnet8
 
     jenkins@jumpserver:~$ docker network ls
     NETWORK ID    NAME                              DRIVER   SCOPE
-    81a0fdb3bd78  docker-compose_docker-mcpcontrol  macvlan  local
+    81a0fdb3bd78  docker-compose_mgmt               macvlan  local
     [...]
 
-    jenkins@jumpserver:~$ docker network inspect docker-compose_mcpcontrol
+    jenkins@jumpserver:~$ docker network inspect docker-compose_mgmt
     [
         {
-            "Name": "docker-compose_mcpcontrol",
+            "Name": "docker-compose_mgmt",
             [...]
             "Options": {
-                "parent": "veth_mcp1"
+                "parent": "veth_mcp3"
             },
         }
     ]
@@ -488,14 +494,13 @@ segment).
         inet addr:172.16.10.2   Bcast:0.0.0.0  Mask:255.255.255.0
         inet addr:192.168.11.2  Bcast:0.0.0.0  Mask:255.255.255.0
 
-For each network of interest (``mcpcontrol``, ``mgmt``, ``PXE/admin``), check
-that ``cfg01`` can ping the jumpserver IP in that network segment, as well as
-the ``mas01`` IP in that network.
+For each network of interest (``mgmt``, ``PXE/admin``), check
+that ``cfg01`` can ping the jumpserver IP in that network segment.
 
 .. NOTE::
 
-    ``mcpcontrol`` is set up at VM bringup, so it should always be available,
-    while the other networks are configured by Salt as part of the
+    ``mcpcontrol`` is set up at container bringup, so it should always be
+    available, while the other networks are configured by Salt as part of the
     ``virtual_init`` STATE file.
 
 .. code-block:: console
@@ -552,7 +557,7 @@ To confirm or rule out this possibility, monitor the serial console output of
 one (or more) cluster nodes during ``MaaS`` commissioning. If the node is
 properly configured to attempt PXE boot, yet it times out waiting for an IP
 address from ``mas01`` ``DHCP``, it's worth checking that ``DHCP`` packets
-reach the ``jumpserver``, respectively the ``mas01`` VM.
+reach the ``jumpserver`` and, subsequently, the ``mas01`` container.
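
For instance, a capture along these lines on the ``jumpserver`` (the bridge
name is deployment-specific) would show whether ``DHCP`` requests arrive:

    jenkins@jumpserver:~$ sudo tcpdump -eni <pxe_admin_bridge> port 67 or port 68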
 
 .. code-block:: console
 
index 73799c5..4c6a86f 100644 (file)
@@ -12,12 +12,6 @@ x86_64:
   default:
     vcpus: 2
     ram: 4096
-    virtual: &arch_default_virtual_nodes_infra
-      nodes:
-        infra:
-{%- if nm.cluster.has_baremetal_nodes %}
-          - mas01
-{%- endif %}
     cluster: &arch_default_cluster_states
       states:
         - virtual_init
@@ -49,7 +43,6 @@ aarch64:
   default:
     vcpus: 6
     ram: 4096
-    virtual: *arch_default_virtual_nodes_infra
     cluster: *arch_default_cluster_states
   common:
     apt:
index 47f66a4..28ef4ca 100755 (executable)
@@ -17,6 +17,8 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
 bm_nodes=$(salt --out yaml 'mas01*' pillar.get maas:region:machines | \
            awk '/^\s+\w+[[:digit:]]+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
 
+wait_for 60.0 "salt --out yaml -C 'mas01*' service.status maas-fixup | fgrep -q 'false'"
+
 # Optionally destroy MaaS machines from a previous run
 if [ "${ERASE_ENV}" -gt 1 ]; then
   cleanup_uefi
@@ -26,7 +28,7 @@ if [ "${ERASE_ENV}" -gt 1 ]; then
 fi
 
 # MaaS rack/region controller, node commissioning
-wait_for 10.0 "salt -C 'mas01*' state.apply linux,salt,openssh,ntp,iptables"
+wait_for 10.0 "salt -C 'mas01*' state.apply salt,iptables"
 salt -C 'mas01*' state.apply maas.cluster
 
 wait_for 10 "salt -C 'mas01*' state.apply maas.region"
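
For context, ``wait_for`` (defined in ``mcp/scripts/lib.sh``) retries a command
string until it succeeds or the attempts are exhausted. A simplified sketch of
its semantics (not the actual implementation):

    function wait_for {
        local attempts=${1%.*}; shift  # strip any fractional part
        local cmdstr=$*
        for _ in $(seq "${attempts}"); do
            eval "${cmdstr}" && return 0
            sleep 10
        done
        return 1
    }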
index 46d8804..e883757 100755 (executable)
@@ -17,7 +17,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/xdf_data.sh"
 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
 # shellcheck disable=SC2154,SC2086,SC2116
 LOCAL_VIRT_NODES=$(echo ${virtual_nodes[*]}) # unquoted to filter space
-[[ ! "${LOCAL_VIRT_NODES}" =~ mas01 ]] || LOCAL_VIRT_NODES='mas01'
+[[ ! "${cluster_states[*]}" =~ maas ]] || LOCAL_VIRT_NODES='mas01'
 NODE_MASK="${LOCAL_VIRT_NODES// /|}"
 
 wait_for 5.0 "salt-call state.sls reclass,linux.network,salt.minion \
@@ -28,13 +28,12 @@ wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.refresh_pillar"
 
 # Init specific to VMs on FN (all for virtual, mas for baremetal)
 wait_for 3.0 "(for n in ${LOCAL_VIRT_NODES}; do salt -C \${n}.* test.ping || exit; done)"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.sync_all"
+[[ ! "${NODE_MASK}" =~ mas01 ]] || exit 0
+
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux"
 
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux.system,linux.storage"
-wait_for 2.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux.network"
 salt -C "E@^(${NODE_MASK}).*" system.reboot
 wait_for 90.0 "salt -C 'E@^(${NODE_MASK}).*' test.ping"
 wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' pkg.upgrade refresh=False dist_upgrade=True"
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.sync_all"
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.apply salt"
-
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls ntp"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.apply salt,ntp"
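
The ``-C 'E@^(${NODE_MASK}).*'`` constructs above are Salt compound targets
using PCRE matching on minion IDs; e.g., with ``NODE_MASK='mas01'``, the
following would target only the MaaS minion (illustrative):

    salt -C 'E@^(mas01).*' test.ping
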
index 37832a8..ffeeca1 100644 (file)
@@ -20,6 +20,5 @@ mcp-odl-ha/openstack/init.yml
 mcp-odl-*/infra/config.yml
 mcp-*-noha/openstack/compute.yml
 mcp-common-noha/infra/init.yml
-mcp-common-noha/init_options.yml
 mcp-*-noha/openstack/gateway.yml
 mcp-fdio-noha/infra/config.yml
index 6727ab3..0397f9d 100644 (file)
@@ -16,14 +16,10 @@ classes:
   - cluster.all-mcp-arch-common.opnfv.pod_config
 parameters:
   _param:
-    mcpcontrol_interface: ${_param:opnfv_fn_vm_primary_interface}
-    primary_interface: ${_param:opnfv_fn_vm_secondary_interface}
-    pxe_admin_interface: ${_param:opnfv_fn_vm_tertiary_interface}
     linux_system_codename: xenial
     maas_admin_username: opnfv
     dns_server01: '{{ nm.dns_public[0] }}'
-    pxe_admin_address: ${_param:infra_maas_node01_deploy_address}
-    single_address: ${_param:pxe_admin_address}
+    single_address: ${_param:infra_maas_node01_deploy_address}
     hwe_kernel: 'hwe-16.04'
     opnfv_maas_timeout_comissioning: {{ nm.maas_timeout_comissioning }}
     opnfv_maas_timeout_deploying: {{ nm.maas_timeout_deploying }}
@@ -114,46 +110,23 @@ parameters:
         default_min_hwe_kernel: ${_param:hwe_kernel}
     cluster:
       saltstack_repo_xenial: "deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/ xenial main"
+      region:
+        port: 5240
   linux:
     system:
-      kernel:
+      repo:
+        armband_3:
+          enabled: false
+      ~locale: ''
+      ~kernel:
         sysctl:
           net.ipv4.ip_forward: 1
     network:
-      interface:
-        mcpcontrol_interface:
-          enabled: true
-          name: ${_param:mcpcontrol_interface}
-          type: eth
-          proto: dhcp
-          mtu: ${_param:interface_mtu}
-        primary_interface:
-          enabled: true
-          name: ${_param:primary_interface}
-          type: eth
-{%- if conf.idf.fuel.jumphost.get('trunks', {}).get('mgmt', False) and (nm.vlan_mgmt | int > 0) %}
-          proto: manual
-          mtu: ${_param:interface_mtu}
-        primary_interface_vlan:
-          enabled: true
-          type: vlan
-          name: ${_param:primary_interface}.{{ nm.vlan_mgmt }}
-          use_interfaces:
-            - ${_param:primary_interface}
-{%- endif %}
-          proto: static
-          mtu: ${_param:interface_mtu}
-          address: ${_param:infra_maas_node01_address}
-          netmask: ${_param:opnfv_net_mgmt_mask}
-        pxe_admin_interface:
-          enabled: true
-          name: ${_param:pxe_admin_interface}
-          # MaaS has issues using MTU > 1500 for PXE interface
-          mtu: 1500
-          proto: static
-          address: ${_param:single_address}
-          netmask: ${_param:opnfv_net_admin_mask}
-          type: eth
+      resolv:
+        dns:
+{%- for server in nm.dns_public %}
+          - {{ server }}
+{%- endfor %}
   iptables:
     schema:
       epoch: 1
index 0f3cab9..e5f7e31 100644 (file)
@@ -22,25 +22,12 @@ parameters:
     salt_control_trusty_image: ''  # Dummy value, to keep reclass 1.5.2 happy
     salt_control_xenial_image: salt://salt/files/control/images/base_image_opnfv_fuel_vcp.img
 
-    # VMs spawned on Foundation Node / Jump Host net ifaces (max 4)
     # VCP VMs spawned on KVM Hosts net ifaces (max 3)
-    # NOTE(armband): Only x86 VCP VMs spawned via salt.control names differ
-
 {%- if conf.MCP_JUMP_ARCH == 'aarch64' %}
-    opnfv_fn_vm_primary_interface: enp1s0
-    opnfv_fn_vm_secondary_interface: enp2s0
-    opnfv_fn_vm_tertiary_interface: enp3s0
-    opnfv_fn_vm_quaternary_interface: enp4s0
-
-    opnfv_vcp_vm_primary_interface: ${_param:opnfv_fn_vm_primary_interface}
-    opnfv_vcp_vm_secondary_interface: ${_param:opnfv_fn_vm_secondary_interface}
-    opnfv_vcp_vm_tertiary_interface: ${_param:opnfv_fn_vm_tertiary_interface}
+    opnfv_vcp_vm_primary_interface: enp1s0
+    opnfv_vcp_vm_secondary_interface: enp2s0
+    opnfv_vcp_vm_tertiary_interface: enp3s0
 {%- else %}
-    opnfv_fn_vm_primary_interface: ens3
-    opnfv_fn_vm_secondary_interface: ens4
-    opnfv_fn_vm_tertiary_interface: ens5
-    opnfv_fn_vm_quaternary_interface: ens6
-
     opnfv_vcp_vm_primary_interface: ens2
     opnfv_vcp_vm_secondary_interface: ens3
     opnfv_vcp_vm_tertiary_interface: ens4
index ee849a6..41d73e3 100644 (file)
@@ -23,12 +23,6 @@ classes:
 parameters:
   _param:
     salt_master_host: ${_param:infra_config_deploy_address}
-    single_address: ${_param:infra_config_address}
-    deploy_address: ${_param:infra_config_deploy_address}
-    pxe_admin_address: ${_param:opnfv_infra_config_pxe_admin_address}
-    mcpcontrol_nic: ${_param:opnfv_fn_vm_primary_interface}
-    single_nic: ${_param:opnfv_fn_vm_secondary_interface}
-    pxe_admin_nic: ${_param:opnfv_fn_vm_tertiary_interface}
   salt:
     master:
       accept_policy: open_mode
index e1e6298..d3e07e1 100644 (file)
@@ -11,6 +11,9 @@ classes:
   - cluster.all-mcp-arch-common
 parameters:
   _param:
+    # infra service addresses
+    infra_config_address: ${_param:opnfv_infra_config_address}
+    infra_config_deploy_address: {{ conf.SALT_MASTER }}
     cluster_domain: ${_param:cluster_name}.local
     reclass_config_master: ${_param:opnfv_infra_config_pxe_admin_address}
     infra_maas_node01_hostname: mas01
@@ -10,9 +10,6 @@ classes:
   - cluster.all-mcp-arch-common
 parameters:
   _param:
-    # infra service addresses
-    infra_config_address: ${_param:opnfv_infra_config_address}
-    infra_config_deploy_address: {{ conf.SALT_MASTER }}
     # openstack service addresses
     openstack_control_address: ${_param:opnfv_openstack_control_node01_address}
     openstack_control_node01_address: ${_param:opnfv_openstack_control_node01_address}
index d39e259..55c737f 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-fdio-ha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index 3520cc3..e64e9a1 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-fdio-noha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index b5c40ef..ff9eff5 100644 (file)
@@ -8,8 +8,8 @@
 {%- import 'net_map.j2' as nm with context %}
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-odl-ha.infra
+  - cluster.all-mcp-arch-common.infra.maas
 {%- if 'aarch64' not in nm.cluster.arch %}
 parameters:
   _param:
index b359cfe..b91ba2c 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-odl-noha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index 655c2a3..5007749 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-ovn-ha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index 4d25f27..359ef36 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-ovn-noha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index 93fd6e7..2187ba7 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-ovs-dpdk-ha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index 57a87d6..49d2143 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-ovs-dpdk-noha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index e666d00..154675f 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-ovs-ha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index bae884f..0d54d3b 100644 (file)
@@ -7,5 +7,5 @@
 ##############################################################################
 ---
 classes:
-  - cluster.all-mcp-arch-common.infra.maas
   - cluster.mcp-ovs-noha.infra
+  - cluster.all-mcp-arch-common.infra.maas
index 6a95545..c21c3d9 100644 (file)
@@ -1,3 +1,4 @@
 mcp.rsa*
 user-data.sh
 xdf_data.sh
+docker-compose/files/entrypoint_maas.sh
index 891d559..bc8b3e8 100644 (file)
@@ -22,33 +22,60 @@ services:
       mgmt:
         ipv4_address: {{ nm.net_mgmt | ipnet_hostaddr(nm.start_ip[nm.net_mgmt] + nm.net_mgmt_hosts.index('opnfv_infra_config_address') +1) }}
     volumes:
-       - /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro
-       - {{ conf.MCP_REPO_ROOT_PATH }}:/root/fuel
-       - {{ conf.MCP_REPO_ROOT_PATH }}/mcp/scripts/docker-compose/files/entrypoint.sh:/entrypoint.sh
-       - {{ conf.MCP_STORAGE_DIR }}/pod_config.yml:/root/pod_config.yml
-       - {{ conf.MCP_STORAGE_DIR }}/nodes:/srv/salt/reclass/nodes
-       - {{ conf.MCP_STORAGE_DIR }}/pki:/etc/pki
-       - {{ conf.MCP_STORAGE_DIR }}/salt:/etc/salt
-       - {{ conf.MCP_STORAGE_DIR }}/hosts:/etc/hosts
+      - /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro
+      - {{ conf.MCP_REPO_ROOT_PATH }}:/root/fuel
+      - {{ conf.MCP_REPO_ROOT_PATH }}/mcp/scripts/docker-compose/files/entrypoint.sh:/entrypoint.sh
+      - {{ conf.MCP_STORAGE_DIR }}/pod_config.yml:/root/pod_config.yml
+      - {{ conf.MCP_STORAGE_DIR }}/nodes:/srv/salt/reclass/nodes
+      - {{ conf.MCP_STORAGE_DIR }}/pki:/etc/pki
+      - {{ conf.MCP_STORAGE_DIR }}/salt:/etc/salt
+      - {{ conf.MCP_STORAGE_DIR }}/hosts:/etc/hosts
 {%- if conf.MCP_VCP %}
-       - {{ conf.MCP_STORAGE_DIR }}/base_image_opnfv_fuel_vcp.img:/srv/salt/env/prd/salt/files/control/images/base_image_opnfv_fuel_vcp.img
+      - {{ conf.MCP_STORAGE_DIR }}/base_image_opnfv_fuel_vcp.img:/srv/salt/env/prd/salt/files/control/images/base_image_opnfv_fuel_vcp.img
 {%- endif %}
     hostname: cfg01
     domainname: {{ conf.cluster.domain }}
     privileged: true
+{%- if nm.cluster.has_baremetal_nodes %}
+  opnfv-fuel-maas:
+    container_name: "maas"
+    image: "opnfv/fuel:saltminion-maas-{{ conf.MCP_DOCKER_TAG }}"
+    networks:
+      mcpcontrol:
+        ipv4_address: {{ conf.MAAS_IP }}
+      pxebr:
+        ipv4_address: {{ nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_maas_node01_deploy_address') +1) }}
+      mgmt:
+        ipv4_address: {{ nm.net_mgmt | ipnet_hostaddr(nm.start_ip[nm.net_mgmt] + nm.net_mgmt_hosts.index('opnfv_infra_maas_node01_address') +1) }}
+    volumes:
+      - /lib/modules:/lib/modules:ro
+      - /sys/fs/cgroup:/sys/fs/cgroup:ro
+      - /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro
+      - {{ conf.MCP_REPO_ROOT_PATH }}/mcp/scripts/docker-compose/files/entrypoint_maas.sh:/entrypoint.sh:ro
+      - {{ conf.MCP_STORAGE_DIR }}/hosts:/etc/hosts:ro
+      - {{ conf.MCP_STORAGE_DIR }}/mas01/etc/iptables:/etc/iptables
+      - {{ conf.MCP_STORAGE_DIR }}/mas01/var/lib/postgresql:/var/lib/postgresql
+      - {{ conf.MCP_STORAGE_DIR }}/mas01/var/lib/maas:/var/lib/maas
+      - {{ conf.MCP_STORAGE_DIR }}/mas01/var/spool/maas-proxy:/var/spool/maas-proxy
+      - {{ conf.MCP_STORAGE_DIR }}/mas01/etc/maas:/etc/maas
+    hostname: mas01
+    domainname: {{ conf.cluster.domain }}
+    privileged: true
+    ports:
+      - 5240:5240
+{%- endif %}
 networks:
   mcpcontrol:
-    driver: macvlan
+    driver: bridge
     driver_opts:
-      parent: veth_mcp1  # Always untagged
+      com.docker.network.driver.mtu: 9000
     ipam:
       config:
         - subnet: {{ net_mcpcontrol }}
-          gateway: {{ net_mcpcontrol | ipnet_hostaddr(1) }}
   pxebr:
     driver: macvlan
     driver_opts:
-      parent: veth_mcp3  # Always untagged
+      parent: veth_mcp1  # Always untagged
     ipam:
       config:
         - subnet: {{ nm.net_admin }}
@@ -58,7 +85,7 @@ networks:
 {%- if conf.idf.fuel.jumphost.get('trunks', {}).get('mgmt', False) %}
       parent: {{ ma.interface_str('veth_mcp5', nm.vlan_mgmt) }}
 {%- else %}
-      parent: veth_mcp5  # Untagged by default
+      parent: veth_mcp3  # Untagged by default
 {%- endif %}
     ipam:
       config:
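
The ``com.docker.network.driver.mtu`` option set on ``mcpcontrol`` above can be
read back once the stack is up (illustrative; the network name prefix depends
on the compose project):

    docker network inspect <project>_mcpcontrol \
        --format '{{ index .Options "com.docker.network.driver.mtu" }}'
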
index 9830ea1..a0f72e2 100755 (executable)
@@ -21,11 +21,6 @@ if [ ! -f /home/ubuntu/.ssh/authorized_keys ]; then
     echo 'IdentityFile /root/fuel/mcp/scripts/mcp.rsa' >> /root/.ssh/config
 fi
 
-if ! grep -q localhost /etc/hosts; then
-    # overwrite hosts only on first container up, to preserve cluster nodes
-    cp -a /root/fuel/mcp/scripts/docker-compose/files/hosts /etc/hosts
-fi
-
 # salt state does not properly configure file_roots in master.conf, hard set it
 cp -a /root/fuel/mcp/scripts/docker-compose/files/opnfv_master.conf \
       /etc/salt/master.d/opnfv_master.conf
diff --git a/mcp/scripts/docker-compose/files/entrypoint_maas.sh.j2 b/mcp/scripts/docker-compose/files/entrypoint_maas.sh.j2
new file mode 100644 (file)
index 0000000..23b8d8f
--- /dev/null
@@ -0,0 +1,62 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- set pxebr_addr = nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_maas_node01_deploy_address') +1) %}
+if [ ! -e /var/lib/postgresql/*/main ]; then
+    cp -ar /var/lib/opnfv/{postgresql,maas} /var/lib/
+    cp -ar /var/lib/opnfv/etc/{ssh,maas} /etc/
+fi
+chown -R maas:maas /var/lib/maas
+chown -R postgres:postgres /var/lib/postgresql
+chown -R proxy:proxy /var/spool/maas-proxy
+
+if [ ! -f /etc/sysctl.d/99-salt.conf ]; then
+    echo 'net.ipv4.ip_forward = 1' > /etc/sysctl.d/99-salt.conf
+fi
+
+cat <<-EOF | tee /etc/resolv.conf
+{%- for server in nm.dns_public %}
+nameserver {{ server }}
+{%- endfor %}
+EOF
+
+cat <<-EOF | tee /etc/salt/minion.d/opnfv.conf
+id: mas01.{{ conf.cluster.domain }}
+master: {{ conf.SALT_MASTER }}
+grains:
+  virtual_subtype: Docker_
+EOF
+rm -f /etc/salt/minion.d/99-master-address.conf
+
+# Work around MaaS issues with PXE/admin using jumbo frames
+MAAS_MTU_SERVICE="/etc/systemd/system/maas-mtu.service"
+cat <<-EOF | tee "${MAAS_MTU_SERVICE}"
+[Unit]
+Requires=network-online.target
+After=network-online.target
+[Service]
+ExecStart=/bin/sh -ec '\
+  /sbin/ifconfig $(/sbin/ip addr | /bin/grep -Po "{{ pxebr_addr }}.* \K(.*)") mtu 1500'
+EOF
+ln -sf "${MAAS_MTU_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+
+# Configure mass-region-controller if not already done previously
+[ ! -e /var/lib/maas/secret ] || exit 0
+MAAS_FIXUP_SERVICE="/etc/systemd/system/maas-fixup.service"
+cat <<-EOF | tee "${MAAS_FIXUP_SERVICE}"
+[Unit]
+After=postgresql.service
+[Service]
+ExecStart=/bin/sh -ec '\
+  echo "debconf debconf/frontend select Noninteractive" | debconf-set-selections && \
+  /var/lib/dpkg/info/maas-region-controller.config configure && \
+  /var/lib/dpkg/info/maas-region-controller.postinst configure'
+EOF
+ln -sf "${MAAS_FIXUP_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+rm "/usr/sbin/policy-rc.d"
index 5b09c29..b89a33d 100644 (file)
@@ -60,6 +60,8 @@ function jumpserver_pkg_install {
 }
 
 function jumpserver_check_requirements {
+  # shellcheck disable=SC2178
+  local states=$1; shift
   # shellcheck disable=SC2178
   local vnodes=$1; shift
   local br=("$@")
@@ -67,7 +69,7 @@ function jumpserver_check_requirements {
   local err_br_virsh_net='is a virtual network, Linux bridge expected!'
   local warn_br_endpoint="Endpoints might be inaccessible from external hosts!"
   # MaaS requires a Linux bridge for PXE/admin
-  if [[ "${vnodes}" =~ mas01 ]]; then
+  if [[ "${states}" =~ maas ]]; then
     if ! brctl showmacs "${br[0]}" >/dev/null 2>&1; then
       notify_e "[ERROR] PXE/admin (${br[0]}) ${err_br_not_found}"
     fi
@@ -77,9 +79,9 @@ function jumpserver_check_requirements {
     fi
   fi
   # If virtual nodes are present, public should be a Linux bridge
-  if [ "$(echo "${vnodes}" | wc -w)" -gt 2 ]; then
+  if [ -n "${vnodes}" ]; then
     if ! brctl showmacs "${br[3]}" >/dev/null 2>&1; then
-      if [[ "${vnodes}" =~ mas01 ]]; then
+      if [[ "${states}" =~ maas ]]; then
         # Baremetal nodes *require* a proper public network
         notify_e "[ERROR] Public (${br[3]}) ${err_br_not_found}"
       else
@@ -88,7 +90,7 @@ function jumpserver_check_requirements {
       fi
     fi
     if ${VIRSH} net-info "${br[3]}" >/dev/null 2>&1; then
-      if [[ "${vnodes}" =~ mas01 ]]; then
+      if [[ "${states}" =~ maas ]]; then
         notify_e "[ERROR] ${br[3]} ${err_br_virsh_net}"
       else
         notify_n "[WARN] ${br[3]} ${err_br_virsh_net}" 3
index 9c4d8fb..bce54ad 100644 (file)
@@ -216,9 +216,6 @@ function prepare_vms {
   local image=base_image_opnfv_fuel.img
   local vcp_image=${image%.*}_vcp.img
   local _o=${base_image/*\/}
-  local _h=$(echo "${repos_pkgs_str}.$(md5sum "${image_dir}/${_o}")" | \
-             md5sum | cut -c -8)
-  local _tmp
   [ -n "${image_dir}" ] || exit 1
 
   cleanup_uefi
@@ -226,8 +223,10 @@ function prepare_vms {
   __get_base_image "${base_image}" "${image_dir}"
   IFS='^' read -r -a repos_pkgs <<< "${repos_pkgs_str}"
 
+  local _h=$(echo "${repos_pkgs_str}.$(md5sum "${image_dir}/${_o}")" | \
+             md5sum | cut -c -8)
+  local _tmp="${image%.*}.${_h}.img"
   echo "[INFO] Lookup cache / build patched base image for fingerprint: ${_h}"
-  _tmp="${image%.*}.${_h}.img"
   if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${image}" ]; then
     echo "[INFO] Patched base image found"
   else
@@ -278,9 +277,9 @@ function prepare_vms {
 }
 
 function create_networks {
-  local all_vnode_networks=("mcpcontrol" "$@")
-  # create required networks, including constant "mcpcontrol"
-  for net in "${all_vnode_networks[@]}"; do
+  local all_vnode_networks=("$@")
+  # create required networks
+  for net in "mcpcontrol" "${all_vnode_networks[@]}"; do
     if ${VIRSH} net-info "${net}" >/dev/null 2>&1; then
       ${VIRSH} net-destroy "${net}" || true
       ${VIRSH} net-undefine "${net}"
@@ -293,8 +292,8 @@ function create_networks {
       ${VIRSH} net-start "${net}"
     fi
   done
-  # create veth pairs for relevant networks (mcpcontrol, pxebr, mgmt)
-  for i in $(seq 0 2 4); do
+  # create veth pairs for relevant networks (pxebr, mgmt)
+  for i in $(seq 0 2 2); do
     sudo ip link del "veth_mcp$i" || true
     sudo ip link add "veth_mcp$i" type veth peer name "veth_mcp$((i+1))"
     sudo ip link set "veth_mcp$i" up mtu 9000
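
After ``create_networks`` runs, the two remaining veth pairs can be listed on
the jumpserver (output illustrative):

    jenkins@jumpserver:~$ ip -br link show type veth
    veth_mcp1@veth_mcp0    UP    ...
    veth_mcp3@veth_mcp2    UP    ...
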
@@ -337,14 +336,8 @@ function create_vms {
 
     # prepare network args
     local vnode_networks=("$@")
-    if [[ "${vnode_data[0]}" =~ ^(cfg01|mas01) ]]; then
-      net_args=" --network network=mcpcontrol,model=virtio"
-      # 3rd interface gets connected to PXE/Admin Bridge (cfg01, mas01)
-      vnode_networks[2]="${vnode_networks[0]}"
-    else
-      net_args=" --network bridge=${vnode_networks[0]},model=virtio"
-    fi
-    for net in "${vnode_networks[@]:1}"; do
+    local net_args=
+    for net in "${vnode_networks[@]}"; do
       net_args="${net_args} --network bridge=${net},model=virtio"
     done
 
@@ -370,27 +363,16 @@ function create_vms {
   done
 }
 
-function update_mcpcontrol_network {
-  # set static ip address for salt master node, MaaS node
-  local amac=$(${VIRSH} domiflist mas01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
-  [ -z "${amac}" ] || ${VIRSH} net-update "mcpcontrol" add ip-dhcp-host \
-    "<host mac='${amac}' name='mas01' ip='${MAAS_IP}'/>" --live --config
-}
-
 function reset_vms {
   local vnodes=("$@")
   local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
 
   # reset non-infrastructure vms, wait for them to come back online
   for node in "${vnodes[@]}"; do
-    if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
-      ${VIRSH} reset "${node}"
-    fi
+    ${VIRSH} reset "${node}"
   done
   for node in "${vnodes[@]}"; do
-    if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
-      wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all"
-    fi
+    wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all"
   done
 }
 
@@ -414,9 +396,26 @@ function prepare_containers {
   if [[ ! "${MCP_DOCKER_TAG}" =~ 'verify' ]]; then
     "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml pull
   fi
-  sudo rm -rf "${image_dir}/"{salt,hosts,pki} "${image_dir}/nodes/"*
-  mkdir -p "${image_dir}/salt/"{master.d,minion.d}
-  touch "${image_dir}/hosts"
+  # overwrite hosts only on first container up, to preserve cluster nodes
+  sudo cp docker-compose/files/hosts "${image_dir}/hosts"
+  sudo rm -rf "${image_dir}/"{salt,pki,mas01/etc} "${image_dir}/nodes/"*
+  find "${image_dir}/mas01/var/lib/" \
+    -mindepth 2 -maxdepth 2 -not -name boot-resources \
+    -exec sudo rm -rf {} \; || true
+  mkdir -p "${image_dir}/"{salt/master.d,salt/minion.d}
+
+  if grep -q -e 'maas' 'docker-compose/docker-compose.yaml'; then
+    chmod +x docker-compose/files/entrypoint*.sh
+    # Apparmor workaround for bind9 inside Docker containers using AUFS
+    for profile in 'usr.sbin.ntpd' 'usr.sbin.named' \
+                   'usr.sbin.dhcpd' 'usr.bin.tcpdump'; do
+      if [ -e "/etc/apparmor.d/${profile}" ] && \
+       [ ! -e "/etc/apparmor.d/disable/${profile}" ]; then
+        sudo ln -sf "/etc/apparmor.d/${profile}" "/etc/apparmor.d/disable/"
+        sudo apparmor_parser -R "/etc/apparmor.d/${profile}" || true
+      fi
+    done
+  fi
 }
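
Should the AppArmor workaround ever need reverting (e.g. after tearing down the
environment), the profiles can be re-enabled manually (illustrative):

    for profile in usr.sbin.ntpd usr.sbin.named usr.sbin.dhcpd usr.bin.tcpdump; do
        sudo rm -f "/etc/apparmor.d/disable/${profile}"
        sudo apparmor_parser -r "/etc/apparmor.d/${profile}" || true
    done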
 
 function start_containers {
index cebf3bc..d777732 100644 (file)
@@ -7,13 +7,8 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 {%- import 'net_map.j2' as nm with context %}
-{%- set infra_nodes = conf[conf.MCP_JUMP_ARCH].default.virtual.nodes.infra %}
 rm /etc/salt/minion_id
 rm -f /etc/salt/pki/minion/minion_master.pub
 echo "id: $(hostname).{{ conf.cluster.domain }}" > /etc/salt/minion
-if [[ "{{ infra_nodes or [] | join(' ') }}" =~ $(hostname) ]]; then
-  echo "master: {{ conf.SALT_MASTER }}" >> /etc/salt/minion
-else
-  echo "master: {{ nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_config_pxe_admin_address') +1) }}" >> /etc/salt/minion
-fi
+echo "master: {{ nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_config_pxe_admin_address') +1) }}" >> /etc/salt/minion
 service salt-minion restart
diff --git a/mcp/scripts/virsh_net/net_mcpcontrol.xml.j2 b/mcp/scripts/virsh_net/net_mcpcontrol.xml.j2
deleted file mode 100644 (file)
index 46798a3..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-<!--
- Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-
- All rights reserved. This program and the accompanying materials
- are made available under the terms of the Apache License, Version 2.0
- which accompanies this distribution, and is available at
- http://www.apache.org/licenses/LICENSE-2.0
--->
-{#- conf.MCPCONTROL_NET & co are mandatory, defaults are set via globals.sh #}
-{%- set net_mcpcontrol = [conf.MCPCONTROL_NET, conf.MCPCONTROL_PREFIX] | join("/") %}
-<network>
-  <name>mcpcontrol</name>
-  <bridge name="mcpcontrol"/>
-  <forward mode="nat"/>
-  <ip address="{{ net_mcpcontrol | ipnet_hostaddr(1) }}" netmask="{{ net_mcpcontrol | ipnet_netmask }}">
-    <dhcp>
-      <range start="{{ net_mcpcontrol | ipnet_hostaddr(3) }}" end="{{ net_mcpcontrol | ipnet_hostmax }}"/>
-    </dhcp>
-  </ip>
-</network>
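
With this definition gone, ``mcpcontrol`` no longer appears as a libvirt
network; it only exists on the Docker side (illustrative):

    jenkins@jumpserver:~$ virsh net-list --all | grep mcpcontrol || true
    jenkins@jumpserver:~$ docker network ls --filter name=mcpcontrol
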
index 4db5593..30e41e9 100644 (file)
@@ -15,9 +15,7 @@
 {%- set cluster_states = conf.cluster.states if conf.MCP_NO_DEPLOY_ENVIRONMENT < 2 else [] -%}
 {%- set arch = conf[conf.MCP_JUMP_ARCH] -%}
 {%- set V = conf.virtual -%}
-{%- do V.nodes.update(arch.default.virtual.nodes) -%}
 {%- set section_map = {
-  'infra': -1,
   'control': nm.ctl01.idx,
   'compute': nm.cmp001.idx
 } -%}
@@ -36,7 +34,7 @@
   {%- set arr = [] -%}
   {%- for section in section_map -%}
     {%- for n in V.nodes[section] or [] -%}
-      {%- if section_map[section] < 0 or conf.nodes[section_map[section] + loop.index0].node.type == 'virtual' -%}
+      {%- if conf.nodes[section_map[section] + loop.index0].node.type == 'virtual' -%}
         {%- if n not in V -%}{%- do V.update({n: {}}) -%}{%- endif -%}
         {%- set cpu_topo = 'cpu_topology' in V[n] and not conf.MCP_CMP_SS -%}
         {%- if 'numa' in V[n] and cpu_topo -%}
@@ -63,9 +61,7 @@
   {%- set arr = [] -%}
   {%- for section in sections -%}
     {%- for n in V.nodes[section] or [] -%}
-      {%- if ( (section_map[section] < 0 and type == 'virtual') or
-               (section_map[section] >= 0 and
-                conf.nodes[section_map[section] + loop.index0].node.type in type) ) -%}
+      {%- if conf.nodes[section_map[section] + loop.index0].node.type in type -%}
         {%- do arr.append(n) -%}
       {%- endif -%}
     {%- endfor -%}