--- /dev/null
+# Heat environment snippet enabling the kvm4nfv real-time kernel on compute
+# nodes via a firstboot (cloud-init) script.
+parameter_defaults:
+    # Filename of the kvmfornfv kernel RPM to install from /root on compute
+    # nodes. This is a placeholder: the build script sed-replaces it with the
+    # real RPM name before uploading this file to the undercloud.
+    ComputeKernelArgs: 'kvmfornfv_kernel.rpm'
+resource_registry:
+    # Override the default node firstboot config with the kvm4nfv installer
+    OS::TripleO::NodeUserData: kvm4nfv-1st-boot.yaml
 
--- /dev/null
+heat_template_version: 2014-10-16
+
+description: >
+  This is the firstboot configuration for kvmfornfv kernel of the compute nodes
+  via cloud-init. To enable this, replace the default
+  mapping of OS::TripleO::NodeUserData in ../overcloud_resource_registry*
+
+parameters:
+  ComputeKernelArgs:
+    description: >
+      Name of the kvmfornfv kernel rpm.
+      Example: "kvmfornfv_kernel.rpm"
+    type: string
+    default: ""
+  # Overcloud compute hostname format; may contain the %index% and %stackname%
+  # tokens, which the firstboot script strips before matching. When empty the
+  # script falls back to matching the substring "compute".
+  ComputeHostnameFormat:
+    type: string
+    default: ""
+
+resources:
+  # cloud-init multipart payload wrapping the single kernel-install script part
+  userdata:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: {get_resource: compute_kernel_args}
+
+  # Verify the logs on /var/log/cloud-init.log on the overcloud node
+  # On nodes whose hostname matches the compute format: installs the
+  # kvmfornfv kernel RPM from /root, regenerates the grub config, and
+  # reboots into the new kernel.
+  # NOTE(review): with the default empty ComputeKernelArgs the yum target is
+  # just /root/ and the install fails — confirm deployments always set it
+  # (the enable_rt_kvm.yaml environment does).
+  compute_kernel_args:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      config:
+        str_replace:
+          template: |
+            #!/bin/bash
+            set -x
+            FORMAT=$COMPUTE_HOSTNAME_FORMAT
+            if [[ -z $FORMAT ]] ; then
+              FORMAT="compute" ;
+            else
+              # Assumption: only %index% and %stackname% are the variables in Host name format
+              FORMAT=$(echo $FORMAT | sed  's/\%index\%//g' | sed 's/\%stackname\%//g') ;
+            fi
+            if [[ $(hostname) == *$FORMAT* ]] ; then
+              yum install -y /root/$KVMFORNFV_KERNEL_RPM
+              grub2-mkconfig -o /etc/grub2.cfg
+              sleep 5
+              reboot
+            fi
+          params:
+            $KVMFORNFV_KERNEL_RPM: {get_param: ComputeKernelArgs}
+            $COMPUTE_HOSTNAME_FORMAT: {get_param: ComputeHostnameFormat}
+
+outputs:
+  # This means get_resource from the parent template will get the userdata, see:
+  # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent
+  # Note this is new-for-kilo, an alternative is returning a value then using
+  # get_attr in the parent template instead.
+  OS::stack_id:
+    value: {get_resource: userdata}
+
+
 
 gpgcheck=0
 EOF
 
+# Download the kvm4nfv real-time kernel RPM into the local cache
+populate_cache $kvmfornfv_uri_base/$kvmfornfv_kernel_rpm
+
 # Increase disk size by 500MB to accommodate more packages
 qemu-img resize overcloud-full_build.qcow2 +500MB
 
 # git clone vsperf into the overcloud image
 # upload the tacker puppet module and untar it
 # install tacker
+# upload the rt_kvm kernel
 LIBGUESTFS_BACKEND=direct virt-customize \
     --run-command "xfs_growfs /dev/sda" \
     --upload ${BUILD_DIR}/opnfv-puppet-tripleo.tar.gz:/etc/puppet/modules \
     --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron-patch-NSDriver.patch" \
     --upload ${BUILD_ROOT}/patches/puppet-neutron-add-odl-settings.patch:/usr/share/openstack-puppet/modules/neutron/ \
     --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 <  puppet-neutron-add-odl-settings.patch" \
+    --upload ${CACHE_DIR}/$kvmfornfv_kernel_rpm:/root/ \
     -a overcloud-full_build.qcow2
 
 mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
 
 install config/deploy/os-nosdn-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
 install config/deploy/os-nosdn-performance-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
 install config/deploy/os-nosdn-ovs-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-ovs-ha.yaml
+install config/deploy/os-nosdn-kvm-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
+install config/deploy/os-nosdn-kvm-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
 install config/deploy/os-odl_l2-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-nofeature-ha.yaml
 install config/deploy/os-odl_l2-sfc-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l2-sfc-noha.yaml
 install config/deploy/os-odl-bgpvpn-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_l2-nofeature-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_l2-sfc-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-ha.yaml
 %changelog
 * Wed Mar 08 2017 Dan Radez <dradez@redhat.com> - 4.0-5
 - Commenting out ONOS, unsupported in Danube
+- Adding kvm4nfv files
 * Tue Feb 14 2017 Feng Pan <fpan@redhat.com> - 4.0-4
 - Add network_settings_vpp.yaml
 * Fri Feb 3 2017 Nikolas Hermanns <nikolas.hermanns@ericsson.com> - 4.0-3
 
 git archive --format=tar.gz --prefix=openstack-tripleo-heat-templates/ HEAD > ${BUILD_DIR}/opnfv-tht.tar.gz
 popd > /dev/null
 
+# Inject the actual rt_kvm kernel RPM filename into the enable_rt_kvm.yaml environment file
+sed -i "s/kvmfornfv_kernel.rpm/$kvmfornfv_kernel_rpm/" ${BUILD_ROOT}/enable_rt_kvm.yaml
+
 # installing forked opnfv-tht
 # enabling ceph OSDs to live on the controller
 # OpenWSMan package update supports the AMT Ironic driver for the TealBox
     --run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \
     --upload ${BUILD_ROOT}/opnfv-environment.yaml:/home/stack/ \
     --upload ${BUILD_ROOT}/first-boot.yaml:/home/stack/ \
+    --upload ${BUILD_ROOT}/kvm4nfv-1st-boot.yaml:/home/stack/ \
+    --upload ${BUILD_ROOT}/enable_rt_kvm.yaml:/home/stack/ \
     --upload ${BUILD_ROOT}/csit-environment.yaml:/home/stack/ \
     --upload ${BUILD_ROOT}/virtual-environment.yaml:/home/stack/ \
     --install "python2-congressclient" \
 
 'ovs4opnfv-e8acab14-openvswitch-2.5.90-0.12032.gitc61e93d6.1.el7.centos.x86_64.rpm'
 )
 
+kvmfornfv_uri_base="http://artifacts.opnfv.org/kvmfornfv"
+kvmfornfv_kernel_rpm="kernel-4.4.6_rt14_1703030237nfv-1.x86_64.rpm"
+
 tacker_repo="http://github.com/openstack/tacker"
 tacker_branch="stable/newton"
 tacker_commit=$(git ls-remote ${tacker_repo} ${tacker_branch} | awk '{print substr($1,1,7)}')
 
   # 'ovs', 'ovs_dpdk' and 'fdio'.
   dataplane : ovs
 
+  # Whether to run the kvm4nfv real-time kernel (rt_kvm) on the compute node(s)
+  # to reduce network latencies introduced by network function virtualization
+  rt_kvm: false
+
   # Whether to install and configure fdio functionality in the overcloud
   # The dataplane should be specified as fdio if this is set to true
   vpp: false
 
--- /dev/null
+# Apex deploy settings for the os-nosdn-kvm-ha scenario:
+# no SDN controller, kvm4nfv real-time kernel, HA overcloud.
+global_params:
+  # Deploy a highly-available (multi-controller) overcloud
+  ha_enabled: true
+
+deploy_options:
+  # No SDN controller for this scenario
+  sdn_controller: false
+  sdn_l3: false
+  tacker: true
+  congress: true
+  sfc: false
+  vpn: false
+  # Install the kvm4nfv real-time kernel on compute nodes at firstboot
+  rt_kvm: true
 
--- /dev/null
+# Apex deploy settings for the os-nosdn-kvm-noha scenario:
+# no SDN controller, kvm4nfv real-time kernel, non-HA overcloud.
+global_params:
+  # Single-controller (non-HA) overcloud
+  ha_enabled: false
+
+deploy_options:
+  # No SDN controller for this scenario
+  sdn_controller: false
+  sdn_l3: false
+  tacker: true
+  congress: true
+  sfc: false
+  vpn: false
+  # Install the kvm4nfv real-time kernel on compute nodes at firstboot
+  rt_kvm: true
 
       compute:                       # Mapping for compute profile (nodes that will be used as Compute nodes)
         phys_type: interface         # Physical interface type (interface or bond)
         members:                     # Physical NIC members of this mapping (Single value allowed for interface phys_type)
-          - nic1
+          - eth0
       controller:                    # Mapping for controller profile (nodes that will be used as Controller nodes)
         phys_type: interface
         members:
         phys_type: interface         # Physical interface type (interface or bond)
         vlan: native                 # VLAN tag to use with this NIC
         members:                     # Physical NIC members of this mapping (Single value allowed for interface phys_type)
-          - nic2                     # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+          - eth1                     # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
       controller:                    # Mapping for controller profile (nodes that will be used as Controller nodes)
         phys_type: interface
         vlan: native
           phys_type: interface       # Physical interface type (interface or bond)
           vlan: native               # VLAN tag to use with this NIC
           members:                   # Physical NIC members of this mapping (Single value allowed for interface phys_type)
-            - nic3
+            - eth2
         controller:                  # Mapping for controller profile (nodes that will be used as Controller nodes)
           phys_type: interface
           vlan: native
           phys_type: interface       # Physical interface type (interface or bond)
           vlan: 101                  # VLAN tag to use with this NIC
           members:                   # Physical NIC members of this mapping (Single value allowed for interface phys_type)
-            - nic3                   # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+            - eth2                   # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
         controller:                  # Mapping for controller profile (nodes that will be used as Controller nodes)
           phys_type: interface
           vlan: 101
         phys_type: interface         # Physical interface type (interface or bond)
         vlan: native                 # VLAN tag to use with this NIC
         members:                     # Physical NIC members of this mapping (Single value allowed for interface phys_type)
-          - nic4                     # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+          - eth3                     # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
       controller:                    # Mapping for controller profile (nodes that will be used as Controller nodes)
         phys_type: interface
         vlan: native
 
   local num_compute_nodes
   local num_control_nodes
 
+  # OPNFV Default Environment and Network settings
+  DEPLOY_OPTIONS+=" -e ${ENV_FILE}"
+  DEPLOY_OPTIONS+=" -e network-environment.yaml"
+
+  # Custom Deploy Environment Templates
   if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
     if [ "${deploy_options_array['sfc']}" == 'True' ]; then
       DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
     DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/enable_congress.yaml"
   fi
 
+  # Enable Real Time Kernel (kvm4nfv)
+  if [ "${deploy_options_array['rt_kvm']}" == 'True' ]; then
+    DEPLOY_OPTIONS+=" -e /home/stack/enable_rt_kvm.yaml"
+  fi
+
   # Make sure the correct overcloud image is available
   if [ ! -f $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
       echo "${red} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
     DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
   fi
 
-  #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
-  DEPLOY_OPTIONS+=" -e network-environment.yaml"
-
-
   # get number of nodes available in inventory
   num_control_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:control /home/stack/instackenv.json")
   num_compute_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:compute /home/stack/instackenv.json")
      DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
   fi
 
-  DEPLOY_OPTIONS+=" -e ${ENV_FILE}"
-
   echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
 
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
 
                        'vpn',
                        'vpp',
                        'ceph',
-                       'gluon']
+                       'gluon',
+                       'rt_kvm']
 
 OPT_DEPLOY_SETTINGS = ['performance',
                        'vsperf',
 
                     logging.info("{} enabled".format(network))
                     self._config_required_settings(network)
                     nicmap = _network['nic_mapping']
+                    self._validate_overcloud_nic_order(network)
                     iface = nicmap[CONTROLLER]['members'][0]
                     self._config_ip_range(network=network,
                                           interface=iface,
                                           ip_range='overcloud_ip_range',
                                           start_offset=21, end_offset=21)
                     self.enabled_network_list.append(network)
-                    self._validate_overcloud_nic_order(network)
                     # TODO self._config_optional_settings(network)
                 else:
                     logging.info("{} disabled, will collapse with "
 
             nic_index = 1
             print(ns.nics)
             for network in ns.enabled_network_list:
-                nic = 'nic' + str(nic_index)
+                if role == 'compute':
+                    nic = 'eth' + str(nic_index - 1)
+                else:
+                    nic = 'nic' + str(nic_index)
                 assert_equal(ns.nics[role][network], nic)
                 nic_index += 1
 
         ns = NetworkSettings(files_dir+'network_settings.yaml')
         storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
         # set duplicate nic
-        storage_net_nicmap['compute']['members'][0] = 'nic1'
+        storage_net_nicmap['controller']['members'][0] = 'nic1'
         assert_raises(NetworkSettingsException, NetworkSettings, ns)
         # remove nic members
-        storage_net_nicmap['compute']['members'] = []
+        storage_net_nicmap['controller']['members'] = []
         assert_raises(NetworkSettingsException, NetworkSettings, ns)
 
     def test_missing_vlan(self):