Merge "Fixes missing bgpvpn service on controller"
author: Tim Rozet <trozet@redhat.com>
Sat, 23 Sep 2017 02:52:53 +0000 (02:52 +0000)
committer: Gerrit Code Review <gerrit@opnfv.org>
Sat, 23 Sep 2017 02:52:53 +0000 (02:52 +0000)
apex/deploy.py
apex/inventory/inventory.py
apex/tests/config/inventory-virt-1-node.yaml [new file with mode: 0644]
apex/tests/config/inventory-virt.yaml [new file with mode: 0644]
apex/tests/test_apex_inventory.py
apex/virtual/virtual_utils.py
build/overcloud-full.sh

index 9ebc3f6..a056138 100644 (file)
@@ -83,7 +83,7 @@ def build_vms(inventory, network_settings,
             name, volume_path,
             baremetal_interfaces=network_settings.enabled_network_list,
             memory=node['memory'], cpus=node['cpu'],
-            macs=[node['mac_address']],
+            macs=node['mac'],
             template_dir=template_dir)
         virt_utils.host_setup({name: node['pm_port']})
 
@@ -280,10 +280,8 @@ def main():
         ansible_args = {
             'virsh_enabled_networks': net_settings.enabled_network_list
         }
-        ansible_path = os.path.join(args.lib_dir, ANSIBLE_PATH)
         utils.run_ansible(ansible_args,
-                          os.path.join(args.lib_dir,
-                                       ansible_path,
+                          os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'deploy_dependencies.yml'))
         uc_external = False
         if 'external' in net_settings.enabled_network_list:
@@ -328,8 +326,7 @@ def main():
                                        args.deploy_dir, APEX_TEMP_DIR)
         # Install Undercloud
         undercloud.configure(net_settings,
-                             os.path.join(args.lib_dir,
-                                          ansible_path,
+                             os.path.join(args.lib_dir, ANSIBLE_PATH,
                                           'configure_undercloud.yml'),
                              APEX_TEMP_DIR)
 
@@ -344,7 +341,7 @@ def main():
         overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings,
                                            inventory, APEX_TEMP_DIR,
                                            args.virtual, args.env_file)
-        deploy_playbook = os.path.join(args.lib_dir, ansible_path,
+        deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'deploy_overcloud.yml')
         virt_env = 'virtual-environment.yaml'
         bm_env = 'baremetal-environment.yaml'
@@ -415,7 +412,7 @@ def main():
             deploy_vars['external_network_ipv6'] = True
         else:
             deploy_vars['external_network_ipv6'] = False
-        post_undercloud = os.path.join(args.lib_dir, ansible_path,
+        post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'post_deploy_undercloud.yml')
         logging.info("Executing post deploy configuration undercloud playbook")
         try:
@@ -432,7 +429,7 @@ def main():
         deploy_vars['vpn'] = ds_opts['vpn']
         # TODO(trozet): pull all logs and store in tmp dir in overcloud
         # playbook
-        post_overcloud = os.path.join(args.lib_dir, ansible_path,
+        post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                       'post_deploy_overcloud.yml')
         # Run per overcloud node
         for node, ip in deploy_vars['overcloud_nodes'].items():
index 71f8e52..3483e57 100644 (file)
@@ -40,7 +40,7 @@ class Inventory(dict):
 
         # move ipmi_* to pm_*
         # make mac a list
-        def munge_nodes(node):
+        def munge_node(node):
             node['pm_addr'] = node['ipmi_ip']
             node['pm_password'] = node['ipmi_pass']
             node['pm_user'] = node['ipmi_user']
@@ -54,23 +54,21 @@ class Inventory(dict):
 
             for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
                       'disk_device'):
-                if i == 'disk_device' and 'disk_device' in node.keys():
-                    self.root_device = node[i]
-                else:
-                    continue
-                del node[i]
+                if i in node.keys():
+                    if i == 'disk_device':
+                        self.root_device = node[i]
+                    del node[i]
 
             return node
-
-        super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
+        super().__init__({'nodes': list(map(munge_node, init_dict['nodes']))})
 
         # verify number of nodes
-        if ha and len(self['nodes']) < 5 and not virtual:
+        if ha and len(self['nodes']) < 5:
             raise InventoryException('You must provide at least 5 '
-                                     'nodes for HA baremetal deployment')
+                                     'nodes for HA deployment')
         elif len(self['nodes']) < 2:
             raise InventoryException('You must provide at least 2 nodes '
-                                     'for non-HA baremetal deployment')
+                                     'for non-HA deployment')
 
         if virtual:
             self['host-ip'] = '192.168.122.1'
diff --git a/apex/tests/config/inventory-virt-1-node.yaml b/apex/tests/config/inventory-virt-1-node.yaml
new file mode 100644 (file)
index 0000000..3e4b8dc
--- /dev/null
@@ -0,0 +1,13 @@
+nodes:
+  node0:
+    arch: x86_64
+    capabilities: profile:control
+    cpu: 4
+    disk: 41
+    ipmi_ip: 192.168.122.1
+    ipmi_pass: password
+    ipmi_user: admin
+    mac_address: 00:a8:58:29:f9:99
+    memory: 10240
+    pm_port: 6230
+    pm_type: pxe_ipmitool
diff --git a/apex/tests/config/inventory-virt.yaml b/apex/tests/config/inventory-virt.yaml
new file mode 100644 (file)
index 0000000..36184ea
--- /dev/null
@@ -0,0 +1,25 @@
+nodes:
+  node0:
+    arch: x86_64
+    capabilities: profile:control
+    cpu: 4
+    disk: 41
+    ipmi_ip: 192.168.122.1
+    ipmi_pass: password
+    ipmi_user: admin
+    mac_address: 00:a8:58:29:f9:99
+    memory: 10240
+    pm_port: 6230
+    pm_type: pxe_ipmitool
+  node1:
+    arch: x86_64
+    capabilities: profile:compute
+    cpu: 4
+    disk: 41
+    ipmi_ip: 192.168.122.1
+    ipmi_pass: password
+    ipmi_user: admin
+    mac_address: 00:9d:c8:10:d9:64
+    memory: 8192
+    pm_port: 6231
+    pm_type: pxe_ipmitool
index cca8068..87e7d50 100644 (file)
@@ -16,7 +16,10 @@ from nose.tools import (
 
 from apex import Inventory
 from apex.inventory.inventory import InventoryException
-from apex.tests.constants import TEST_CONFIG_DIR
+from apex.tests.constants import (
+    TEST_CONFIG_DIR,
+    TEST_DUMMY_CONFIG
+)
 
 inventory_files = ('intel_pod2_settings.yaml',
                    'nokia_pod1_settings.yaml',
@@ -40,26 +43,26 @@ class TestInventory:
     def teardown(self):
         """This method is run once after _each_ test method is executed"""
 
-    def test_init(self):
+    def test_inventory_baremetal(self):
         for f in inventory_files:
             i = Inventory(os.path.join(files_dir, f))
             assert_equal(i.dump_instackenv_json(), None)
 
-        # test virtual
-        i = Inventory(i, virtual=True)
-        assert_equal(i.dump_instackenv_json(), None)
+    def test_inventory_invalid_ha_count(self):
+        assert_raises(InventoryException, Inventory,
+                      os.path.join(TEST_DUMMY_CONFIG, 'inventory-virt.yaml'),
+                      virtual=True, ha=True)
 
-        # Remove nodes to violate HA node count
-        while len(i['nodes']) >= 5:
-            i['nodes'].pop()
-        assert_raises(InventoryException,
-                      Inventory, i)
+    def test_inventory_invalid_noha_count(self):
+        assert_raises(InventoryException, Inventory,
+                      os.path.join(TEST_DUMMY_CONFIG,
+                                   'inventory-virt-1-node.yaml'),
+                      virtual=True, ha=False)
 
-        # Remove nodes to violate non-HA node count
-        while len(i['nodes']) >= 2:
-            i['nodes'].pop()
-        assert_raises(InventoryException,
-                      Inventory, i, ha=False)
+    def test_inventory_virtual(self):
+        i = Inventory(os.path.join(TEST_DUMMY_CONFIG, 'inventory-virt.yaml'),
+                      virtual=True, ha=False)
+        assert_equal(i.dump_instackenv_json(), None)
 
     def test_exception(self):
         e = InventoryException("test")
index 255d2c6..1fe2c39 100644 (file)
@@ -14,6 +14,7 @@ import os
 import platform
 import pprint
 import subprocess
+import xml.etree.ElementTree as ET
 
 from apex.common import utils
 from apex.virtual import configure_vm as vm_lib
@@ -26,6 +27,28 @@ DEFAULT_PASS = 'password'
 DEFAULT_VIRT_IP = '192.168.122.1'
 
 
+def get_virt_ip():
+    try:
+        virsh_net_xml = subprocess.check_output(['virsh', 'net-dumpxml',
+                                                 'default'],
+                                                stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        logging.warning('Unable to detect default virsh network IP.  Will '
+                        'use 192.168.122.1')
+        return DEFAULT_VIRT_IP
+
+    tree = ET.fromstring(virsh_net_xml)
+    ip_tag = tree.find('ip')
+    if ip_tag:
+        virsh_ip = ip_tag.get('address')
+        if virsh_ip:
+            logging.debug("Detected virsh default network ip: "
+                          "{}".format(virsh_ip))
+            return virsh_ip
+
+    return DEFAULT_VIRT_IP
+
+
 def generate_inventory(target_file, ha_enabled=False, num_computes=1,
                        controller_ram=DEFAULT_RAM, arch=platform.machine(),
                        compute_ram=DEFAULT_RAM, vcpus=4):
@@ -42,7 +65,7 @@ def generate_inventory(target_file, ha_enabled=False, num_computes=1,
     """
 
     node = {'mac_address': '',
-            'ipmi_ip': DEFAULT_VIRT_IP,
+            'ipmi_ip': get_virt_ip(),
             'ipmi_user': DEFAULT_USER,
             'ipmi_pass': DEFAULT_PASS,
             'pm_type': 'pxe_ipmitool',
@@ -86,7 +109,7 @@ def host_setup(node):
     vbmc_manager = vbmc_lib.VirtualBMCManager()
     for name, port in node.items():
         vbmc_manager.add(username=DEFAULT_USER, password=DEFAULT_PASS,
-                         port=port, address=DEFAULT_VIRT_IP, domain_name=name,
+                         port=port, address=get_virt_ip(), domain_name=name,
                          libvirt_uri='qemu:///system',
                          libvirt_sasl_password=False,
                          libvirt_sasl_username=False)
index cc335c8..b7711a0 100755 (executable)
@@ -115,6 +115,18 @@ enabled=1
 gpgcheck=0
 EOF
 
+# Kubernetes Repo
+cat > ${BUILD_DIR}/kubernetes.repo << EOF
+[kubernetes]
+name=Kubernetes
+baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOF
+
 # Get Real Time Kernel from kvm4nfv
 populate_cache $kvmfornfv_uri_base/$kvmfornfv_kernel_rpm
 
@@ -136,6 +148,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --upload ${BUILD_DIR}/puppet-fdio.tar.gz:/etc/puppet/modules \
     --run-command "cd /etc/puppet/modules && tar xzf puppet-fdio.tar.gz" \
     --upload ${BUILD_DIR}/fdio.repo:/etc/yum.repos.d/ \
+    --upload ${BUILD_DIR}/kubernetes.repo:/etc/yum.repos.d/ \
     --run-command "mkdir /root/fdio" \
     --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio \
     $fdio_pkg_str \
@@ -147,6 +160,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
     --install python2-networking-sfc \
     --install python-etcd,puppet-etcd \
     --install patch \
+    --install docker,kubelet,kubeadm,kubectl,kubernetes-cni \
     -a overcloud-full_build.qcow2
 
     # upload and install barometer packages