Add service in kubernetes context 49/40649/6
author: chenjiankun <chenjiankun1@huawei.com>
Wed, 30 Aug 2017 02:31:40 +0000 (02:31 +0000)
committer: chenjiankun <chenjiankun1@huawei.com>
Thu, 21 Sep 2017 11:01:25 +0000 (11:01 +0000)
JIRA: YARDSTICK-803

Currently a Kubernetes test case can only run on the master node.

We need to support running it from a jump server,
so this change adds a Service and uses the NodePort type.

Then we can log in to the pod through the NodePort.

Change-Id: Ia7900d263f1c5323f132435addec27ad10547ef9
Signed-off-by: chenjiankun <chenjiankun1@huawei.com>
tests/unit/benchmark/contexts/test_kubernetes.py
yardstick/benchmark/contexts/kubernetes.py
yardstick/benchmark/core/task.py
yardstick/common/kubernetes_utils.py
yardstick/orchestrator/kubernetes.py

index 4976a9f..3a926f8 100644 (file)
@@ -47,13 +47,15 @@ class KubernetesTestCase(unittest.TestCase):
         # clear kubernetes contexts from global list so we don't break other tests
         Context.list = []
 
+    @mock.patch('{}.KubernetesContext._delete_services'.format(prefix))
     @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
     @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
     @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
     def test_undeploy(self,
                       mock_delete_pods,
                       mock_delete_rcs,
-                      mock_delete_ssh):
+                      mock_delete_ssh,
+                      mock_delete_services):
 
         k8s_context = KubernetesContext()
         k8s_context.init(context_cfg)
@@ -61,7 +63,9 @@ class KubernetesTestCase(unittest.TestCase):
         self.assertTrue(mock_delete_ssh.called)
         self.assertTrue(mock_delete_rcs.called)
         self.assertTrue(mock_delete_pods.called)
+        self.assertTrue(mock_delete_services.called)
 
+    @mock.patch('{}.KubernetesContext._create_services'.format(prefix))
     @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
     @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
     @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
@@ -70,7 +74,8 @@ class KubernetesTestCase(unittest.TestCase):
                     mock_set_ssh_key,
                     mock_create_rcs,
                     mock_get_rc_pods,
-                    mock_wait_until_running):
+                    mock_wait_until_running,
+                    mock_create_services):
 
         k8s_context = KubernetesContext()
         k8s_context.init(context_cfg)
@@ -78,6 +83,7 @@ class KubernetesTestCase(unittest.TestCase):
             k8s_context.deploy()
         self.assertTrue(mock_set_ssh_key.called)
         self.assertTrue(mock_create_rcs.called)
+        self.assertTrue(mock_create_services.called)
         self.assertTrue(mock_get_rc_pods.called)
         self.assertTrue(mock_wait_until_running.called)
 
@@ -106,14 +112,39 @@ class KubernetesTestCase(unittest.TestCase):
         mock_read_pod_status.return_value = 'Running'
         k8s_context._wait_until_running()
 
-    @mock.patch('{}.k8s_utils.get_pod_list'.format(prefix))
-    def test_get_server(self, mock_get_pod_list):
+    @mock.patch('{}.k8s_utils.get_pod_by_name'.format(prefix))
+    @mock.patch('{}.KubernetesContext._get_node_ip'.format(prefix))
+    @mock.patch('{}.k8s_utils.get_service_by_name'.format(prefix))
+    def test_get_server(self,
+                        mock_get_service_by_name,
+                        mock_get_node_ip,
+                        mock_get_pod_by_name):
+        class Service(object):
+            def __init__(self):
+                self.name = 'yardstick'
+                self.node_port = 30000
+
+        class Services(object):
+            def __init__(self):
+                self.ports = [Service()]
+
+        class Status(object):
+            def __init__(self):
+                self.pod_ip = '172.16.10.131'
+
+        class Pod(object):
+            def __init__(self):
+                self.status = Status()
+
         k8s_context = KubernetesContext()
         k8s_context.init(context_cfg)
 
-        mock_get_pod_list.return_value.items = []
+        mock_get_service_by_name.return_value = Services()
+        mock_get_pod_by_name.return_value = Pod()
+        mock_get_node_ip.return_value = '172.16.10.131'
+
         server = k8s_context._get_server('server')
-        self.assertIsNone(server)
+        self.assertIsNotNone(server)
 
     @mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
     def test_create_rcs(self, mock_create_rc):
@@ -143,6 +174,28 @@ class KubernetesTestCase(unittest.TestCase):
         k8s_context._delete_rc({})
         self.assertTrue(mock_delete_replication_controller.called)
 
+    @mock.patch('{}.k8s_utils.get_node_list'.format(prefix))
+    def test_get_node_ip(self, mock_get_node_list):
+
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._get_node_ip()
+        self.assertTrue(mock_get_node_list.called)
+
+    @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.create')
+    def test_create_services(self, mock_create):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._create_services()
+        self.assertTrue(mock_create.called)
+
+    @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.delete')
+    def test_delete_services(self, mock_delete):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._delete_services()
+        self.assertTrue(mock_delete.called)
+
 
 def main():
     unittest.main()
index a39f631..2334e50 100644 (file)
@@ -54,6 +54,7 @@ class KubernetesContext(Context):
 
         LOG.info('Launch containers')
         self._create_rcs()
+        self._create_services()
         time.sleep(1)
         self.template.get_rc_pods()
 
@@ -63,6 +64,7 @@ class KubernetesContext(Context):
         self._delete_ssh_key()
         self._delete_rcs()
         self._delete_pods()
+        self._delete_services()
 
         super(KubernetesContext, self).undeploy()
 
@@ -80,6 +82,14 @@ class KubernetesContext(Context):
             return False
         return True
 
+    def _create_services(self):
+        for obj in self.template.service_objs:
+            obj.create()
+
+    def _delete_services(self):
+        for obj in self.template.service_objs:
+            obj.delete()
+
     def _create_rcs(self):
         for obj in self.template.k8s_objs:
             self._create_rc(obj.get_template())
@@ -126,15 +136,22 @@ class KubernetesContext(Context):
         utils.remove_file(self.public_key_path)
 
     def _get_server(self, name):
-        resp = k8s_utils.get_pod_list()
-        hosts = ({'name': n.metadata.name,
-                  'ip': n.status.pod_ip,
-                  'user': 'root',
-                  'key_filename': self.key_path,
-                  'private_ip': n.status.pod_ip}
-                 for n in resp.items if n.metadata.name.startswith(name))
-
-        return next(hosts, None)
+        service_name = '{}-service'.format(name)
+        service = k8s_utils.get_service_by_name(service_name).ports[0]
+
+        host = {
+            'name': service.name,
+            'ip': self._get_node_ip(),
+            'private_ip': k8s_utils.get_pod_by_name(name).status.pod_ip,
+            'ssh_port': service.node_port,
+            'user': 'root',
+            'key_filename': self.key_path,
+        }
+
+        return host
+
+    def _get_node_ip(self):
+        return k8s_utils.get_node_list().items[0].status.addresses[0].address
 
     def _get_network(self, attr_name):
         return None
index 0b6e323..a32e990 100644 (file)
@@ -333,7 +333,7 @@ class Task(object):     # pragma: no cover
                 context_cfg['target'] = {"ipaddr": target}
             else:
                 context_cfg['target'] = Context.get_server(target)
-                if self._is_same_heat_context(cfg["host"], target):
+                if self._is_same_context(cfg["host"], target):
                     context_cfg['target']["ipaddr"] = context_cfg['target']["private_ip"]
                 else:
                     context_cfg['target']["ipaddr"] = context_cfg['target']["ip"]
@@ -358,8 +358,8 @@ class Task(object):     # pragma: no cover
                     context_cfg['target'] = {}
                 else:
                     context_cfg['target'] = Context.get_server(target)
-                    if self._is_same_heat_context(scenario_cfg["host"],
-                                                  target):
+                    if self._is_same_context(scenario_cfg["host"],
+                                             target):
                         ip_list.append(context_cfg["target"]["private_ip"])
                     else:
                         ip_list.append(context_cfg["target"]["ip"])
@@ -377,7 +377,7 @@ class Task(object):     # pragma: no cover
 
         return runner
 
-    def _is_same_heat_context(self, host_attr, target_attr):
+    def _is_same_context(self, host_attr, target_attr):
         """check if two servers are in the same heat context
         host_attr: either a name for a server created by yardstick or a dict
         with attribute name mapping when using external heat templates
@@ -385,7 +385,7 @@ class Task(object):     # pragma: no cover
         with attribute name mapping when using external heat templates
         """
         for context in self.contexts:
-            if context.__context_type__ != "Heat":
+            if context.__context_type__ not in {"Heat", "Kubernetes"}:
                 continue
 
             host = context._get_server(host_attr)
index e4c2328..0cf7b9e 100644 (file)
@@ -28,6 +28,60 @@ def get_core_api():     # pragma: no cover
     return client.CoreV1Api()
 
 
+def get_node_list(**kwargs):        # pragma: no cover
+    core_v1_api = get_core_api()
+    try:
+        return core_v1_api.list_node(**kwargs)
+    except ApiException:
+        LOG.exception('Get node list failed')
+        raise
+
+
+def create_service(template,
+                   namespace='default',
+                   wait=False,
+                   **kwargs):       # pragma: no cover
+    core_v1_api = get_core_api()
+    metadata = client.V1ObjectMeta(**template.get('metadata', {}))
+
+    ports = [client.V1ServicePort(**port) for port in
+             template.get('spec', {}).get('ports', [])]
+    template['spec']['ports'] = ports
+    spec = client.V1ServiceSpec(**template.get('spec', {}))
+
+    service = client.V1Service(metadata=metadata, spec=spec)
+
+    try:
+        core_v1_api.create_namespaced_service(namespace, service, **kwargs)
+    except ApiException:
+        LOG.exception('Create Service failed')
+        raise
+
+
+def delete_service(name,
+                   namespace='default',
+                   **kwargs):       # pragma: no cover
+    core_v1_api = get_core_api()
+    try:
+        core_v1_api.delete_namespaced_service(name, namespace, **kwargs)
+    except ApiException:
+        LOG.exception('Delete Service failed')
+
+
+def get_service_list(namespace='default', **kwargs):      # pragma: no cover
+    core_v1_api = get_core_api()
+    try:
+        return core_v1_api.list_namespaced_service(namespace, **kwargs)
+    except ApiException:
+        LOG.exception('Get Service list failed')
+        raise
+
+
+def get_service_by_name(name):      # pragma: no cover
+    service_list = get_service_list()
+    return next((s.spec for s in service_list.items if s.metadata.name == name), None)
+
+
 def create_replication_controller(template,
                                   namespace='default',
                                   wait=False,
@@ -135,3 +189,8 @@ def get_pod_list(namespace='default'):      # pragma: no cover
     except ApiException:
         LOG.exception('Get pod list failed')
         raise
+
+
+def get_pod_by_name(name):  # pragma: no cover
+    pod_list = get_pod_list()
+    return next((n for n in pod_list.items if n.metadata.name.startswith(name)), None)
index 6d7045f..9f94fd4 100644 (file)
@@ -37,7 +37,7 @@ class KubernetesObject(object):
                 "template": {
                     "metadata": {
                         "labels": {
-                            "app": ""
+                            "app": name
                         }
                     },
                     "spec": {
@@ -106,6 +106,35 @@ class KubernetesObject(object):
         self._add_volume(key_volume)
 
 
+class ServiceObject(object):
+
+    def __init__(self, name):
+        self.name = '{}-service'.format(name)
+        self.template = {
+            'metadata': {
+                'name': '{}-service'.format(name)
+            },
+            'spec': {
+                'type': 'NodePort',
+                'ports': [
+                    {
+                        'port': 22,
+                        'protocol': 'TCP'
+                    }
+                ],
+                'selector': {
+                    'app': name
+                }
+            }
+        }
+
+    def create(self):
+        k8s_utils.create_service(self.template)
+
+    def delete(self):
+        k8s_utils.delete_service(self.name)
+
+
 class KubernetesTemplate(object):
 
     def __init__(self, name, template_cfg):
@@ -117,6 +146,8 @@ class KubernetesTemplate(object):
                                           ssh_key=self.ssh_key,
                                           **cfg)
                          for rc, cfg in template_cfg.items()]
+        self.service_objs = [ServiceObject(s) for s in self.rcs]
+
         self.pods = []
 
     def _get_rc_name(self, rc_name):