Fix conversion to JS for HTML reports
author    Patrice Buriez <patrice.buriez@intel.com>
          Thu, 15 Nov 2018 11:30:30 +0000 (12:30 +0100)
committer Patrice Buriez <patrice.buriez@intel.com>
          Fri, 21 Dec 2018 18:07:59 +0000 (19:07 +0100)
Some new fields in InfluxDB contain plain text that ast.literal_eval()
cannot parse as valid Python literals, resulting in exceptions being
raised by the yardstick CLI commands "report generate" and
"report generate-nsb".
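
For illustration only (the field value below is made up),
ast.literal_eval() accepts numeric strings but raises on plain text:

    >>> import ast
    >>> ast.literal_eval('7.89')      # numeric strings still parse
    7.89
    >>> ast.literal_eval('PASSED')    # hypothetical plain-text value
    Traceback (most recent call last):
      ...
    ValueError: malformed node or string: ...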

Reworked _generate_common() to properly handle and convert:
- unicode, str and missing keys
- None, unicode, str, float, long and int values
- float, long and int stored as unicode or str values

Added test__generate_common() unit test, to verify all possible
conversion flavors.
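
For reference, the conversion rules can be sketched as a standalone
helper (illustrative only; the patch applies the same logic inline in
_generate_common()):

    import six

    def _convert(val):
        """Sketch: convert one InfluxDB field value for JS rendering."""
        if val is None or isinstance(val, (int, float)):
            return val                       # keep as is
        if six.PY2 and isinstance(val,
                    long):  # pylint: disable=undefined-variable
            return float(val)                # JS cannot render PY2 long
        if isinstance(val, six.string_types):
            s = val if isinstance(val, str) else val.encode('utf8')
            try:
                # convert until failure: str -> float -> int
                val = s
                val = float(s)
                val = int(s)
                if six.PY2 and isinstance(val,
                            long):  # pylint: disable=undefined-variable
                    val = float(val)         # PY2: long to float
            except ValueError:
                pass
            return val
        raise ValueError("Cannot convert %r" % val)

With these rules, u'7.89' becomes 7.89, '1011' becomes 1011, and free
text such as 'unicode_str value' stays a plain str.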

Also renamed the incorrectly named "tasks" (e.g. _get_tasks()) to
"metrics".

Also fixed a Python 3 deprecation warning in some unit tests, caused by
unittest.TestCase.assertRaisesRegexp() being deprecated in favor of
assertRaisesRegex().
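
For example, the portable spelling via six (illustrative, with pass as
a placeholder body):

    # deprecated alias, emits DeprecationWarning on Python 3:
    with self.assertRaisesRegexp(ValueError, "yaml*"):
        pass
    # portable replacement for both Python 2 and 3:
    with six.assertRaisesRegex(self, ValueError, "yaml*"):
        pass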

JIRA: YARDSTICK-1367
Topic: report/html_table (10 of 12)

Change-Id: Iff75bfd2c1dcaf3806f67f52f0ea594f10aceb5b
Signed-off-by: Patrice Buriez <patrice.buriez@intel.com>
yardstick/benchmark/core/report.py
yardstick/tests/unit/benchmark/core/test_report.py

index 0bc392f..17a9fe4 100644
 
 """ Handler for yardstick command 'report' """
 
-import ast
 import re
+import six
 import uuid
 
 import jinja2
 from api.utils import influx
-from oslo_utils import encodeutils
 from oslo_utils import uuidutils
 from yardstick.common import constants as consts
 from yardstick.common.utils import cliargs
@@ -115,10 +114,10 @@ class Report(object):
         else:
             raise KeyError("Test case not found.")
 
-    def _get_tasks(self):
-        task_cmd = "select * from \"%s\" where task_id= '%s'"
-        task_query = task_cmd % (self.yaml_name, self.task_id)
-        query_exec = influx.query(task_query)
+    def _get_metrics(self):
+        metrics_cmd = "select * from \"%s\" where task_id = '%s'"
+        metrics_query = metrics_cmd % (self.yaml_name, self.task_id)
+        query_exec = influx.query(metrics_query)
         if query_exec:
             return query_exec
         else:
@@ -132,38 +131,72 @@ class Report(object):
         """
         self._validate(args.yaml_name[0], args.task_id[0])
 
-        self.db_fieldkeys = self._get_fieldkeys()
+        db_fieldkeys = self._get_fieldkeys()
+        # list of dicts of:
+        # - PY2: unicode key and unicode value
+        # - PY3: str key and str value
 
-        self.db_task = self._get_tasks()
+        db_metrics = self._get_metrics()
+        # list of dicts of:
+        # - PY2: unicode key and { None | unicode | float | long | int } value
+        # - PY3: str key and { None | str | float | int } value
 
-        field_keys = []
-        datasets = []
-        table_vals = {}
+        # extract fieldKey entries, and convert them to str where needed
+        field_keys = [key if isinstance(key, str)       # PY3: already str
+                          else key.encode('utf8')       # PY2: unicode to str
+                      for key in
+                          [field['fieldKey']
+                           for field in db_fieldkeys]]
 
-        field_keys = [encodeutils.to_utf8(field['fieldKey'])
-                      for field in self.db_fieldkeys]
+        # extract timestamps
+        self.Timestamp = []
+        for metric in db_metrics:
+            metric_time = metric['time']                    # in RFC3339 format
+            if not isinstance(metric_time, str):
+                metric_time = metric_time.encode('utf8')    # PY2: unicode to str
+            metric_time = metric_time[11:]                  # skip date, keep time
+            head, _, tail = metric_time.partition('.')      # split HH:MM:SS and nsZ
+            metric_time = head + '.' + tail[:6]             # join HH:MM:SS and .us
+            self.Timestamp.append(metric_time)              # HH:MM:SS.micros
+
+        # prepare return values
+        datasets = []
+        table_vals = {'Timestamp': self.Timestamp}
 
+        # extract and convert field values
         for key in field_keys:
-            self.Timestamp = []
             values = []
-            for task in self.db_task:
-                task_time = encodeutils.to_utf8(task['time'])
-                if not isinstance(task_time, str):
-                    task_time = str(task_time, 'utf8')
-                if not isinstance(key, str):
-                    key = str(key, 'utf8')
-                task_time = task_time[11:]
-                head, _, tail = task_time.partition('.')
-                task_time = head + "." + tail[:6]
-                self.Timestamp.append(task_time)
-                if task[key] is None:
-                    values.append(None)
-                elif isinstance(task[key], (int, float)):
-                    values.append(task[key])
+            for metric in db_metrics:
+                val = metric.get(key, None)
+                if val is None:
+                    # keep explicit None or missing entry as is
+                    pass
+                elif isinstance(val, (int, float)):
+                    # keep plain int or float as is
+                    pass
+                elif six.PY2 and isinstance(val,
+                            long):  # pylint: disable=undefined-variable
+                    # PY2: long value would be rendered with trailing L,
+                    # which JS does not support, so convert it to float
+                    val = float(val)
+                elif isinstance(val, six.string_types):
+                    s = val
+                    if not isinstance(s, str):
+                        s = s.encode('utf8')            # PY2: unicode to str
+                    try:
+                        # convert until failure
+                        val = s
+                        val = float(s)
+                        val = int(s)
+                        if six.PY2 and isinstance(val,
+                                    long):  # pylint: disable=undefined-variable
+                            val = float(val)            # PY2: long to float
+                    except ValueError:
+                        pass
                 else:
-                    values.append(ast.literal_eval(task[key]))
+                    raise ValueError("Cannot convert %r" % val)
+                values.append(val)
             datasets.append({'label': key, 'data': values})
-            table_vals['Timestamp'] = self.Timestamp
             table_vals[key] = values
 
         return datasets, table_vals
index 11d017f..41991dd 100644
@@ -9,6 +9,7 @@
 ##############################################################################
 
 import mock
+import six
 import unittest
 import uuid
 
@@ -19,13 +20,82 @@ from yardstick.cmd.commands import change_osloobj_to_paras
 GOOD_YAML_NAME = 'fake_name'
 GOOD_TASK_ID = str(uuid.uuid4())
 GOOD_DB_FIELDKEYS = [{'fieldKey': 'fake_key'}]
-GOOD_DB_TASK = [{
+GOOD_DB_METRICS = [{
         'fake_key': 1.234,
         'time': '0000-00-00T12:34:56.789012Z',
         }]
 GOOD_TIMESTAMP = ['12:34:56.789012']
 BAD_YAML_NAME = 'F@KE_NAME'
 BAD_TASK_ID = 'aaaaaa-aaaaaaaa-aaaaaaaaaa-aaaaaa'
+MORE_DB_FIELDKEYS = [
+        {'fieldKey': 'fake_key'},
+        {'fieldKey': 'str_str'},
+        {'fieldKey': u'str_unicode'},
+        {u'fieldKey': 'unicode_str'},
+        {u'fieldKey': u'unicode_unicode'},
+        ]
+MORE_DB_METRICS = [{
+        'fake_key': None,
+        'time': '0000-00-00T00:00:00.000000Z',
+        }, {
+        'fake_key': 123,
+        'time': '0000-00-00T00:00:01.000000Z',
+        }, {
+        'fake_key': 4.56,
+        'time': '0000-00-00T00:00:02.000000Z',
+        }, {
+        'fake_key': 9876543210987654321,
+        'time': '0000-00-00T00:00:03.000000Z',
+        }, {
+        'fake_key': 'str_str value',
+        'time': '0000-00-00T00:00:04.000000Z',
+        }, {
+        'fake_key': u'str_unicode value',
+        'time': '0000-00-00T00:00:05.000000Z',
+        }, {
+        u'fake_key': 'unicode_str value',
+        'time': '0000-00-00T00:00:06.000000Z',
+        }, {
+        u'fake_key': u'unicode_unicode value',
+        'time': '0000-00-00T00:00:07.000000Z',
+        }, {
+        'fake_key': '7.89',
+        'time': '0000-00-00T00:00:08.000000Z',
+        }, {
+        'fake_key': '1011',
+        'time': '0000-00-00T00:00:09.000000Z',
+        }, {
+        'fake_key': '9876543210123456789',
+        'time': '0000-00-00T00:00:10.000000Z',
+        }]
+MORE_TIMESTAMP = ['00:00:%02d.000000' % n for n in range(len(MORE_DB_METRICS))]
+MORE_EMPTY_DATA = [None] * len(MORE_DB_METRICS)
+MORE_EXPECTED_TABLE_VALS = {
+        'Timestamp': MORE_TIMESTAMP,
+        'fake_key': [
+            None,
+            123,
+            4.56,
+            9876543210987654321 if six.PY3 else 9.876543210987655e+18,
+            'str_str value',
+            'str_unicode value',
+            'unicode_str value',
+            'unicode_unicode value',
+            7.89,
+            1011,
+            9876543210123456789 if six.PY3 else 9.876543210123457e+18,
+            ],
+        'str_str': MORE_EMPTY_DATA,
+        'str_unicode': MORE_EMPTY_DATA,
+        'unicode_str': MORE_EMPTY_DATA,
+        'unicode_unicode': MORE_EMPTY_DATA,
+        }
+MORE_EXPECTED_DATASETS = [{
+        'label': key,
+        'data': MORE_EXPECTED_TABLE_VALS[key],
+        }
+        for key in map(str, [field['fieldKey'] for field in MORE_DB_FIELDKEYS])
+        ]
 
 
 class JSTreeTestCase(unittest.TestCase):
@@ -117,11 +187,11 @@ class ReportTestCase(unittest.TestCase):
         self.assertEqual(GOOD_TASK_ID, str(self.rep.task_id))
 
     def test__validate_invalid_yaml_name(self):
-        with self.assertRaisesRegexp(ValueError, "yaml*"):
+        with six.assertRaisesRegex(self, ValueError, "yaml*"):
             self.rep._validate(BAD_YAML_NAME, GOOD_TASK_ID)
 
     def test__validate_invalid_task_id(self):
-        with self.assertRaisesRegexp(ValueError, "task*"):
+        with six.assertRaisesRegex(self, ValueError, "task*"):
             self.rep._validate(GOOD_YAML_NAME, BAD_TASK_ID)
 
     @mock.patch.object(influx, 'query')
@@ -141,42 +211,51 @@ class ReportTestCase(unittest.TestCase):
         mock_query.return_value = []
         self.rep.yaml_name = GOOD_YAML_NAME
         self.rep.task_id = GOOD_TASK_ID
-        self.assertRaisesRegexp(KeyError, "Test case", self.rep._get_fieldkeys)
+        six.assertRaisesRegex(self, KeyError, "Test case", self.rep._get_fieldkeys)
 
     @mock.patch.object(influx, 'query')
-    def test__get_tasks(self, mock_query):
-        mock_query.return_value = GOOD_DB_TASK
+    def test__get_metrics(self, mock_query):
+        mock_query.return_value = GOOD_DB_METRICS
         self.rep.yaml_name = GOOD_YAML_NAME
         self.rep.task_id = GOOD_TASK_ID
-        self.assertEqual(GOOD_DB_TASK, self.rep._get_tasks())
+        self.assertEqual(GOOD_DB_METRICS, self.rep._get_metrics())
 
     @mock.patch.object(influx, 'query')
-    def test__get_tasks_task_not_found(self, mock_query):
+    def test__get_metrics_task_not_found(self, mock_query):
         mock_query.return_value = []
         self.rep.yaml_name = GOOD_YAML_NAME
         self.rep.task_id = GOOD_TASK_ID
-        self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_tasks)
+        six.assertRaisesRegex(self, KeyError, "Task ID", self.rep._get_metrics)
+
+    @mock.patch.object(report.Report, '_get_metrics')
+    @mock.patch.object(report.Report, '_get_fieldkeys')
+    def test__generate_common(self, mock_keys, mock_metrics):
+        mock_metrics.return_value = MORE_DB_METRICS
+        mock_keys.return_value = MORE_DB_FIELDKEYS
+        datasets, table_vals = self.rep._generate_common(self.param)
+        self.assertEqual(MORE_EXPECTED_DATASETS, datasets)
+        self.assertEqual(MORE_EXPECTED_TABLE_VALS, table_vals)
 
-    @mock.patch.object(report.Report, '_get_tasks')
+    @mock.patch.object(report.Report, '_get_metrics')
     @mock.patch.object(report.Report, '_get_fieldkeys')
     @mock.patch.object(report.Report, '_validate')
-    def test_generate(self, mock_valid, mock_keys, mock_tasks):
-        mock_tasks.return_value = GOOD_DB_TASK
+    def test_generate(self, mock_valid, mock_keys, mock_metrics):
+        mock_metrics.return_value = GOOD_DB_METRICS
         mock_keys.return_value = GOOD_DB_FIELDKEYS
         self.rep.generate(self.param)
         mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID)
-        mock_tasks.assert_called_once_with()
+        mock_metrics.assert_called_once_with()
         mock_keys.assert_called_once_with()
         self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)
 
-    @mock.patch.object(report.Report, '_get_tasks')
+    @mock.patch.object(report.Report, '_get_metrics')
     @mock.patch.object(report.Report, '_get_fieldkeys')
     @mock.patch.object(report.Report, '_validate')
-    def test_generate_nsb(self, mock_valid, mock_keys, mock_tasks):
-        mock_tasks.return_value = GOOD_DB_TASK
+    def test_generate_nsb(self, mock_valid, mock_keys, mock_metrics):
+        mock_metrics.return_value = GOOD_DB_METRICS
         mock_keys.return_value = GOOD_DB_FIELDKEYS
         self.rep.generate_nsb(self.param)
         mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID)
-        mock_tasks.assert_called_once_with()
+        mock_metrics.assert_called_once_with()
         mock_keys.assert_called_once_with()
         self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)