Merge "Cleanup unittests for test_lmbench"
[yardstick.git] yardstick/cmd/commands/task.py
index e2e8bf6..c6379e5 100644
@@ -7,10 +7,6 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-""" Handler for yardstick command 'task' """
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.core.task import Task
@@ -29,6 +25,7 @@ class TaskCommands(object):     # pragma: no cover
 
        Set of commands to manage benchmark tasks.
        """
+    EXIT_TEST_FAILED = 2
 
     @cliargs("inputfile", type=str, help="path to task or suite file", nargs=1)
     @cliargs("--task-args", dest="task_args",
@@ -42,6 +39,8 @@ class TaskCommands(object):     # pragma: no cover
              action="store_true")
     @cliargs("--parse-only", help="parse the config file and exit",
              action="store_true")
+    @cliargs("--render-only", help="Render the tasks files, store the result "
+             "in the directory given and exit", type=str, dest="render_only")
     @cliargs("--output-file", help="file where output is stored, default %s" %
              output_file_default, default=output_file_default)
     @cliargs("--suite", help="process test suite file instead of a task file",
@@ -50,19 +49,20 @@ class TaskCommands(object):     # pragma: no cover
         param = change_osloobj_to_paras(args)
         self.output_file = param.output_file
 
-        result = {}
         LOG.info('Task START')
         try:
             result = Task().start(param, **kwargs)
-        except Exception as e:
+        except Exception as e:  # pylint: disable=broad-except
             self._write_error_data(e)
-            LOG.exception("")
-
-        if result.get('result', {}).get('criteria') == 'PASS':
-            LOG.info('Task SUCCESS')
-        else:
             LOG.info('Task FAILED')
-            raise RuntimeError('Task Failed')
+            raise
+        else:
+            if result.get('result', {}).get('criteria') == 'PASS':
+                LOG.info('Task SUCCESS')
+            else:
+                LOG.info('Task FAILED')
+                # exit without backtrace
+                raise SystemExit(self.EXIT_TEST_FAILED)
 
     def _write_error_data(self, error):
         data = {'status': 2, 'result': str(error)}
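
Taken together, the restructured do_start() now distinguishes three outcomes: a run whose criteria evaluate to PASS returns normally; a run whose criteria do not pass logs 'Task FAILED' and raises SystemExit(EXIT_TEST_FAILED), so the process ends with status 2 and no traceback; and any unexpected exception is recorded via _write_error_data(), logged, and re-raised, which typically terminates the interpreter with its default status of 1. The new --render-only option, per its help text, only renders the task files into the given directory and exits without running them. Below is a minimal sketch of a wrapper that consumes these exit codes; it assumes the command is invoked as "yardstick task start <task-file>", and the run_task() helper is purely illustrative, not part of the patch.

    import subprocess
    import sys

    EXIT_TEST_FAILED = 2  # mirrors TaskCommands.EXIT_TEST_FAILED introduced above


    def run_task(task_file):
        """Run a yardstick task and map its exit status to a verdict string."""
        # Assumption: the task sub-command is exposed as "yardstick task start".
        proc = subprocess.run(["yardstick", "task", "start", task_file])
        if proc.returncode == 0:
            return "PASS"                       # criteria evaluated to PASS
        if proc.returncode == EXIT_TEST_FAILED:
            return "FAIL"                       # task ran, criteria not met
        return "ERROR"                          # unhandled exception was re-raised


    if __name__ == "__main__":
        sys.exit(0 if run_task(sys.argv[1]) == "PASS" else 1)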