@click.argument('name')
@pass_context
def show(ctx, name):
-    pass
+    metric = MetricSpec('{}.yaml'.format(name))
+    cnt = metric.content
+    output = utils.render('metric', cnt)
+    click.echo(output)
@cli.command('run', help='Run tests for the specified Performance Metric')
@click.argument('name')
@pass_context
def show(ctx, name):
-    pass
+    plan = Plan('{}.yaml'.format(name))
+    cnt = plan.content
+    output = utils.render('plan', cnt)
+    click.echo(output)
@cli.command('run', help='Execute a Plan')
@click.argument('name')
@pass_context
def show(ctx, name):
-    pass
+    qpi = QPISpec('{}.yaml'.format(name))
+    cnt = qpi.content
+    output = utils.render('qpi', cnt)
+    click.echo(output)
@cli.command('run', help='Run performance tests for the specified QPI')
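All three show commands follow the same load-render-echo pattern. Purely as an illustration, not part of the patch, the shared logic amounts to a helper along these lines; show_component is a hypothetical name, and the sketch assumes MetricSpec, Plan and QPISpec each load '<name>.yaml' and expose a content dict:

# Hypothetical sketch, not part of the patch: the load-render-echo pattern
# shared by the three show commands. Assumes click and the utils module
# changed below are imported, and that each spec class exposes .content.
def show_component(spec_cls, template_name, name):
    spec = spec_cls('{}.yaml'.format(name))   # e.g. MetricSpec, Plan or QPISpec
    click.echo(utils.render(template_name, spec.content))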
--- /dev/null
+Name: {{ name }}
+Description: {{ description }}
+Workloads:
+{% for wl in workloads %}
+ {{ wl }}
+{% endfor %}
--- /dev/null
+Name: {{ name }}
+Description: {{ description }}
--- /dev/null
+Name: {{ title }}
+Description: {{ description }}
+{% for section in sections %}
+ Name: {{ section.name }}
+ Weight: {{ section.weight }}
+ Formula: {{ section.formula }}
+ Metrics:
+ {% for metric in section.metrics %}
+ {{ metric }}
+ {% endfor %}
+{% endfor %}
+
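For reference, qpi.j2 consumes a mapping with a title, a description and a list of sections, each carrying a name, weight, formula and list of metrics. A content dict of the expected shape could look as follows; the title and description match the sample QPI exercised in the tests below, while the section values are made up for illustration:

# Illustrative only: a dict shaped the way qpi.j2 expects.
qpi_content = {
    'title': 'compute',
    'description': 'sample performance index of computing',
    'sections': [
        {
            'name': 'arithmetic',            # hypothetical section name
            'weight': 0.5,                   # hypothetical weight
            'formula': 'geometric mean',     # hypothetical formula
            'metrics': ['dhrystone'],
        },
    ],
}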
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from jinja2 import Environment
+from jinja2 import FileSystemLoader
+from os import path
from prettytable import PrettyTable
table.align[name] = 'l'
[table.add_row([component['name'][0:-5]]) for component in components]
return table
+
+
+def render(name, var_dict):
+    """Render the template for the given component with the provided variables."""
+    tmpl_path = path.join(path.dirname(__file__), 'templates')
+    tmpl_loader = FileSystemLoader(tmpl_path)
+    env = Environment(loader=tmpl_loader)
+    template = env.get_template('{}.j2'.format(name))
+    result = template.render(var_dict)
+    return result
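A minimal usage sketch of render(), assuming a metric spec whose content provides the variables metric.j2 expects; the name and description are taken from the dhrystone assertions in the tests below, while the workload names are hypothetical:

content = {
    'name': 'dhrystone',
    'description': 'A synthetic computing benchmark program intended to be '
                   'representative of system (integer) programming.',
    'workloads': ['single_cpu', 'multi_cpu'],   # hypothetical workload names
}
print(render('metric', content))
# Output (roughly):
# Name: dhrystone
# Description: A synthetic computing benchmark program intended to be ...
# Workloads:
#   single_cpu
#   multi_cpu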
def test_show(runner):
-    result = runner.invoke(cli, ['metric', 'show', 'fake-metric'])
-    assert result.output == ''
+    result = runner.invoke(cli, ['metric', 'show', 'dhrystone'])
+    assert 'Name: dhrystone' in result.output
+    assert 'Description: A synthetic computing benchmark program intended to be representative of ' \
+           'system (integer) programming.' in result.output
    result = runner.invoke(cli, ['metric', 'show'])
    assert 'Missing argument "name".' in result.output
def test_show(runner):
-    result = runner.invoke(cli, ['plan', 'show', 'fake-plan'])
-    assert result.output == ''
+    result = runner.invoke(cli, ['plan', 'show', 'compute'])
+    assert 'Name: compute QPI' in result.output
+    assert 'Description: compute QPI profile' in result.output
    result = runner.invoke(cli, ['plan', 'show'])
    assert 'Missing argument "name".' in result.output
def test_show(runner):
-    result = runner.invoke(cli, ['qpi', 'show', 'fake-qpi'])
-    assert result.output == ''
+    result = runner.invoke(cli, ['qpi', 'show', 'compute'])
+    assert 'Name: compute' in result.output
+    assert 'Description: sample performance index of computing' in result.output
    result = runner.invoke(cli, ['qpi', 'show'])
    assert 'Missing argument "name".' in result.output