Add "render-only" option to "task" command
yardstick/benchmark/contexts/node.py
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

from __future__ import absolute_import
import errno
import subprocess
import os
import collections
import logging
import tempfile

import six
import pkg_resources

from yardstick import ssh
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import ANSIBLE_DIR, YARDSTICK_ROOT_PATH
from yardstick.common.ansible_common import AnsibleCommon
from yardstick.common.yaml_loader import yaml_load

LOG = logging.getLogger(__name__)

DEFAULT_DISPATCH = 'script'


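# Illustrative (hypothetical, not taken from this repo) shape of the optional
# "env" block in the context attributes; only the keys actually read below
# ("type", "setup", "teardown", "prefix", "verbose") matter, the file names
# are made up:
#
#   env:
#     type: ansible            # or "script" (DEFAULT_DISPATCH)
#     setup: install.yaml      # playbook(s) for ansible, or per-host steps for script
#     teardown: remove.yaml
#     verbose: true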
class NodeContext(Context):
    """Class that handles node info"""

    __context_type__ = "Node"

    def __init__(self):
        self.file_path = None
        self.nodes = []
        self.networks = {}
        self.controllers = []
        self.computes = []
        self.baremetals = []
        self.env = {}
        self.attrs = {}
        self.DISPATCH_TYPES = {
            "ansible": self._dispatch_ansible,
            "script": self._dispatch_script,
        }
        super(NodeContext, self).__init__()

    def read_config_file(self):
        """Read from config file"""

        with open(self.file_path) as stream:
            LOG.info("Parsing pod file: %s", self.file_path)
            cfg = yaml_load(stream)
        return cfg

    def init(self, attrs):
        """initializes itself from the supplied arguments"""
        super(NodeContext, self).init(attrs)

        self.file_path = file_path = attrs.get("file", "pod.yaml")

        try:
            cfg = self.read_config_file()
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise

            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
            cfg = self.read_config_file()

        self.nodes.extend(cfg["nodes"])
        self.controllers.extend([node for node in cfg["nodes"]
                                 if node.get("role") == "Controller"])
        self.computes.extend([node for node in cfg["nodes"]
                              if node.get("role") == "Compute"])
        self.baremetals.extend([node for node in cfg["nodes"]
                                if node.get("role") == "Baremetal"])
        LOG.debug("Nodes: %r", self.nodes)
        LOG.debug("Controllers: %r", self.controllers)
        LOG.debug("Computes: %r", self.computes)
        LOG.debug("BareMetals: %r", self.baremetals)

        self.env = attrs.get('env', {})
        self.attrs = attrs
        LOG.debug("Env: %r", self.env)

        # add optional static network definition
        self.networks.update(cfg.get("networks", {}))

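    # deploy()/undeploy() pass the literal keys "setup"/"teardown" to the
    # dispatcher chosen by env["type"]; with no "type" given, plain script
    # dispatch is used.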
    def deploy(self):
        config_type = self.env.get('type', DEFAULT_DISPATCH)
        self.DISPATCH_TYPES[config_type]("setup")

    def undeploy(self):
        config_type = self.env.get('type', DEFAULT_DISPATCH)
        self.DISPATCH_TYPES[config_type]("teardown")
        super(NodeContext, self).undeploy()

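    # For script dispatch, each entry of env["setup"]/env["teardown"] is a
    # mapping of host name to script info. A hypothetical example (file name
    # and option are made up):
    #
    #   setup:
    #     - host1:
    #         script: prepare.bash
    #         options: "-x"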
    def _dispatch_script(self, key):
        steps = self.env.get(key, [])
        for step in steps:
            for host, info in step.items():
                self._execute_script(host, info)

    def _dispatch_ansible(self, key):
        try:
            playbooks = self.env[key]
        except KeyError:
            pass
        else:
            self._do_ansible_job(playbooks)

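    # Ansible dispatch: an inventory is generated from the pod nodes, env is
    # passed through as test_vars, and the playbook(s) run in a throw-away
    # temporary directory.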
    def _do_ansible_job(self, playbooks):
        self.ansible_exec = AnsibleCommon(nodes=self.nodes,
                                          test_vars=self.env)
        # playbooks relative to ansible dir
        # playbooks can also be a list of playbooks
        self.ansible_exec.gen_inventory_ini_dict()
        if isinstance(playbooks, six.string_types):
            playbooks = [playbooks]
        playbooks = [self.fix_ansible_path(playbook) for playbook in playbooks]

        tmpdir = tempfile.mkdtemp(prefix='ansible-')
        self.ansible_exec.execute_ansible(playbooks, tmpdir,
                                          verbose=self.env.get("verbose",
                                                               False))

    def fix_ansible_path(self, playbook):
        if not os.path.isabs(playbook):
            # make relative paths absolute in ANSIBLE_DIR
            playbook = os.path.join(ANSIBLE_DIR, playbook)
        return playbook

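    # Scenario "host"/"target" attributes arrive as qualified names of the
    # form "<node_name>.<context_name>"; split_name() (inherited from the
    # base Context) separates the two so this context only answers for its
    # own name.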
    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: a name for a server listed in nodes config file
        """
        node_name, name = self.split_name(attr_name)
        if name is None or self.name != name:
            return None

        matching_nodes = (n for n in self.nodes if n["name"] == node_name)

        try:
            # A clone is created in order to avoid affecting the
            # original one.
            node = dict(next(matching_nodes))
        except StopIteration:
            return None

        try:
            duplicate = next(matching_nodes)
        except StopIteration:
            pass
        else:
            raise ValueError("Duplicate nodes!!! Nodes: %s %s" %
                             (node, duplicate))

        node["name"] = attr_name
        node.setdefault("interfaces", {})
        return node

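    # Network lookup accepts either a plain name (string) or a mapping with a
    # "vld_id" key, which is matched against the statically defined networks.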
    def _get_network(self, attr_name):
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name)
        else:
            # Don't generalize too much. Just support vld_id.
            vld_id = attr_name.get('vld_id', {})
            # for node context networks are dicts
            iter1 = (n for n in self.networks.values() if n.get('vld_id') == vld_id)
            network = next(iter1, None)

        if network is None:
            return None

        result = {
            # name is required
            "name": network["name"],
            "vld_id": network.get("vld_id"),
            "segmentation_id": network.get("segmentation_id"),
            "network_type": network.get("network_type"),
            "physical_network": network.get("physical_network"),
        }
        return result

    def _execute_script(self, node_name, info):
        if node_name == 'local':
            self._execute_local_script(info)
        else:
            self._execute_remote_script(node_name, info)

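    # Remote steps: the script is resolved inside the package named by
    # env["prefix"] via pkg_resources, copied to the node's home directory
    # over SSH and run with sudo; a non-zero exit status aborts the run.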
    def _execute_remote_script(self, node_name, info):
        prefix = self.env.get('prefix', '')
        script, options = self._get_script(info)

        script_file = pkg_resources.resource_filename(prefix, script)

        self._get_client(node_name)
        self.client._put_file_shell(script_file, '~/{}'.format(script))

        cmd = 'sudo bash {} {}'.format(script, options)
        status, _, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)

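    # Local steps run on the machine executing yardstick itself; the script
    # path is resolved relative to YARDSTICK_ROOT_PATH and its output is only
    # logged at debug level.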
    def _execute_local_script(self, info):
        script, options = self._get_script(info)
        script = os.path.join(YARDSTICK_ROOT_PATH, script)
        cmd = ['bash', script, options]

        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        LOG.debug('\n%s', p.communicate()[0])

    def _get_script(self, info):
        return info.get('script'), info.get('options', '')

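    # SSH sessions fall back to the "ubuntu" user when the pod file does not
    # specify one, and wait up to 600 seconds for the node to become
    # reachable.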
    def _get_client(self, node_name):
        node = self._get_node_info(node_name.strip())

        if node is None:
            raise SystemExit('No such node')

        self.client = ssh.SSH.from_node(node, defaults={'user': 'ubuntu'})

        self.client.wait(timeout=600)

    def _get_node_info(self, name):
        # default to None so _get_client() can report a missing node instead
        # of leaking StopIteration
        return next((n for n in self.nodes if n['name'].strip() == name), None)