1 ##############################################################################
2 # Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
15 from vstf.controller.sw_perf import model
16 from vstf.common import perfmark as mark
17 import vstf.common.constants as cst
18 import vstf.common.decorator as deco
19 from vstf.rpc_frame_work.rpc_producer import Server
20 from vstf.controller.settings.flows_settings import FlowsSettings
21 from vstf.controller.settings.tool_settings import ToolSettings
22 from vstf.controller.settings.perf_settings import PerfSettings
23 from vstf.controller.sw_perf.perf_provider import PerfProvider, get_agent_dict
24 from vstf.controller.sw_perf.flow_producer import FlowsProducer
25 from vstf.controller.settings.tester_settings import TesterSettings
26 from vstf.controller.fabricant import Fabricant
# Module-level logger; `import logging` is presumably among the import lines
# elided from this view -- confirm in full source.
LOG = logging.getLogger(__name__)
class Performance(object):
    """Drives one software performance test run.

    Uses a PerfProvider to discover agents/flows, builds sender/receiver/
    watcher objects over an RPC connection, and aggregates their results.
    """

    def __init__(self, conn, provider):
        # NOTE(review): several initialisation lines are elided from this view
        # (the methods below rely on self._conn, self._senders, self._receivers
        # and self._watchers) -- confirm in full source.
        self._provider = provider
        self._cpuwatcher = None
    def create(self, tool, tpro):
        """Prepare agents and build the whole test topology for one run."""
        # Clean stale namespaces on every cleaner agent first.
        agents = self._provider.get_cleaners(tool, tpro)
        # NOTE(review): the loop header over `agents` that binds `agent` is
        # elided from this view -- confirm in full source.
        cleaner = Fabricant(agent, self._conn)
        cleaner.clean_all_namespace()
        # Install the NIC drivers each tester host requires.
        for tester_info in self._provider.get_testers:
            dst = tester_info["agent"]
            params = tester_info["params"]
            driver_mgr = Fabricant(dst, self._conn)
            ret = driver_mgr.install_drivers(drivers=params["drivers"])
        # Build namespaces, traffic endpoints and watchers for this run.
        self.create_namespace(tool)
        self.create_senders(tool, tpro)
        self.create_receivers(tool, tpro)
        self.create_watchers(tool)
        self.create_cpuwatcher()
65 def destory(self, tool):
66 self.clear_namespace(tool)
    def create_namespace(self, tool):
        """Register every namespace device for `tool` with its host agent."""
        devices = self._provider.get_namespaces(tool)
        agents = get_agent_dict(devices)
        for device in devices:
            # NOTE(review): the `dst = device["agent"]` binding appears to be
            # elided from this view (`dst` is otherwise undefined) -- confirm
            # in full source.
            params = device["params"]
            # Clear any stale device state before adding the new definition.
            model.NetDeviceMgr.clear(dst, self._conn)
            model.NetDeviceMgr.add(dst, self._conn, params)
    def clear_namespace(self, tool):
        """Remove every namespace device previously added for `tool`."""
        devices = self._provider.get_namespaces(tool)
        for device in devices:
            # NOTE(review): the `dst = device["agent"]` binding appears to be
            # elided from this view -- confirm in full source.
            params = device["params"]
            model.NetDeviceMgr.remove(dst, self._conn, params)
88 def create_senders(self, tool, tpro):
89 sender_infos = self._provider.get_senders(tool, tpro)
90 LOG.info(sender_infos)
91 for sender_info in sender_infos:
92 dst = sender_info["agent"]
93 params = sender_info["params"]
94 send = model.Sender(dst, self._conn, tool, params)
95 self._senders.append(send)
97 def create_receivers(self, tool, tpro):
98 receiver_infos = self._provider.get_receivers(tool, tpro)
99 LOG.info(receiver_infos)
100 for receiver_info in receiver_infos:
101 dst = receiver_info["agent"]
102 params = receiver_info["params"]
103 receive = model.Receiver(dst, self._conn, tool, params)
104 self._receivers.append(receive)
106 def create_watchers(self, tool):
107 watcher_infos = self._provider.get_watchers(tool)
108 LOG.info(watcher_infos)
109 for watcher_info in watcher_infos:
110 dst = watcher_info["agent"]
111 params = watcher_info["params"]
112 watch = model.NicWatcher(dst, self._conn, params)
113 self._watchers.append(watch)
115 def create_cpuwatcher(self):
116 watcher_info = self._provider.get_cpuwatcher
117 LOG.info(watcher_info)
118 dst = watcher_info["agent"]
119 self._cpuwatcher = model.CpuWatcher(dst, self._conn)
121 def start_receivers(self, **kwargs):
122 for receiver in self._receivers:
123 receiver.start(**kwargs)
125 def start_senders(self, pktsize, **kwargs):
126 for sender in self._senders:
127 sender.start(pktsize, **kwargs)
    def start_watchers(self):
        # NOTE(review): the loop bodies of the four helpers below are elided
        # from this view (presumably watcher.start(), receiver.stop(),
        # sender.stop() and watcher.stop() respectively) -- confirm in full
        # source; as shown the loops have empty bodies.
        for watcher in self._watchers:

    def stop_receivers(self):
        for receiver in self._receivers:

    def stop_senders(self):
        for sender in self._senders:

    def stop_watchers(self):
        for watcher in self._watchers:
145 def start_cpuwatcher(self, enable=True):
146 if self._cpuwatcher and enable:
147 self._cpuwatcher.start()
149 def stop_cpuwatcher(self, enable=True):
150 if self._cpuwatcher and enable:
151 self._cpuwatcher.stop()
    def getlimitspeed(self, ptype, size):
        # NOTE(review): the body of getlimitspeed is elided from this view;
        # the statements below look like a separate affinity-control helper
        # (affctl) whose `def` line is also elided -- confirm in full source.
        ctl = self._provider.get_cpu_affctl
        driver_mgr = Fabricant(ctl["agent"], self._conn)
        ret = driver_mgr.affctl_load(policy=ctl["params"]["policy"])
    def run_pre_affability_settings(self, tool, tpro, pktsize, **kwargs):
        """Warm-up run that generates traffic so affinity can settle before
        the measured runs."""
        LOG.info("run_pre_affability_settings start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        # NOTE(review): stop_senders()/teardown calls appear to be elided from
        # this view between start and stop -- confirm in full source.
        self.stop_receivers()
        LOG.info("run_pre_affability_settings end")
    @deco.check("ratep", defaults=0)
    @deco.check("cpu_watch", defaults=False)
    def run_bandwidth_test(self, tool, tpro, pktsize, **kwargs):
        """Run one bandwidth measurement: start traffic, wait for the rate to
        settle, then sample NIC (and optionally CPU) watchers for the
        configured duration."""
        LOG.info("run_bandwidth_test ")
        cpu_watch = kwargs.pop("cpu_watch")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        # Let throughput stabilise before sampling.
        time.sleep(self._provider.wait_balance(tool))
        self.start_watchers()
        self.start_cpuwatcher(cpu_watch)
        # Sample for the tool's configured measurement window.
        time.sleep(self._provider.duration(tool))
        # NOTE(review): stop_watchers()/stop_senders()/teardown calls appear
        # to be elided from this view -- confirm in full source.
        self.stop_cpuwatcher(cpu_watch)
        self.stop_receivers()
        LOG.info("run_bandwidth_test end")
    @deco.check("ratep", defaults=0)
    def run_latency_test(self, tool, tpro, pktsize, **kwargs):
        """Run one latency measurement at the given (possibly limited) rate."""
        LOG.info("run_latency_test start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        time.sleep(self._provider.duration(tool))
        # NOTE(review): stop_senders()/teardown calls appear to be elided from
        # this view -- confirm in full source.
        self.stop_receivers()
        LOG.info("run_latency_test end")
    def run(self, tool, protocol, ttype, sizes, affctl=False):
        """Run the full test matrix for `ttype` over each packet size.

        :param tool: traffic generator tag
        :param protocol: protocol tag; suffixed with "_bw"/"_lat" to select
            the flow profile
        :param ttype: 'throughput' | 'frameloss' | 'latency'
        :param sizes: iterable of packet sizes
        :param affctl: presumably triggers affinity control before testing --
            not used in the visible lines; confirm in full source
        """
        # NOTE(review): the `result` dict initialisation, the `for size in
        # sizes:` loop header, and the bindings of `bw_type`, `lat_type` and
        # `lat_tool` are elided from this view (all are undefined as shown);
        # the final `raise` is presumably inside an `else:` branch -- confirm
        # in full source.
        pre_tpro = protocol + "_bw"
        self.run_pre_affability_settings(tool, pre_tpro, size, ratep=0)
        if ttype in ['throughput', 'frameloss']:
            # Find the sustainable rate, then measure bandwidth and latency
            # at that rate and merge both records.
            realspeed = self.getlimitspeed(ttype, size)
            bw_tpro = protocol + "_bw"
            self.run_bandwidth_test(tool, bw_tpro, size, ratep=realspeed)
            bw_result = self.result(tool, bw_type)
            lat_tpro = protocol + '_lat'
            self.run_latency_test(lat_tool, lat_tpro, size, ratep=realspeed)
            lat_result = self.result(tool, lat_type)
            # The latency run is rate-limited; its offered load is redundant.
            lat_result.pop('OfferedLoad')
            bw_result.update(lat_result)
            result[size] = bw_result
        elif ttype in ['latency']:
            lat_tpro = protocol + '_lat'
            self.run_latency_test(tool, lat_tpro, size, ratep=0)
            lat_result = self.result(tool, lat_type)
            result[size] = lat_result
        raise Exception("error:protocol type:%s" % (ttype))
    def result(self, tool, ttype):
        """Aggregate watcher/sender measurements into a record dict.

        For 'throughput'/'frameloss': sums NIC counters over all watchers and
        attaches CPU usage.  For 'latency': averages min/avg/max latency over
        all senders.
        """
        if ttype in {'throughput', 'frameloss'}:
            # NOTE(review): the `record` initialisation and several other
            # lines are elided from this view (the counters below are
            # accumulated with +=) -- confirm in full source.
            cpu_data = self._cpuwatcher.result()
            print self._cpuwatcher, cpu_data
            # Busy percentage summed over all cores.
            cpu_usage = cpu_data['cpu_num'] * (100 - cpu_data['idle'])
            cpu_mhz = cpu_data['cpu_mhz']
            record[mark.cpu] = round(cpu_usage, cst.CPU_USAGE_ROUND)
            record[mark.duration] = self._provider.duration(tool)
            # Sum packet/byte counters over every NIC watcher; mB/s -> Mbit/s.
            for watcher in self._watchers:
                nic_data = watcher.result()
                record[mark.rxCount] += nic_data['rxpck']
                record[mark.txCount] += nic_data['txpck']
                record[mark.bandwidth] += nic_data['rxpck/s']
                record[mark.rxMbps] += nic_data['rxmB/s'] * 8
                record[mark.txMbps] += nic_data['txmB/s'] * 8
            # Normalise so rx <= tx regardless of flow direction.
            if record[mark.rxMbps] > record[mark.txMbps]:
                record[mark.rxMbps], record[mark.txMbps] = record[mark.txMbps], record[mark.rxMbps]
            if record[mark.rxCount] > record[mark.txCount]:
                record[mark.rxCount], record[mark.txCount] = record[mark.txCount], record[mark.rxCount]
            if record[mark.txCount]:
                # NOTE(review): the continuation with the rounding precision
                # and the `else:` line are elided from this view; also beware
                # Python 2 integer division if the counts are ints -- confirm
                # in full source.
                record[mark.percentLoss] = round(100 * (1 - record[mark.rxCount] / record[mark.txCount]),
                record[mark.percentLoss] = 100
            # packets/s -> Mpps.
            record[mark.bandwidth] /= 1000000.0
            if cpu_mhz and record[mark.cpu]:
                # Mpps per GHz of consumed CPU (continuation elided in view).
                record[mark.mppsGhz] = round(record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000),
            record[mark.bandwidth] = round(record[mark.bandwidth], cst.RATEP_ROUND)
        elif ttype in {'latency'}:
            minlatency, avglatency, maxlatency = 0, 0, 0
            # NOTE(review): the initialisation/increment of `count` is elided
            # from this view -- confirm in full source.
            for sender in self._senders:
                info = sender.result()
                minlatency += info[mark.minLatency]
                avglatency += info[mark.avgLatency]
                maxlatency += info[mark.maxLatency]
            # Guard against division by zero when no sender reported.
            count = 1 if not count else count
            record[mark.minLatency] = round(minlatency / count, cst.TIME_ROUND)
            record[mark.avgLatency] = round(avglatency / count, cst.TIME_ROUND)
            record[mark.maxLatency] = round(maxlatency / count, cst.TIME_ROUND)
        # NOTE(review): the raise below is presumably inside an `else:` branch
        # and a `return record` presumably follows the log line -- confirm in
        # full source.
        raise Exception('error:protocol type:%s' % ttype)
        LOG.info('record:%s' % record)
# NOTE(review): the lines below appear to be the body of a unit-test entry
# point whose `def` line is elided from this view -- confirm indentation and
# enclosing function in full source.
from vstf.common.log import setup_logging
setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-sw_perf.log", clevel=logging.INFO)
# Hard-coded controller address for the ad-hoc test run.
conn = Server("192.168.188.10")
perf_settings = PerfSettings()
flows_settings = FlowsSettings()
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(conn, flows_settings)
provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
perf = Performance(conn, provider)
tests = perf_settings.settings
for scenario, cases in tests.items():
    # NOTE(review): the inner `for case in cases:` loop header and the
    # `tool`/`ttype` bindings are elided from this view -- confirm in full
    # source.
    casetag = case['case']
    protocol = case['protocol']
    profile = case['profile']
    sizes = case['sizes']
    flow_producer.create(scenario, casetag)
    result = perf.run(tool, protocol, ttype, sizes)
# NOTE(review): the lines below appear to be the body of the CLI entry point
# whose `def` line is elided from this view; several add_argument calls are
# missing their continuation lines, and `db_mgr`, `casetag`, `tool`, `ttype`
# and `affctl` are undefined in the visible lines -- confirm in full source.
from vstf.common.log import setup_logging
setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-performance.log", clevel=logging.INFO)
from vstf.controller.database.dbinterface import DbManage
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("case",
                    help="test case like Ti-1, Tn-1, Tnv-1, Tu-1...")
parser.add_argument("tool",
parser.add_argument("protocol",
                    choices=cst.TPROTOCOLS,
parser.add_argument("profile",
                    choices=cst.PROVIDERS,
parser.add_argument("type",
parser.add_argument("sizes",
                    help='test size list "64 128"')
parser.add_argument("--affctl",
                    help="when input '--affctl', the performance will do affctl before testing")
parser.add_argument("--monitor",
                    help="which ip to be monitored")
args = parser.parse_args()
LOG.info(args.monitor)
conn = Server(host=args.monitor)
protocol = args.protocol
profile = args.profile
# Python 2 map() returns a list here, e.g. "64 128" -> [64, 128].
sizes = map(lambda x: int(x), args.sizes.strip().split())
flows_settings = FlowsSettings()
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(conn, flows_settings)
provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
perf = Performance(conn, provider)
scenario = db_mgr.query_scenario(casetag)
flow_producer.create(scenario, casetag)
LOG.info(flows_settings.settings)
result = perf.run(tool, protocol, ttype, sizes, affctl)
# NOTE(review): the guard body (likely dispatching to the elided entry-point
# functions above) is missing from this view -- confirm in full source.
if __name__ == '__main__':