1 ##############################################################################
2 # Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
15 from vstf.controller.sw_perf import model
16 from vstf.common import perfmark as mark
17 import vstf.common.constants as cst
18 import vstf.common.decorator as deco
19 from vstf.rpc_frame_work.rpc_producer import Server
20 from vstf.controller.settings.flows_settings import FlowsSettings
21 from vstf.controller.settings.tool_settings import ToolSettings
22 from vstf.controller.settings.perf_settings import PerfSettings
23 from vstf.controller.sw_perf.perf_provider import PerfProvider, get_agent_dict
24 from vstf.controller.sw_perf.flow_producer import FlowsProducer
25 from vstf.controller.settings.tester_settings import TesterSettings
26 from vstf.controller.fabricant import Fabricant
28 LOG = logging.getLogger(__name__)
class Performance(object):
    """Drives a software performance test round over remote agents.

    Builds senders/receivers/NIC watchers/CPU watcher from a PerfProvider
    topology, runs bandwidth/latency tests, and aggregates the results.

    NOTE(review): several ``__init__`` assignments (e.g. the RPC
    connection and the sender/receiver/watcher lists used elsewhere in
    this class) are elided from this excerpt — confirm in the full file.
    """

    def __init__(self, conn, provider):
        # conn: RPC connection to the agents (presumably stored as
        # self._conn — TODO confirm); provider: PerfProvider describing
        # the per-test topology and settings.
        self._provider = provider
        # Created lazily by create_cpuwatcher().
        self._cpuwatcher = None
    def create(self, tool, tpro):
        """Provision every agent needed for one test round.

        Cleans stale namespaces, installs the required NIC drivers on the
        tester hosts, then builds namespaces, senders, receivers, NIC
        watchers and the CPU watcher.

        NOTE(review): a few lines are elided from this excerpt — e.g. the
        loop header that binds ``agent`` from ``agents``; verify against
        the full file.
        """
        agents = self._provider.get_cleaners(tool, tpro)
        # Presumably executed per agent inside an elided
        # ``for agent in agents:`` loop.
        cleaner = Fabricant(agent, self._conn)
        cleaner.clean_all_namespace()
        for tester_info in self._provider.get_testers:
            dst = tester_info["agent"]
            params = tester_info["params"]
            # Push the NIC drivers this tester needs before traffic starts.
            driver_mgr = Fabricant(dst, self._conn)
            ret = driver_mgr.install_drivers(drivers=params["drivers"])
        self.create_namespace(tool)
        self.create_senders(tool, tpro)
        self.create_receivers(tool, tpro)
        self.create_watchers(tool)
        self.create_cpuwatcher()
66 def destory(self, tool):
67 self.clear_namespace(tool)
    def create_namespace(self, tool):
        """Create the network namespace devices each agent needs for `tool`.

        NOTE(review): the line binding ``dst`` (presumably
        ``device["agent"]``) is elided from this excerpt, and the
        ``agents`` mapping appears unused in the visible lines — confirm
        in the full file.
        """
        devices = self._provider.get_namespaces(tool)
        agents = get_agent_dict(devices)
        for device in devices:
            params = device["params"]
            # Clear any stale device state before adding the new config.
            model.NetDeviceMgr.clear(dst, self._conn)
            model.NetDeviceMgr.add(dst, self._conn, params)
    def clear_namespace(self, tool):
        """Remove the namespace devices that create_namespace() added.

        NOTE(review): the line binding ``dst`` (presumably
        ``device["agent"]``) is elided from this excerpt.
        """
        devices = self._provider.get_namespaces(tool)
        for device in devices:
            params = device["params"]
            model.NetDeviceMgr.remove(dst, self._conn, params)
89 def create_senders(self, tool, tpro):
90 sender_infos = self._provider.get_senders(tool, tpro)
91 LOG.info(sender_infos)
92 for sender_info in sender_infos:
93 dst = sender_info["agent"]
94 params = sender_info["params"]
95 send = model.Sender(dst, self._conn, tool, params)
96 self._senders.append(send)
98 def create_receivers(self, tool, tpro):
99 receiver_infos = self._provider.get_receivers(tool, tpro)
100 LOG.info(receiver_infos)
101 for receiver_info in receiver_infos:
102 dst = receiver_info["agent"]
103 params = receiver_info["params"]
104 receive = model.Receiver(dst, self._conn, tool, params)
105 self._receivers.append(receive)
107 def create_watchers(self, tool):
108 watcher_infos = self._provider.get_watchers(tool)
109 LOG.info(watcher_infos)
110 for watcher_info in watcher_infos:
111 dst = watcher_info["agent"]
112 params = watcher_info["params"]
113 watch = model.NicWatcher(dst, self._conn, params)
114 self._watchers.append(watch)
116 def create_cpuwatcher(self):
117 watcher_info = self._provider.get_cpuwatcher
118 LOG.info(watcher_info)
119 dst = watcher_info["agent"]
120 self._cpuwatcher = model.CpuWatcher(dst, self._conn)
122 def start_receivers(self, **kwargs):
123 for receiver in self._receivers:
124 receiver.start(**kwargs)
126 def start_senders(self, pktsize, **kwargs):
127 for sender in self._senders:
128 sender.start(pktsize, **kwargs)
    def start_watchers(self):
        # Start every registered NIC watcher.
        # NOTE(review): the per-watcher start call in the loop body is
        # elided from this excerpt — confirm in the full file.
        for watcher in self._watchers:
    def stop_receivers(self):
        # Stop every registered receiver.
        # NOTE(review): the per-receiver stop call in the loop body is
        # elided from this excerpt — confirm in the full file.
        for receiver in self._receivers:
    def stop_senders(self):
        # Stop every registered sender.
        # NOTE(review): the per-sender stop call in the loop body is
        # elided from this excerpt — confirm in the full file.
        for sender in self._senders:
    def stop_watchers(self):
        # Stop every registered NIC watcher.
        # NOTE(review): the per-watcher stop call in the loop body is
        # elided from this excerpt — confirm in the full file.
        for watcher in self._watchers:
146 def start_cpuwatcher(self, enable=True):
147 if self._cpuwatcher and enable:
148 self._cpuwatcher.start()
150 def stop_cpuwatcher(self, enable=True):
151 if self._cpuwatcher and enable:
152 self._cpuwatcher.stop()
    def getlimitspeed(self, ptype, size):
        # Query the rate limit to replay for `ptype` ('throughput' /
        # 'frameloss') at packet `size`.
        # NOTE(review): this method's body is elided from this excerpt.
        # The three lines below appear to belong to a *separate* CPU
        # affinity helper whose `def` line is also elided (they load the
        # provider's affctl policy onto its agent) — verify against the
        # full file before editing.
        ctl = self._provider.get_cpu_affctl
        driver_mgr = Fabricant(ctl["agent"], self._conn)
        ret = driver_mgr.affctl_load(policy=ctl["params"]["policy"])
    def run_pre_affability_settings(self, tool, tpro, pktsize, **kwargs):
        """Warm-up pass run before measurement: briefly drives traffic so
        affinity/affctl state can settle.

        NOTE(review): several teardown lines between starting the senders
        and stopping the receivers (e.g. affctl application, stopping
        senders, destroying resources) are elided from this excerpt.
        """
        LOG.info("run_pre_affability_settings start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        self.stop_receivers()
        LOG.info("run_pre_affability_settings end")
    @deco.check("ratep", defaults=0)
    @deco.check("cpu_watch", defaults=False)
    def run_bandwidth_test(self, tool, tpro, pktsize, **kwargs):
        """Drive traffic and sample NIC (and optionally CPU) counters for
        one bandwidth measurement.

        NOTE(review): the stop calls for watchers/senders and the final
        resource teardown are elided from this excerpt.
        """
        LOG.info("run_bandwidth_test ")
        # cpu_watch is consumed here so only sender options remain in kwargs.
        cpu_watch = kwargs.pop("cpu_watch")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        # Let the traffic stabilise before sampling starts.
        time.sleep(self._provider.wait_balance(tool))
        self.start_watchers()
        self.start_cpuwatcher(cpu_watch)
        # Sample for the configured measurement window.
        time.sleep(self._provider.duration(tool))
        self.stop_cpuwatcher(cpu_watch)
        self.stop_receivers()
        LOG.info("run_bandwidth_test end")
    @deco.check("ratep", defaults=0)
    def run_latency_test(self, tool, tpro, pktsize, **kwargs):
        """Drive traffic for one latency measurement window.

        NOTE(review): the sender stop call and the final resource
        teardown are elided from this excerpt.
        """
        LOG.info("run_latency_test start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        # Run for the configured measurement window.
        time.sleep(self._provider.duration(tool))
        self.stop_receivers()
        LOG.info("run_latency_test end")
    def run(self, tool, protocol, ttype, sizes, affctl=False):
        """Run the full matrix for test type `ttype` over packet `sizes`
        and collect per-size result records.

        NOTE(review): the enclosing ``for size in sizes:`` loop, the
        ``result`` dict initialisation, the ``bw_type`` / ``lat_type``
        bindings and the final return are elided from this excerpt.
        """
        # Warm-up/affinity pass uses the bandwidth profile at rate 0.
        pre_tpro = protocol + "_bw"
        self.run_pre_affability_settings(tool, pre_tpro, size, ratep=0)
        if ttype in ['throughput', 'frameloss']:
            # Find the sustainable rate first, then measure bandwidth and
            # latency at that rate.
            realspeed = self.getlimitspeed(ttype, size)
            bw_tpro = protocol + "_bw"
            self.run_bandwidth_test(tool, bw_tpro, size, ratep=realspeed)
            bw_result = self.result(tool, bw_type)
            lat_tpro = protocol + '_lat'
            # NOTE(review): `lat_tool` looks like a typo for `tool` —
            # verify against the full file.
            self.run_latency_test(
                lat_tool, lat_tpro, size, ratep=realspeed)
            lat_result = self.result(tool, lat_type)
            # Drop the duplicate load figure before merging latency into
            # the bandwidth record.
            lat_result.pop('OfferedLoad')
            bw_result.update(lat_result)
            result[size] = bw_result
        elif ttype in ['latency']:
            lat_tpro = protocol + '_lat'
            self.run_latency_test(tool, lat_tpro, size, ratep=0)
            lat_result = self.result(tool, lat_type)
            result[size] = lat_result
        # NOTE(review): the guarding `else:` is elided from this excerpt;
        # as shown this raise would be unconditional.
        raise Exception("error:protocol type:%s" % (ttype))
    def result(self, tool, ttype):
        """Aggregate watcher/sender measurements into one result record.

        NOTE(review): the ``record`` initialisation, the ``else:`` guards
        around the swap statements, the ``count`` initialisation/increment
        in the latency branch, the ``else:`` before the raise, and the
        final ``return record`` are all elided from this excerpt.
        """
        if ttype in {'throughput', 'frameloss'}:
            cpu_data = self._cpuwatcher.result()
            # NOTE(review): leftover debug print (Python 2 statement form).
            print self._cpuwatcher, cpu_data
            # Busy-CPU percentage summed over all cores.
            cpu_usage = cpu_data['cpu_num'] * (100 - cpu_data['idle'])
            cpu_mhz = cpu_data['cpu_mhz']
            record[mark.cpu] = round(cpu_usage, cst.CPU_USAGE_ROUND)
            record[mark.duration] = self._provider.duration(tool)
            # Sum packet and byte counters across every NIC watcher.
            for watcher in self._watchers:
                nic_data = watcher.result()
                record[mark.rxCount] += nic_data['rxpck']
                record[mark.txCount] += nic_data['txpck']
                record[mark.bandwidth] += nic_data['rxpck/s']
                # megabytes/s -> megabits/s.
                record[mark.rxMbps] += nic_data['rxmB/s'] * 8
                record[mark.txMbps] += nic_data['txmB/s'] * 8
            # Normalise so tx >= rx by swapping when rx reads higher.
            # NOTE(review): both swap statements below are truncated in
            # this excerpt (their first/last fragments are elided).
            if record[mark.rxMbps] > record[mark.txMbps]:
                mark.rxMbps], record[
                mark.txMbps] = record[
                mark.txMbps], record[
            if record[mark.rxCount] > record[mark.txCount]:
                mark.rxCount], record[
                mark.txCount] = record[
                mark.txCount], record[
            if record[mark.txCount]:
                record[mark.percentLoss] = round(
                    100 * (1 - record[mark.rxCount] / record[mark.txCount]), cst.PKTLOSS_ROUND)
            # NOTE(review): elided `else:` — 100% loss when nothing was sent.
            record[mark.percentLoss] = 100
            # packets/s -> Mpps.
            record[mark.bandwidth] /= 1000000.0
            if cpu_mhz and record[mark.cpu]:
                # Mpps normalised per GHz of consumed CPU.
                record[mark.mppsGhz] = round(
                    record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000), cst.CPU_USAGE_ROUND)
            record[mark.bandwidth] = round(
                record[mark.bandwidth], cst.RATEP_ROUND)
        elif ttype in {'latency'}:
            minlatency, avglatency, maxlatency = 0, 0, 0
            # Average min/avg/max latency over all senders.
            for sender in self._senders:
                info = sender.result()
                minlatency += info[mark.minLatency]
                avglatency += info[mark.avgLatency]
                maxlatency += info[mark.maxLatency]
            # Guard against division by zero when no sender reported.
            count = 1 if not count else count
            record[mark.minLatency] = round(minlatency / count, cst.TIME_ROUND)
            record[mark.avgLatency] = round(avglatency / count, cst.TIME_ROUND)
            record[mark.maxLatency] = round(maxlatency / count, cst.TIME_ROUND)
        # NOTE(review): the guarding `else:` is elided from this excerpt.
        raise Exception('error:protocol type:%s' % ttype)
        LOG.info('record:%s' % record)
# --- Offline unit-test harness fragment. ---
# NOTE(review): the enclosing `def` line, the setup_logging(...) call
# opener, and several loop headers are elided from this excerpt.
from vstf.common.log import setup_logging
    log_file="/var/log/vstf/vstf-sw_perf.log",
# Hard-coded controller address used only by this offline harness.
conn = Server("192.168.188.10")
perf_settings = PerfSettings()
flows_settings = FlowsSettings()
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(conn, flows_settings)
# The provider aggregates flow/tool/tester settings for Performance.
provider = PerfProvider(
    flows_settings.settings,
    tool_settings.settings,
    tester_settings.settings)
perf = Performance(conn, provider)
tests = perf_settings.settings
for scenario, cases in tests.items():
    # NOTE(review): the inner per-case loop header is elided; `case`,
    # `tool` and `ttype` are bound by elided lines — confirm in the
    # full file.
    casetag = case['case']
    protocol = case['protocol']
    profile = case['profile']
    sizes = case['sizes']
    # Materialise the flows for this scenario/case before running.
    flow_producer.create(scenario, casetag)
    result = perf.run(tool, protocol, ttype, sizes)
# --- CLI entry fragment. ---
# NOTE(review): the enclosing `def main()` line, the setup_logging(...)
# call opener, and several argument/binding lines (`casetag`, `tool`,
# `ttype`, `affctl`, `db_mgr`) are elided from this excerpt.
from vstf.common.log import setup_logging
    log_file="/var/log/vstf/vstf-performance.log",
from vstf.controller.database.dbinterface import DbManage
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("case",
                    help="test case like Ti-1, Tn-1, Tnv-1, Tu-1...")
parser.add_argument("tool",
parser.add_argument("protocol",
                    choices=cst.TPROTOCOLS,
parser.add_argument("profile",
                    choices=cst.PROVIDERS,
parser.add_argument("type",
parser.add_argument("sizes",
                    help='test size list "64 128"')
    help="when input '--affctl', the performance will do affctl before testing")
parser.add_argument("--monitor",
                    help="which ip to be monitored")
args = parser.parse_args()
LOG.info(args.monitor)
# The monitor address doubles as the RPC broker host.
conn = Server(host=args.monitor)
protocol = args.protocol
profile = args.profile
# Parse the space-separated size list into ints (Python 2: map returns a list).
sizes = map(lambda x: int(x), args.sizes.strip().split())
flows_settings = FlowsSettings()
tool_settings = ToolSettings()
tester_settings = TesterSettings()
flow_producer = FlowsProducer(conn, flows_settings)
provider = PerfProvider(
    flows_settings.settings,
    tool_settings.settings,
    tester_settings.settings)
perf = Performance(conn, provider)
# Resolve the scenario for this case tag from the results database.
scenario = db_mgr.query_scenario(casetag)
flow_producer.create(scenario, casetag)
LOG.info(flows_settings.settings)
result = perf.run(tool, protocol, ttype, sizes, affctl)
434 if __name__ == '__main__':