Merge "Change PTL informatin in INFO"
[bottlenecks.git] / testsuites / vstf / vstf_scripts / vstf / controller / sw_perf / performance.py
1 ##############################################################################
2 # Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10
11 import time
12 import argparse
13 import logging
14
15 from vstf.controller.sw_perf import model
16 from vstf.common import perfmark as mark
17 import vstf.common.constants as cst
18 import vstf.common.decorator as deco
19 from vstf.rpc_frame_work.rpc_producer import Server
20 from vstf.controller.settings.flows_settings import FlowsSettings
21 from vstf.controller.settings.tool_settings import ToolSettings
22 from vstf.controller.settings.perf_settings import PerfSettings
23 from vstf.controller.sw_perf.perf_provider import PerfProvider, get_agent_dict
24 from vstf.controller.sw_perf.flow_producer import FlowsProducer
25 from vstf.controller.settings.tester_settings import TesterSettings
26 from vstf.controller.fabricant import Fabricant
27
28 LOG = logging.getLogger(__name__)
29
30
class Performance(object):
    """Drive a software performance test across distributed agents.

    Orchestrates traffic senders, receivers, NIC watchers and a CPU
    watcher over an RPC connection, then aggregates their results into
    a single record per test.

    :param conn: RPC producer (rpc_frame_work.rpc_producer.Server) used
                 to reach the agents.
    :param provider: PerfProvider exposing flow/tool/tester settings.
    """

    def __init__(self, conn, provider):
        self._provider = provider
        self._conn = conn
        self._init()

    def _init(self):
        # Reset the per-run collections of agent proxies.
        self._senders = []
        self._receivers = []
        self._watchers = []
        self._cpuwatcher = None

    def create(self, tool, tpro):
        """Prepare all agents for one test run.

        Cleans namespaces on every cleaner agent, installs NIC drivers
        on every tester, then builds sender/receiver/watcher proxies.

        :param tool: traffic tool name (e.g. "pktgen", "netperf")
        :param tpro: test protocol tag (e.g. "udp_bw")
        """
        self._init()
        agents = self._provider.get_cleaners(tool, tpro)
        LOG.info(agents)
        for agent in agents:
            cleaner = Fabricant(agent, self._conn)
            cleaner.clean_all_namespace()

        for tester_info in self._provider.get_testers:
            dst = tester_info["agent"]
            params = tester_info["params"]
            LOG.info(tester_info)
            driver_mgr = Fabricant(dst, self._conn)
            ret = driver_mgr.install_drivers(drivers=params["drivers"])
            LOG.info(ret)

        self.create_namespace(tool)
        self.create_senders(tool, tpro)
        self.create_receivers(tool, tpro)
        self.create_watchers(tool)
        self.create_cpuwatcher()

    def destory(self, tool):
        """Tear down resources created by :meth:`create`.

        NOTE(review): method name is a historical typo ("destroy") kept
        for backward compatibility with existing callers.
        """
        self.clear_namespace(tool)

    def create_namespace(self, tool):
        """Create the network namespaces every test device needs.

        Each agent's namespaces are cleared once (first time the agent
        is seen) before any device is added to it.
        """
        devices = self._provider.get_namespaces(tool)
        agents = get_agent_dict(devices)
        LOG.info(agents)
        for device in devices:
            dst = device["agent"]
            params = device["params"]
            if not agents[dst]:
                # First device on this agent: wipe stale namespaces once.
                model.NetDeviceMgr.clear(dst, self._conn)
                agents[dst] = True

            model.NetDeviceMgr.add(dst, self._conn, params)

    def clear_namespace(self, tool):
        """Remove every namespace device registered for this tool."""
        devices = self._provider.get_namespaces(tool)
        for device in devices:
            dst = device["agent"]
            params = device["params"]
            model.NetDeviceMgr.remove(dst, self._conn, params)

    def create_senders(self, tool, tpro):
        """Build a Sender proxy for each configured sending agent."""
        sender_infos = self._provider.get_senders(tool, tpro)
        LOG.info(sender_infos)
        for sender_info in sender_infos:
            dst = sender_info["agent"]
            params = sender_info["params"]
            send = model.Sender(dst, self._conn, tool, params)
            self._senders.append(send)

    def create_receivers(self, tool, tpro):
        """Build a Receiver proxy for each configured receiving agent."""
        receiver_infos = self._provider.get_receivers(tool, tpro)
        LOG.info(receiver_infos)
        for receiver_info in receiver_infos:
            dst = receiver_info["agent"]
            params = receiver_info["params"]
            receive = model.Receiver(dst, self._conn, tool, params)
            self._receivers.append(receive)

    def create_watchers(self, tool):
        """Build a NicWatcher proxy for each NIC to be monitored."""
        watcher_infos = self._provider.get_watchers(tool)
        LOG.info(watcher_infos)
        for watcher_info in watcher_infos:
            dst = watcher_info["agent"]
            params = watcher_info["params"]
            watch = model.NicWatcher(dst, self._conn, params)
            self._watchers.append(watch)

    def create_cpuwatcher(self):
        """Build the single CpuWatcher proxy for the monitored host."""
        watcher_info = self._provider.get_cpuwatcher
        LOG.info(watcher_info)
        dst = watcher_info["agent"]
        self._cpuwatcher = model.CpuWatcher(dst, self._conn)

    def start_receivers(self, **kwargs):
        for receiver in self._receivers:
            receiver.start(**kwargs)

    def start_senders(self, pktsize, **kwargs):
        for sender in self._senders:
            sender.start(pktsize, **kwargs)

    def start_watchers(self):
        for watcher in self._watchers:
            watcher.start()

    def stop_receivers(self):
        for receiver in self._receivers:
            receiver.stop()

    def stop_senders(self):
        for sender in self._senders:
            sender.stop()

    def stop_watchers(self):
        for watcher in self._watchers:
            watcher.stop()

    def start_cpuwatcher(self, enable=True):
        # 'enable' lets bandwidth tests skip CPU sampling on demand.
        if self._cpuwatcher and enable:
            self._cpuwatcher.start()

    def stop_cpuwatcher(self, enable=True):
        if self._cpuwatcher and enable:
            self._cpuwatcher.stop()

    def getlimitspeed(self, ptype, size):
        # Placeholder: rate-limit discovery is not implemented; 0 means
        # "no limit" to the senders.
        return 0

    def affctl(self):
        """Apply the CPU-affinity policy configured by the provider."""
        ctl = self._provider.get_cpu_affctl
        LOG.info(ctl)
        driver_mgr = Fabricant(ctl["agent"], self._conn)
        ret = driver_mgr.affctl_load(policy=ctl["params"]["policy"])
        LOG.info(ret)

    def run_pre_affability_settings(self, tool, tpro, pktsize, **kwargs):
        """Short warm-up run used only to apply CPU affinity settings."""
        LOG.info("run_pre_affability_settings start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        self.affctl()
        time.sleep(2)
        self.stop_senders()
        self.stop_receivers()
        self.destory(tool)
        LOG.info("run_pre_affability_settings end")

    @deco.check("ratep", defaults=0)
    @deco.check("cpu_watch", defaults=False)
    def run_bandwidth_test(self, tool, tpro, pktsize, **kwargs):
        """Run one bandwidth measurement, optionally sampling CPU usage."""
        LOG.info("run_bandwidth_test ")
        cpu_watch = kwargs.pop("cpu_watch")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        # Let traffic stabilise before the measurement window opens.
        time.sleep(self._provider.wait_balance(tool))
        self.start_watchers()
        self.start_cpuwatcher(cpu_watch)
        time.sleep(self._provider.duration(tool))
        self.stop_watchers()
        self.stop_cpuwatcher(cpu_watch)
        self.stop_senders()
        self.stop_receivers()
        self.destory(tool)
        LOG.info("run_bandwidth_test end")

    @deco.check("ratep", defaults=0)
    def run_latency_test(self, tool, tpro, pktsize, **kwargs):
        """Run one latency measurement for the configured duration."""
        LOG.info("run_latency_test start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        time.sleep(self._provider.duration(tool))
        self.stop_senders()
        self.stop_receivers()
        self.destory(tool)
        LOG.info("run_latency_test end")

    def run(self, tool, protocol, ttype, sizes, affctl=False):
        """Run the full test matrix for every packet size.

        :param tool: traffic tool name
        :param protocol: transport protocol prefix ("udp", "tcp", ...)
        :param ttype: 'throughput', 'frameloss' or 'latency'
        :param sizes: iterable of packet sizes to test
        :param affctl: when True, run a warm-up pass to set CPU affinity
        :returns: dict mapping packet size -> merged result record
        :raises Exception: on an unknown test type
        """
        result = {}
        if affctl:
            pre_tpro = protocol + "_bw"
            size = sizes[0]
            self.run_pre_affability_settings(tool, pre_tpro, size, ratep=0)

        for size in sizes:
            if ttype in ['throughput', 'frameloss']:
                realspeed = self.getlimitspeed(ttype, size)
                bw_tpro = protocol + "_bw"
                bw_type = ttype
                self.run_bandwidth_test(tool, bw_tpro, size, ratep=realspeed)
                bw_result = self.result(tool, bw_type)

                # Latency is always measured with qperf, regardless of
                # the bandwidth tool.
                lat_tool = "qperf"
                lat_type = 'latency'
                lat_tpro = protocol + '_lat'
                self.run_latency_test(
                    lat_tool, lat_tpro, size, ratep=realspeed)
                lat_result = self.result(tool, lat_type)
                LOG.info(bw_result)
                LOG.info(lat_result)
                # The bandwidth record's OfferedLoad wins over latency's.
                lat_result.pop('OfferedLoad')
                bw_result.update(lat_result)
                result[size] = bw_result

            elif ttype in ['latency']:
                lat_tpro = protocol + '_lat'
                lat_type = ttype
                self.run_latency_test(tool, lat_tpro, size, ratep=0)
                lat_result = self.result(tool, lat_type)
                result[size] = lat_result
            else:
                raise Exception("error:protocol type:%s" % (ttype))
        return result

    def result(self, tool, ttype):
        """Aggregate watcher/sender results into one record.

        :param tool: traffic tool name (used to look up test duration)
        :param ttype: 'throughput', 'frameloss' or 'latency'
        :returns: dict keyed by perfmark constants
        :raises Exception: on an unknown test type
        """
        if ttype in {'throughput', 'frameloss'}:
            record = {
                mark.rxCount: 0,
                mark.txCount: 0,
                mark.bandwidth: 0,
                mark.offLoad: 100.0,
                mark.mppsGhz: 0,
                mark.percentLoss: 0,
                mark.avgLatency: 0,
                mark.maxLatency: 0,
                mark.minLatency: 0,
                mark.rxMbps: 0,
                mark.txMbps: 0
            }

            # Fix: cpu_mhz was previously unbound when the watcher
            # returned no data, raising NameError further down.
            cpu_mhz = 0
            cpu_data = self._cpuwatcher.result()
            # Fix: was a Python-2-only bare `print`; use the logger.
            LOG.debug("%s %s", self._cpuwatcher, cpu_data)
            if cpu_data:
                cpu_usage = cpu_data['cpu_num'] * (100 - cpu_data['idle'])
                cpu_mhz = cpu_data['cpu_mhz']
                record[mark.cpu] = round(cpu_usage, cst.CPU_USAGE_ROUND)
                record[mark.duration] = self._provider.duration(tool)

            for watcher in self._watchers:
                nic_data = watcher.result()
                record[mark.rxCount] += nic_data['rxpck']
                record[mark.txCount] += nic_data['txpck']
                record[mark.bandwidth] += nic_data['rxpck/s']
                record[mark.rxMbps] += nic_data['rxmB/s'] * 8
                record[mark.txMbps] += nic_data['txmB/s'] * 8

            # Normalise so rx <= tx (direction of measurement may vary).
            if record[mark.rxMbps] > record[mark.txMbps]:
                record[mark.rxMbps], record[mark.txMbps] = \
                    record[mark.txMbps], record[mark.rxMbps]

            if record[mark.rxCount] > record[mark.txCount]:
                record[mark.rxCount], record[mark.txCount] = \
                    record[mark.txCount], record[mark.rxCount]

            if record[mark.txCount]:
                # float() guards against Python 2 integer division,
                # which silently truncated the loss ratio to 0.
                record[mark.percentLoss] = round(
                    100 * (1 - float(record[mark.rxCount]) /
                           record[mark.txCount]),
                    cst.PKTLOSS_ROUND)
            else:
                record[mark.percentLoss] = 100

            # packets/s -> Mpps
            record[mark.bandwidth] /= 1000000.0
            # Fix: record.get avoids KeyError when no CPU data was set.
            if cpu_mhz and record.get(mark.cpu):
                record[mark.mppsGhz] = round(
                    record[mark.bandwidth] /
                    (record[mark.cpu] * cpu_mhz / 100000),
                    cst.CPU_USAGE_ROUND)

            record[mark.bandwidth] = round(
                record[mark.bandwidth], cst.RATEP_ROUND)

        elif ttype in {'latency'}:
            record = {
                mark.offLoad: 0.0,
                mark.avgLatency: 0,
                mark.maxLatency: 0,
                mark.minLatency: 0
            }
            minlatency, avglatency, maxlatency = 0, 0, 0
            count = 0
            for sender in self._senders:
                info = sender.result()
                LOG.info(info)
                minlatency += info[mark.minLatency]
                avglatency += info[mark.avgLatency]
                maxlatency += info[mark.maxLatency]
                # Fix: count was never incremented, so multi-sender
                # latencies were summed instead of averaged.
                count += 1
            count = 1 if not count else count
            # float() avoids Python 2 integer division on whole-number
            # latencies.
            record[mark.minLatency] = round(
                minlatency / float(count), cst.TIME_ROUND)
            record[mark.avgLatency] = round(
                avglatency / float(count), cst.TIME_ROUND)
            record[mark.maxLatency] = round(
                maxlatency / float(count), cst.TIME_ROUND)

        else:
            raise Exception('error:protocol type:%s' % ttype)

        LOG.info('record:%s' % record)
        return record
329
330
def unit_test():
    """Ad-hoc driver: run every configured perf case against a fixed host.

    Iterates all scenarios/cases in PerfSettings and executes each one
    through a Performance instance, logging the result.
    """
    from vstf.common.log import setup_logging
    setup_logging(
        level=logging.DEBUG,
        log_file="/var/log/vstf/vstf-sw_perf.log",
        clevel=logging.INFO)

    conn = Server("192.168.188.10")
    perf_settings = PerfSettings()
    flows_settings = FlowsSettings()
    tool_settings = ToolSettings()
    tester_settings = TesterSettings()
    flow_producer = FlowsProducer(conn, flows_settings)
    provider = PerfProvider(
        flows_settings.settings,
        tool_settings.settings,
        tester_settings.settings)
    perf = Performance(conn, provider)
    tests = perf_settings.settings
    for scenario, cases in tests.items():
        if not cases:
            continue
        for case in cases:
            # NOTE: case also carries a 'profile' key; it is not used
            # by this driver (removed a previously unused local).
            casetag = case['case']
            tool = case['tool']
            protocol = case['protocol']
            ttype = case['type']
            sizes = case['sizes']

            flow_producer.create(scenario, casetag)
            result = perf.run(tool, protocol, ttype, sizes)
            LOG.info(result)
364
365
def main():
    """CLI entry point: parse one test case from argv and run it.

    Looks up the scenario for the given case tag in the database,
    builds the flows for it, runs the test and logs the result.
    """
    from vstf.common.log import setup_logging
    setup_logging(
        level=logging.DEBUG,
        log_file="/var/log/vstf/vstf-performance.log",
        clevel=logging.INFO)
    from vstf.controller.database.dbinterface import DbManage
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument("case",
                        action="store",
                        help="test case like Ti-1, Tn-1, Tnv-1, Tu-1...")
    parser.add_argument("tool",
                        action="store",
                        choices=cst.TOOLS,
                        )
    parser.add_argument("protocol",
                        action="store",
                        choices=cst.TPROTOCOLS,
                        )
    parser.add_argument("profile",
                        action="store",
                        choices=cst.PROVIDERS,
                        )
    parser.add_argument("type",
                        action="store",
                        choices=cst.TTYPES,
                        )
    parser.add_argument("sizes",
                        action="store",
                        default="64",
                        help='test size list "64 128"')
    parser.add_argument(
        "--affctl",
        action="store_true",
        help="when input '--affctl', the performance will do affctl before testing")
    parser.add_argument("--monitor",
                        dest="monitor",
                        default="localhost",
                        action="store",
                        help="which ip to be monitored")
    args = parser.parse_args()

    LOG.info(args.monitor)
    conn = Server(host=args.monitor)
    db_mgr = DbManage()

    casetag = args.case
    tool = args.tool
    protocol = args.protocol
    ttype = args.type
    # List comprehension instead of map(): sizes is indexed later
    # (sizes[0] in Performance.run), which breaks with a map object on
    # Python 3.
    sizes = [int(x) for x in args.sizes.strip().split()]

    flows_settings = FlowsSettings()
    tool_settings = ToolSettings()
    tester_settings = TesterSettings()
    flow_producer = FlowsProducer(conn, flows_settings)
    provider = PerfProvider(
        flows_settings.settings,
        tool_settings.settings,
        tester_settings.settings)
    perf = Performance(conn, provider)
    scenario = db_mgr.query_scenario(casetag)
    flow_producer.create(scenario, casetag)
    LOG.info(flows_settings.settings)
    # Fix: 'affctl' was an undefined name here (NameError); the parsed
    # flag lives on the args namespace.
    result = perf.run(tool, protocol, ttype, sizes, args.affctl)
    LOG.info(result)
432
433
434 if __name__ == '__main__':
435     main()