Upload the vstf contribution as a bottleneck network testing framework.
[bottlenecks.git] vstf/vstf/controller/sw_perf/performance.py
#!/usr/bin/python
# -*- coding: utf8 -*-
# author: wly
# date: 2015-09-19
# see license for license details

import time
import argparse
import logging

from vstf.controller.sw_perf import model
from vstf.common import perfmark as mark
import vstf.common.constants as cst
from vstf.rpc_frame_work.rpc_producer import Server
from vstf.controller.settings.flows_settings import FlowsSettings
from vstf.controller.settings.tool_settings import ToolSettings
from vstf.controller.settings.perf_settings import PerfSettings
from vstf.controller.sw_perf.perf_provider import PerfProvider, get_agent_dict
from vstf.controller.sw_perf.flow_producer import FlowsProducer
from vstf.controller.settings.tester_settings import TesterSettings
from vstf.controller.fabricant import Fabricant

LOG = logging.getLogger(__name__)


class Performance(object):
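    """Coordinate one software performance run: build the sender,
    receiver and watcher models on the remote agents described by the
    provider, drive the traffic, and collect the results.
    """
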
    def __init__(self, conn, provider):
        self._provider = provider
        self._conn = conn
        self._init()

    def _init(self):
        self._senders = []
        self._receivers = []
        self._watchers = []
        self._cpuwatcher = None

    def create(self, tool, tpro):
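        """Prepare every agent for a run: clean leftover namespaces,
        install the drivers each tester needs, then build the sender,
        receiver and watcher models for the given tool and protocol.
        """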
        self._init()
        agents = self._provider.get_cleaners(tool, tpro)
        LOG.info(agents)
        for agent in agents:
            cleaner = Fabricant(agent, self._conn)
            cleaner.clean_all_namespace()

        for tester_info in self._provider.get_testers:
            dst = tester_info["agent"]
            params = tester_info["params"]
            LOG.info(tester_info)
            driver_mgr = Fabricant(dst, self._conn)
            ret = driver_mgr.install_drivers(drivers=params["drivers"])
            LOG.info(ret)

        self.create_namespace(tool)
        self.create_senders(tool, tpro)
        self.create_receivers(tool, tpro)
        self.create_watchers(tool)
        self.create_cpuwatcher()

    def destroy(self, tool):
        self.clear_namespace(tool)

    def create_namespace(self, tool):
        devices = self._provider.get_namespaces(tool)
        agents = get_agent_dict(devices)
        LOG.info(agents)
        for device in devices:
            dst = device["agent"]
            params = device["params"]
            # clear an agent's stale devices only once, before its first add
            if not agents[dst]:
                model.NetDeviceMgr.clear(dst, self._conn)
                agents[dst] = True

            model.NetDeviceMgr.add(dst, self._conn, params)

    def clear_namespace(self, tool):
        devices = self._provider.get_namespaces(tool)
        for device in devices:
            dst = device["agent"]
            params = device["params"]
            model.NetDeviceMgr.remove(dst, self._conn, params)

    def create_senders(self, tool, tpro):
        sender_infos = self._provider.get_senders(tool, tpro)
        LOG.info(sender_infos)
        for sender_info in sender_infos:
            dst = sender_info["agent"]
            params = sender_info["params"]
            send = model.Sender(dst, self._conn, tool, params)
            self._senders.append(send)

    def create_receivers(self, tool, tpro):
        receiver_infos = self._provider.get_receivers(tool, tpro)
        LOG.info(receiver_infos)
        for receiver_info in receiver_infos:
            dst = receiver_info["agent"]
            params = receiver_info["params"]
            receive = model.Receiver(dst, self._conn, tool, params)
            self._receivers.append(receive)

    def create_watchers(self, tool):
        watcher_infos = self._provider.get_watchers(tool)
        LOG.info(watcher_infos)
        for watcher_info in watcher_infos:
            dst = watcher_info["agent"]
            params = watcher_info["params"]
            watch = model.NicWatcher(dst, self._conn, params)
            self._watchers.append(watch)

    def create_cpuwatcher(self):
        watcher_info = self._provider.get_cpuwatcher
        LOG.info(watcher_info)
        dst = watcher_info["agent"]
        self._cpuwatcher = model.CpuWatcher(dst, self._conn)

    def start_receivers(self, **kwargs):
        for receiver in self._receivers:
            receiver.start(**kwargs)

    def start_senders(self, pktsize, **kwargs):
        for sender in self._senders:
            sender.start(pktsize, **kwargs)

    def start_watchers(self):
        for watcher in self._watchers:
            watcher.start()

    def stop_receivers(self):
        for receiver in self._receivers:
            receiver.stop()

    def stop_senders(self):
        for sender in self._senders:
            sender.stop()

    def stop_watchers(self):
        for watcher in self._watchers:
            watcher.stop()

    def start_cpuwatcher(self):
        if self._cpuwatcher:
            self._cpuwatcher.start()

    def stop_cpuwatcher(self):
        if self._cpuwatcher:
            self._cpuwatcher.stop()

    def getlimitspeed(self, ptype, size):
        # placeholder: limit-speed probing is not implemented yet, so
        # callers fall back to an unlimited rate (0)
        return 0

    def affctl(self):
        ctl = self._provider.get_cpu_affctl
        LOG.info(ctl)
        driver_mgr = Fabricant(ctl["agent"], self._conn)
        ret = driver_mgr.affctl_load(policy=ctl["params"]["policy"])
        LOG.info(ret)

    def run_pre_affinity_settings(self, tool, tpro, pktsize, **kwargs):
        LOG.info("run_pre_affinity_settings start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        self.affctl()
        time.sleep(2)
        self.stop_senders()
        self.stop_receivers()
        self.destroy(tool)
        LOG.info("run_pre_affinity_settings end")

    def run_bandwidth_test(self, tool, tpro, pktsize, **kwargs):
        LOG.info("run_bandwidth_test start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        time.sleep(self._provider.wait_balance(tool))
        self.start_watchers()
        self.start_cpuwatcher()
        time.sleep(self._provider.duration(tool))
        self.stop_watchers()
        self.stop_cpuwatcher()
        self.stop_senders()
        self.stop_receivers()
        self.destroy(tool)
        LOG.info("run_bandwidth_test end")

    def run_latency_test(self, tool, tpro, pktsize, **kwargs):
        LOG.info("run_latency_test start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        time.sleep(self._provider.duration(tool))
        self.stop_senders()
        self.stop_receivers()
        self.destroy(tool)
        LOG.info("run_latency_test end")

    def run(self, tool, protocol, ttype, sizes, affctl=False):
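        """Run the requested test type for each packet size.

        For throughput/frameloss, a bandwidth test is followed by a
        qperf latency test per size and the two records are merged;
        for latency, only the latency test runs.  Returns a dict keyed
        by packet size.
        """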
        result = {}
        if affctl:
            pre_tpro = protocol + "_bw"
            size = sizes[0]
            self.run_pre_affinity_settings(tool, pre_tpro, size, ratep=0)

        for size in sizes:
            if ttype in ['throughput', 'frameloss']:
                realspeed = self.getlimitspeed(ttype, size)
                bw_tpro = protocol + "_bw"
                bw_type = ttype
                self.run_bandwidth_test(tool, bw_tpro, size, ratep=realspeed)
                bw_result = self.result(tool, bw_type)

                lat_tool = "qperf"
                lat_type = 'latency'
                lat_tpro = protocol + '_lat'
                self.run_latency_test(lat_tool, lat_tpro, size, ratep=realspeed)
                lat_result = self.result(tool, lat_type)
                LOG.info(bw_result)
                LOG.info(lat_result)
                lat_result.pop('OfferedLoad')
                bw_result.update(lat_result)
                result[size] = bw_result

            elif ttype in ['latency']:
                lat_tpro = protocol + '_lat'
                lat_type = ttype
                self.run_latency_test(tool, lat_tpro, size, ratep=None)
                lat_result = self.result(tool, lat_type)
                result[size] = lat_result
            else:
                raise Exception("unsupported test type: %s" % ttype)
        return result

    def result(self, tool, ttype):
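        """Assemble the result record for one run: NIC and CPU watcher
        counters for throughput/frameloss, averaged sender latencies
        for latency.
        """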
        if ttype in {'throughput', 'frameloss'}:
            record = {
                mark.rxCount: 0,
                mark.txCount: 0,
                mark.bandwidth: 0,
                mark.offLoad: 100.0,
                mark.mppsGhz: 0,
                mark.percentLoss: 0,
                mark.avgLatency: 0,
                mark.maxLatency: 0,
                mark.minLatency: 0,
                mark.rxMbps: 0,
                mark.txMbps: 0
            }

            # default so the mpps/GHz branch below is skipped cleanly
            # when no CPU data comes back
            cpu_mhz = 0
            cpu_data = self._cpuwatcher.result()
            LOG.debug("%s %s", self._cpuwatcher, cpu_data)
            if cpu_data:
                cpu_usage = cpu_data['cpu_num'] * (100 - cpu_data['idle'])
                cpu_mhz = cpu_data['cpu_mhz']
                record[mark.cpu] = round(cpu_usage, cst.CPU_USAGE_ROUND)
                record[mark.duration] = self._provider.duration(tool)

            for watcher in self._watchers:
                nic_data = watcher.result()
                record[mark.rxCount] += nic_data['rxpck']
                record[mark.txCount] += nic_data['txpck']
                record[mark.bandwidth] += nic_data['rxpck/s']
                record[mark.rxMbps] += nic_data['rxmB/s']
                record[mark.txMbps] += nic_data['txmB/s']

            if record[mark.txCount]:
                # force float division so the loss ratio is not truncated
                record[mark.percentLoss] = round(
                    100 * (1 - float(record[mark.rxCount]) / record[mark.txCount]),
                    cst.PKTLOSS_ROUND)
            else:
                record[mark.percentLoss] = 100

            record[mark.bandwidth] /= 1000000.0
            if cpu_mhz and record[mark.cpu]:
                record[mark.mppsGhz] = round(
                    record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000),
                    cst.CPU_USAGE_ROUND)

            record[mark.bandwidth] = round(record[mark.bandwidth], cst.RATEP_ROUND)

        elif ttype in {'latency'}:
            record = {
                mark.offLoad: 0.0,
                mark.avgLatency: 0,
                mark.maxLatency: 0,
                mark.minLatency: 0
            }
            minlatency, avglatency, maxlatency = 0, 0, 0
            count = 0
            for sender in self._senders:
                info = sender.result()
                LOG.info(info)
                minlatency += info[mark.minLatency]
                avglatency += info[mark.avgLatency]
                maxlatency += info[mark.maxLatency]
                count += 1
            # guard against division by zero when there are no senders
            count = 1 if not count else count
            record[mark.minLatency] = round(minlatency / count, cst.TIME_ROUND)
            record[mark.avgLatency] = round(avglatency / count, cst.TIME_ROUND)
            record[mark.maxLatency] = round(maxlatency / count, cst.TIME_ROUND)

        else:
            raise Exception('unsupported result type: %s' % ttype)

        LOG.info('record:%s' % record)
        return record


def unit_test():
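    # Note: this smoke test assumes the vstf RPC endpoint at the IP below
    # is reachable and that agents are configured in the settings files.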
    from vstf.common.log import setup_logging
    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-sw_perf.log", clevel=logging.INFO)

    conn = Server("192.168.188.10")
    perf_settings = PerfSettings()
    flows_settings = FlowsSettings()
    tool_settings = ToolSettings()
    tester_settings = TesterSettings()
    flow_producer = FlowsProducer(conn, flows_settings)
    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
    perf = Performance(conn, provider)
    tests = perf_settings.settings
    for scenario, cases in tests.items():
        if not cases:
            continue
        for case in cases:
            casetag = case['case']
            tool = case['tool']
            protocol = case['protocol']
            profile = case['profile']
            ttype = case['type']
            sizes = case['sizes']

            flow_producer.create(scenario, casetag)
            result = perf.run(tool, protocol, ttype, sizes)
            LOG.info(result)


def main():
    from vstf.common.log import setup_logging
    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-performance.log", clevel=logging.INFO)
    from vstf.controller.database.dbinterface import DbManage
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument("case",
                        action="store",
                        help="test case like Ti-1, Tn-1, Tnv-1, Tu-1...")
    parser.add_argument("tool",
                        action="store",
                        choices=cst.TOOLS,
                        )
    parser.add_argument("protocol",
                        action="store",
                        choices=cst.TPROTOCOLS,
                        )
    parser.add_argument("profile",
                        action="store",
                        choices=cst.PROFILES,
                        )
    parser.add_argument("type",
                        action="store",
                        choices=cst.TTYPES,
                        )
    parser.add_argument("sizes",
                        action="store",
                        default="64",
                        help='test size list "64 128"')
    parser.add_argument("--affctl",
                        action="store_true",
                        help="if set, run CPU affinity control before testing")
    parser.add_argument("--monitor",
                        dest="monitor",
                        default="localhost",
                        action="store",
                        help="which ip to be monitored")
    args = parser.parse_args()

    LOG.info(args.monitor)
    conn = Server(host=args.monitor)
    db_mgr = DbManage()

    casetag = args.case
    tool = args.tool
    protocol = args.protocol
    profile = args.profile
    ttype = args.type
    sizes = map(lambda x: int(x), args.sizes.strip().split())

    flows_settings = FlowsSettings()
    tool_settings = ToolSettings()
    tester_settings = TesterSettings()
    flow_producer = FlowsProducer(conn, flows_settings)
    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
    perf = Performance(conn, provider)
    scenario = db_mgr.query_scenario(casetag)
    flow_producer.create(scenario, casetag)
    LOG.info(flows_settings.settings)
    result = perf.run(tool, protocol, ttype, sizes, args.affctl)
    LOG.info(result)


if __name__ == '__main__':
    main()