5 # see license for license details
11 from vstf.controller.sw_perf import model
12 from vstf.common import perfmark as mark
13 import vstf.common.constants as cst
14 from vstf.rpc_frame_work.rpc_producer import Server
15 from vstf.controller.settings.flows_settings import FlowsSettings
16 from vstf.controller.settings.tool_settings import ToolSettings
17 from vstf.controller.settings.perf_settings import PerfSettings
18 from vstf.controller.sw_perf.perf_provider import PerfProvider, get_agent_dict
19 from vstf.controller.sw_perf.flow_producer import FlowsProducer
20 from vstf.controller.settings.tester_settings import TesterSettings
21 from vstf.controller.fabricant import Fabricant
# Module-level logger; the entry points below configure it via setup_logging().
23 LOG = logging.getLogger(__name__)
class Performance(object):
    """Orchestrate a software performance test run.

    Builds sender/receiver/watcher objects from a provider description,
    drives bandwidth and latency tests over an RPC connection, and
    aggregates the measurements into result records.
    """

    def __init__(self, conn, provider):
        """
        :param conn: RPC connection (e.g. rpc_producer.Server) used to
            reach the remote agents.
        :param provider: PerfProvider-style object describing the test
            topology (senders, receivers, watchers, namespaces, durations).
        """
        # NOTE(review): several attribute initialisations were missing from
        # this line-sampled extraction; reconstructed from their usage in
        # the create_*/start_*/stop_* methods below.
        self._conn = conn
        self._provider = provider
        self._senders = []
        self._receivers = []
        self._watchers = []
        self._cpuwatcher = None
# Build the complete test fixture for `tool`/`tpro`: clean stale namespaces
# on every "cleaner" agent, install NIC drivers on every tester, then create
# namespaces, senders, receivers, NIC watchers and the CPU watcher.
# NOTE(review): this chunk is a line-sampled extraction (the leading integers
# are original line numbers); code is left byte-identical, comments only.
38 def create(self, tool, tpro):
40 agents = self._provider.get_cleaners(tool, tpro)
# NOTE(review): original lines 41-42 are missing -- presumably the
# `for agent in agents:` header that binds the `agent` used below.
43 cleaner = Fabricant(agent, self._conn)
44 cleaner.clean_all_namespace()
46 for tester_info in self._provider.get_testers:
47 dst = tester_info["agent"]
48 params = tester_info["params"]
50 driver_mgr = Fabricant(dst, self._conn)
51 ret = driver_mgr.install_drivers(drivers=params["drivers"])
# NOTE(review): original lines 52-53 missing -- `ret` is unused in the
# visible lines, likely logged there.
54 self.create_namespace(tool)
55 self.create_senders(tool, tpro)
56 self.create_receivers(tool, tpro)
57 self.create_watchers(tool)
58 self.create_cpuwatcher()
60 def destory(self, tool):
61 self.clear_namespace(tool)
# Configure namespace net-devices for `tool` on each agent via RPC:
# clear any previous device, then add the new configuration.
# NOTE(review): line-sampled extraction; code left byte-identical.
63 def create_namespace(self, tool):
64 devices = self._provider.get_namespaces(tool)
65 agents = get_agent_dict(devices)
# NOTE(review): original line 66 missing -- `agents` is unused in the
# visible lines; presumably a per-agent cleanup loop lived here.
67 for device in devices:
# NOTE(review): original line 68 missing -- presumably
# `dst = device["agent"]`, which both calls below depend on.
69 params = device["params"]
71 model.NetDeviceMgr.clear(dst, self._conn)
# NOTE(review): original lines 72-73 missing.
74 model.NetDeviceMgr.add(dst, self._conn, params)
76 def clear_namespace(self, tool):
77 devices = self._provider.get_namespaces(tool)
78 for device in devices:
80 params = device["params"]
81 model.NetDeviceMgr.remove(dst, self._conn, params)
83 def create_senders(self, tool, tpro):
84 sender_infos = self._provider.get_senders(tool, tpro)
85 LOG.info(sender_infos)
86 for sender_info in sender_infos:
87 dst = sender_info["agent"]
88 params = sender_info["params"]
89 send = model.Sender(dst, self._conn, tool, params)
90 self._senders.append(send)
92 def create_receivers(self, tool, tpro):
93 receiver_infos = self._provider.get_receivers(tool, tpro)
94 LOG.info(receiver_infos)
95 for receiver_info in receiver_infos:
96 dst = receiver_info["agent"]
97 params = receiver_info["params"]
98 receive = model.Receiver(dst, self._conn, tool, params)
99 self._receivers.append(receive)
101 def create_watchers(self, tool):
102 watcher_infos = self._provider.get_watchers(tool)
103 LOG.info(watcher_infos)
104 for watcher_info in watcher_infos:
105 dst = watcher_info["agent"]
106 params = watcher_info["params"]
107 watch = model.NicWatcher(dst, self._conn, params)
108 self._watchers.append(watch)
110 def create_cpuwatcher(self):
111 watcher_info = self._provider.get_cpuwatcher
112 LOG.info(watcher_info)
113 dst = watcher_info["agent"]
114 self._cpuwatcher = model.CpuWatcher(dst, self._conn)
116 def start_receivers(self, **kwargs):
117 for receiver in self._receivers:
118 receiver.start(**kwargs)
120 def start_senders(self, pktsize, **kwargs):
121 for sender in self._senders:
122 sender.start(pktsize, **kwargs)
124 def start_watchers(self):
125 for watcher in self._watchers:
128 def stop_receivers(self):
129 for receiver in self._receivers:
132 def stop_senders(self):
133 for sender in self._senders:
136 def stop_watchers(self):
137 for watcher in self._watchers:
140 def start_cpuwatcher(self):
142 self._cpuwatcher.start()
144 def stop_cpuwatcher(self):
146 self._cpuwatcher.stop()
# NOTE(review): the body of getlimitspeed (original lines 149-151) is
# missing from this extraction. run() below expects it to return the
# measured rate limit for (ptype, size) -- confirm against the full file.
148 def getlimitspeed(self, ptype, size):
# NOTE(review): the three lines below (original 152-155) appear to belong
# to a *different* method -- a CPU-affinity loader whose `def` line is also
# missing -- since they do not reference ptype/size at all.
152 ctl = self._provider.get_cpu_affctl
154 driver_mgr = Fabricant(ctl["agent"], self._conn)
155 ret = driver_mgr.affctl_load(policy=ctl["params"]["policy"])
# Warm-up pass: bring the full fixture up and push traffic (callers pass
# ratep=0) so that system/affinity state can settle before measuring.
# NOTE(review): line-sampled extraction -- original lines 163-165 and 167
# (presumably a duration sleep, sender stop and teardown) are missing;
# code left byte-identical.
158 def run_pre_affability_settings(self, tool, tpro, pktsize, **kwargs):
159 LOG.info("run_pre_affability_settings start")
160 self.create(tool, tpro)
161 self.start_receivers()
162 self.start_senders(pktsize, **kwargs)
166 self.stop_receivers()
168 LOG.info("run_pre_affability_settings end")
# Bandwidth measurement: start traffic, wait for rates to stabilise
# (wait_balance), sample NIC and CPU watchers for the configured duration,
# then stop everything.
# NOTE(review): line-sampled extraction -- original lines 179, 181 and 183
# (presumably stop_watchers, stop_senders and teardown) are missing; code
# left byte-identical.
170 def run_bandwidth_test(self, tool, tpro, pktsize, **kwargs):
171 LOG.info("run_bandwidth_test ")
172 self.create(tool, tpro)
173 self.start_receivers()
174 self.start_senders(pktsize, **kwargs)
175 time.sleep(self._provider.wait_balance(tool))
176 self.start_watchers()
177 self.start_cpuwatcher()
178 time.sleep(self._provider.duration(tool))
180 self.stop_cpuwatcher()
182 self.stop_receivers()
184 LOG.info("run_bandwidth_test end")
# Latency measurement: run traffic for the configured duration; latency
# figures are later read from the senders in result().
# NOTE(review): line-sampled extraction -- original lines 192 and 194
# (presumably stop_senders and teardown) are missing; code left
# byte-identical.
186 def run_latency_test(self, tool, tpro, pktsize, **kwargs):
187 LOG.info("run_latency_test start")
188 self.create(tool, tpro)
189 self.start_receivers()
190 self.start_senders(pktsize, **kwargs)
191 time.sleep(self._provider.duration(tool))
193 self.stop_receivers()
195 LOG.info("run_latency_test end")
# Top-level driver: for each packet size do a warm-up pass, then either a
# bandwidth+latency pair (throughput/frameloss) or a latency-only test,
# collecting per-size result dicts.
# NOTE(review): heavily line-sampled -- the `for size in sizes:` header,
# the `result = {}` initialisation, the bindings of bw_type/lat_type/
# lat_tool, the affctl handling and the final `return` are all in missing
# lines; code left byte-identical.
197 def run(self, tool, protocol, ttype, sizes, affctl=False):
200 pre_tpro = protocol + "_bw"
202 self.run_pre_affability_settings(tool, pre_tpro, size, ratep=0)
205 if ttype in ['throughput', 'frameloss']:
206 realspeed = self.getlimitspeed(ttype, size)
207 bw_tpro = protocol + "_bw"
209 self.run_bandwidth_test(tool, bw_tpro, size, ratep=realspeed)
210 bw_result = self.result(tool, bw_type)
214 lat_tpro = protocol + '_lat'
215 self.run_latency_test(lat_tool, lat_tpro, size, ratep=realspeed)
216 lat_result = self.result(tool, lat_type)
# NOTE(review): OfferedLoad is removed before merging the latency dict into
# the bandwidth dict -- presumably to keep the bandwidth figure; confirm.
219 lat_result.pop('OfferedLoad')
220 bw_result.update(lat_result)
221 result[size] = bw_result
223 elif ttype in ['latency']:
224 lat_tpro = protocol + '_lat'
226 self.run_latency_test(tool, lat_tpro, size, ratep=None)
227 lat_result = self.result(tool, lat_type)
228 result[size] = lat_result
# NOTE(review): original line 229 (presumably `else:`) is missing before
# this raise.
230 raise Exception("error:protocol type:%s" % (ttype))
# Aggregate watcher/sender measurements into a record dict keyed by the
# perfmark constants.
# NOTE(review): heavily line-sampled -- the `record` initialisation
# (original 235-248), the continuation lines of two round() calls (267-268,
# 274-275), the `count` initialisation and the final `return record` are
# in missing lines; code left byte-identical.
# NOTE: `print a, b` below is Python 2 syntax -- this module is Python 2.
233 def result(self, tool, ttype):
234 if ttype in {'throughput', 'frameloss'}:
249 cpu_data = self._cpuwatcher.result()
250 print self._cpuwatcher, cpu_data
# total busy percentage summed across all cores
252 cpu_usage = cpu_data['cpu_num'] * (100 - cpu_data['idle'])
253 cpu_mhz = cpu_data['cpu_mhz']
254 record[mark.cpu] = round(cpu_usage, cst.CPU_USAGE_ROUND)
255 record[mark.duration] = self._provider.duration(tool)
# accumulate per-NIC counters from every watcher
257 for watcher in self._watchers:
258 nic_data = watcher.result()
259 record[mark.rxCount] += nic_data['rxpck']
260 record[mark.txCount] += nic_data['txpck']
261 record[mark.bandwidth] += nic_data['rxpck/s']
262 record[mark.rxMbps] += nic_data['rxmB/s']
263 record[mark.txMbps] += nic_data['txmB/s']
265 if record[mark.txCount]:
# NOTE(review): the rounding-precision argument and closing paren of this
# round() call are on missing continuation lines (original 267-268).
266 record[mark.percentLoss] = round(100 * (1 - record[mark.rxCount] / record[mark.txCount]),
269 record[mark.percentLoss] = 100
# convert accumulated pkt/s to Mpps
271 record[mark.bandwidth] /= 1000000.0
272 if cpu_mhz and record[mark.cpu]:
# NOTE(review): continuation (original 274-275) with the precision arg is
# missing here as well.
273 record[mark.mppsGhz] = round(record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000),
276 record[mark.bandwidth] = round(record[mark.bandwidth], cst.RATEP_ROUND)
278 elif ttype in {'latency'}:
# average min/avg/max latency over all senders
285 minlatency, avglatency, maxlatency = 0, 0, 0
287 for sender in self._senders:
288 info = sender.result()
290 minlatency += info[mark.minLatency]
291 avglatency += info[mark.avgLatency]
292 maxlatency += info[mark.maxLatency]
# guard: `count` (initialised in a missing line, presumably the number of
# senders) is forced to 1 to avoid division by zero
293 count = 1 if not count else count
294 record[mark.minLatency] = round(minlatency / count, cst.TIME_ROUND)
295 record[mark.avgLatency] = round(avglatency / count, cst.TIME_ROUND)
296 record[mark.maxLatency] = round(maxlatency / count, cst.TIME_ROUND)
299 raise Exception('error:protocol type:%s' % ttype)
301 LOG.info('record:%s' % record)
# Ad-hoc integration entry (its `def` header is in missing lines ~303-305):
# builds the settings/provider stack against a hard-coded controller IP and
# runs every configured perf case.
# NOTE(review): line-sampled extraction -- the per-case loop header
# (original 319-321) and the `tool`/`ttype` bindings (323, 326) are in
# missing lines; code left byte-identical.
306 from vstf.common.log import setup_logging
307 setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-sw_perf.log", clevel=logging.INFO)
309 conn = Server("192.168.188.10")
310 perf_settings = PerfSettings()
311 flows_settings = FlowsSettings()
312 tool_settings = ToolSettings()
313 tester_settings = TesterSettings()
314 flow_producer = FlowsProducer(conn, flows_settings)
315 provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
316 perf = Performance(conn, provider)
317 tests = perf_settings.settings
318 for scenario, cases in tests.items():
322 casetag = case['case']
324 protocol = case['protocol']
325 profile = case['profile']
327 sizes = case['sizes']
329 flow_producer.create(scenario, casetag)
330 result = perf.run(tool, protocol, ttype, sizes)
# CLI entry point (its `def` header is in missing lines ~333-334): parse
# the test arguments, connect to the monitored host and run a single case.
# NOTE(review): line-sampled extraction -- several add_argument bodies, the
# DbManage instance behind `db_mgr`, and the casetag/tool/ttype/affctl
# bindings are in missing lines; code left byte-identical.
# NOTE: bare `map(...)` used as a list below confirms Python 2.
335 from vstf.common.log import setup_logging
336 setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-performance.log", clevel=logging.INFO)
337 from vstf.controller.database.dbinterface import DbManage
338 parser = argparse.ArgumentParser(add_help=True)
339 parser.add_argument("case",
341 help="test case like Ti-1, Tn-1, Tnv-1, Tu-1...")
342 parser.add_argument("tool",
346 parser.add_argument("protocol",
348 choices=cst.TPROTOCOLS,
350 parser.add_argument("profile",
352 choices=cst.PROFILES,
354 parser.add_argument("type",
358 parser.add_argument("sizes",
361 help='test size list "64 128"')
362 parser.add_argument("--affctl",
364 help="when input '--affctl', the performance will do affctl before testing")
365 parser.add_argument("--monitor",
369 help="which ip to be monitored")
370 args = parser.parse_args()
372 LOG.info(args.monitor)
373 conn = Server(host=args.monitor)
378 protocol = args.protocol
379 profile = args.profile
381 sizes = map(lambda x: int(x), args.sizes.strip().split())
383 flows_settings = FlowsSettings()
384 tool_settings = ToolSettings()
385 tester_settings = TesterSettings()
386 flow_producer = FlowsProducer(conn, flows_settings)
387 provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
388 perf = Performance(conn, provider)
389 scenario = db_mgr.query_scenario(casetag)
390 flow_producer.create(scenario, casetag)
391 LOG.info(flows_settings.settings)
392 result = perf.run(tool, protocol, ttype, sizes, affctl)
395 if __name__ == '__main__':