4 ## Copyright (c) 2010-2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
10 ## http://www.apache.org/licenses/LICENSE-2.0
12 ## Unless required by applicable law or agreed to in writing, software
13 ## distributed under the License is distributed on an "AS IS" BASIS,
14 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 ## See the License for the specific language governing permissions and
16 ## limitations under the License.
19 from __future__ import print_function
29 from logging.handlers import RotatingFileHandler
30 from logging import handlers
31 from prox_ctrl import prox_ctrl
# NOTE(review): this listing has the original file's line numbers fused into each
# line and many intermediate lines elided; code below is kept byte-identical.
# Default settings for the rapid test runner. Each of these module-level values
# can be overridden by the command-line options parsed further below.
41 env = "rapid.env" #Default string for environment
42 test_file = "basicrapid.test" #Default string for test
43 machine_map_file = "machine.map" #Default string for machine map file
44 loglevel="DEBUG" # sets log level for writing to file
45 screenloglevel="INFO" # sets log level for writing to screen
46 runtime=10 # time in seconds for 1 test run
47 configonly = False # IF True, the system will upload all the necessary config files to the VMs, but not start PROX and the actual testing
48 rundir = "/home/centos" # Directory where to find the tools in the machines running PROX
# Help text shown for -h/--help. The enclosing def (presumably "def usage():",
# original line ~50) is not visible in this listing — TODO confirm.
51 print("usage: runrapid [--version] [-v]")
52 print("                [--env ENVIRONMENT_NAME]")
53 print("                [--test TEST_NAME]")
54 print("                [--map MACHINE_MAP_FILE]")
55 print("                [--runtime TIME_FOR_TEST]")
56 print("                [--configonly False|True]")
57 print("                [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
58 print("                [-h] [--help]")
60 print("Command-line interface to runrapid")
62 print("optional arguments:")
63 print("  -v,  --version           Show program's version number and exit")
64 print("  --env ENVIRONMENT_NAME       Parameters will be read from ENVIRONMENT_NAME. Default is %s."%env)
65 print("  --test TEST_NAME       Test cases will be read from TEST_NAME. Default is %s."%test_file)
66 print("  --map MACHINE_MAP_FILE       Machine mapping will be read from MACHINE_MAP_FILE. Default is %s."%machine_map_file)
67 print("  --runtime    Specify time in seconds for 1 test run")
68 print("  --configonly    If this option is specified, only upload all config files to the VMs, do not run the tests")
69 print("  --log    Specify logging level for log file output, default is DEBUG")
70 print("  --screenlog    Specify logging level for screen output, default is INFO")
71 print("  -h, --help               Show help message and exit.")
# Command-line parsing: getopt.getopt() is called inside a try whose "try:" line
# (original line ~74) is elided from this listing; the matching handler follows.
# Each recognized option overwrites one of the module-level defaults above.
75 opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "env=", "test=", "map=", "runtime=","configonly","log=","screenlog="])
76 except getopt.GetoptError as err:
77 print("===========================================")
79 print("===========================================")
# Option dispatch loop (the "for opt, arg in opts:" header is elided here).
86 if opt in ["-h", "--help"]:
89 if opt in ["-v", "--version"]:
90 print("Rapid Automated Performance Indication for Dataplane "+version)
97 machine_map_file = arg
98 if opt in ["--runtime"]:
100 if opt in ["--configonly"]:
102 print('No actual runs, only uploading configuration files')
105 print ("Log level: "+ loglevel)
106 if opt in ["--screenlog"]:
108 print ("Screen Log level: "+ screenloglevel)
# Echo the effective configuration before the run starts.
110 print ("Using '"+env+"' as name for the environment")
111 print ("Using '"+test_file+"' for test case definition")
112 print ("Using '"+machine_map_file+"' for machine mapping")
113 print ("Runtime: "+ str(runtime))
# Trailing member of an ANSI color-code class (presumably "class bcolors:", its
# header and other members are elided from this listing — TODO confirm).
123 UNDERLINE = '\033[4m'
# Logging setup: a bare-message formatter for the console and a timestamped one
# for the rotating log file.
126 screen_formatter = logging.Formatter("%(message)s")
127 file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
129 # get a top-level logger,
131 # BUT PREVENT IT from propagating messages to the root logger
133 log = logging.getLogger()
# Translate the textual --log level (e.g. "DEBUG") into logging's numeric level.
134 numeric_level = getattr(logging, loglevel.upper(), None)
135 if not isinstance(numeric_level, int):
136 raise ValueError('Invalid log level: %s' % loglevel)
137 log.setLevel(numeric_level)
140 # create a console handler
141 # and set its log level to the command-line option
143 console_handler = logging.StreamHandler(sys.stdout)
144 #console_handler.setLevel(logging.INFO)
145 numeric_screenlevel = getattr(logging, screenloglevel.upper(), None)
146 if not isinstance(numeric_screenlevel, int):
147 raise ValueError('Invalid screenlog level: %s' % screenloglevel)
148 console_handler.setLevel(numeric_screenlevel)
149 console_handler.setFormatter(screen_formatter)
151 # create a file handler
152 # and set its log level
# Log file is named after the environment and test file, e.g. RUNrapid.env.basicrapid.test.log
154 log_file = 'RUN{}.{}.log'.format(env,test_file)
155 file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
156 #file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5)
157 file_handler.setLevel(numeric_level)
158 file_handler.setFormatter(file_formatter)
160 # add handlers to the logger
162 log.addHandler(file_handler)
163 log.addHandler(console_handler)
165 # Check if log exists and should therefore be rolled
166 needRoll = os.path.isfile(log_file)
169 # This is a stale log, so roll it
172 log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
174 # Roll over on application start
# NOTE(review): handlers[0] is the file handler added first above — doRollover()
# starts a fresh log file for this run.
175 log.handlers[0].doRollover()
178 log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
180 log.debug("runrapid.py version: "+version)
181 #========================================================================
# Open the PROX control socket on a machine where PROX was just launched,
# retrying until it answers. The retry-loop bookkeeping lines (attempt counter,
# loop header, sleep) are elided from this listing — TODO confirm against the
# full source before editing logic here.
182 def connect_socket(client):
184 log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
187 sock = client.prox_sock()
# Give up after the (elided) maximum number of attempts.
192 log.exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
193 raise Exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
195 log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
196 log.info("Connected to PROX on %s" % client.ip())
# SSH-connect to a VM that was just launched, retrying until it is reachable.
# The retry-loop scaffolding (attempt counter, while/try, client.connect()) is
# elided from this listing.
199 def connect_client(client):
201 log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
206 except RuntimeWarning, ex:
# Give up after the (elided) maximum number of attempts; otherwise keep retrying.
209 log.exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
210 raise Exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
212 log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
213 log.debug("Connected to VM on %s" % client.ip())
# Format one result row of the ASCII results table. Any measurement passed as
# None is rendered as "NA". The *_prefix arguments carry ANSI color codes so the
# caller can highlight pass/warn/fail fields. The "else:" arms of each None
# check are elided from this listing.
# NOTE(review): comparisons use "== None"; idiomatic Python is "is None" — can't
# change in a doc-only edit of a gapped listing.
215 def report_result(flow_number,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max,tx,rx,tot_drop,elapsed_time,speed_prefix='',lat_avg_prefix='',lat_max_prefix='',abs_drop_rate_prefix='',drop_rate_prefix=''):
216 if pps_req_tx == None:
217 pps_req_tx_str = '{0: >14}'.format('   NA     |')
219 pps_req_tx_str = '{:>7.3f} Mpps |'.format(pps_req_tx)
221 pps_tx_str = '{0: >14}'.format('   NA     |')
223 pps_tx_str = '{:>7.3f} Mpps |'.format(pps_tx)
224 if pps_sut_tx == None:
225 pps_sut_tx_str = '{0: >14}'.format('   NA     |')
227 pps_sut_tx_str = '{:>7.3f} Mpps |'.format(pps_sut_tx)
229 pps_rx_str = '{0: >24}'.format('NA        ')
231 pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps '.format(get_speed(pps_rx,size),pps_rx) + bcolors.ENDC
233 tot_drop_str = ' |       NA  | '
235 tot_drop_str = ' | {:>9.0f} | '.format(tot_drop)
236 if elapsed_time == None:
237 elapsed_time_str = ' NA |'
239 elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
# Single pipe-separated table row; NOTE(review) the loss ratio divides by tx —
# the tx==0 case is guarded earlier in run_iteration ("TX = 0" exception).
240 return('|{:>7}'.format(flow_number)+' |' + '{:>5.1f}'.format(speed) + '% '+speed_prefix +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+ pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str + pps_rx_str +lat_avg_prefix+ '| {:>5.0f}'.format(lat_avg)+' us |'+lat_max_prefix+'{:>5.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) + ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) + tot_drop_str +drop_rate_prefix+ '{:>5.2f}'.format(float(tx-rx)/tx) +bcolors.ENDC+' |' + elapsed_time_str)
# One measurement iteration: start the generator, take baseline (t1/t2)
# counters from generator and SUT, then accumulate per-second samples until
# requested_duration is reached (continued in the following lines). Several
# lines (retry counter r initialisation, sleep_time, zero-TX guards) are elided
# from this listing.
242 def run_iteration(gensock, sutsock, requested_duration,flow_number,size,speed):
245 time.sleep(sleep_time)
246 # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests might still be in flight
# TST009 retry loop: r counts retries, bounded by TST009_MAXr.
247 while (r < TST009_MAXr):
# t1: baseline generator counters before the cores start generating.
248 t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,gentasks)
249 t1_dp_rx = t1_rx - t1_non_dp_rx
250 t1_dp_tx = t1_tx - t1_non_dp_tx
251 gensock.start(gencores)
252 time.sleep(2) ## Needs to be 2 seconds since this is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
# t2: baseline after generation is running (SUT side first, then generator).
254 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
255 ##t2_sut_rx = t2_sut_rx - t2_sut_non_dp_rx
256 ##t2_sut_tx = t2_sut_tx - t2_sut_non_dp_tx
257 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
258 # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
259 # Measure latency statistics per second
260 lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz = gensock.lat_stats(latcores)
# Dataplane-only counters = raw counters minus non-dataplane (e.g. ARP) traffic.
262 dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx )
263 dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
264 tot_dp_drop = dp_tx - dp_rx
# Abort when nothing at all (or only non-dataplane traffic) was sent.
266 log.critical("TX = 0. Test interrupted since no packet has been sent.")
267 raise Exception("TX = 0")
269 log.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
270 raise Exception("Only non-dataplane packets (e.g. ARP) sent")
271 if test == 'fixed_rate':
272 log.info(report_result(flow_number,size,speed,None,None,None,None,lat_avg,lat_max, dp_tx, dp_rx , None, None))
# Reset the accumulators, then sample generator, SUT and latency statistics in
# a loop until every accumulated duration covers the requested duration.
273 tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
274 lat_avg = used_avg = 0
275 tot_lat_measurement_duration = float(0)
276 tot_core_measurement_duration = float(0)
277 tot_sut_core_measurement_duration = float(0)
278 tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
279 lat_avail = core_avail = sut_avail = False
280 while (tot_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_sut_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
# --- latency sample ---
282 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz = gensock.lat_stats(latcores)
283 single_lat_measurement_duration = (t3_lat_tsc - t2_lat_tsc) * 1.0 / lat_hz # time difference between the 2 measurements, expressed in seconds.
284 # Get statistics after some execution time
285 if single_lat_measurement_duration != 0:
286 # A second has passed in between two lat_stats requests. Hence we need to process the results
287 tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
288 if lat_min > lat_min_sample:
289 lat_min = lat_min_sample
290 if lat_max < lat_max_sample:
291 lat_max = lat_max_sample
292 lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
293 used_avg = used_avg + used_sample * single_lat_measurement_duration # and give it more weight.
294 t2_lat_tsc = t3_lat_tsc
# --- generator core sample ---
296 t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
297 single_core_measurement_duration = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
298 if single_core_measurement_duration!= 0:
299 stored_single_core_measurement_duration = single_core_measurement_duration
300 tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
301 delta_rx = t3_rx - t2_rx
303 delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
304 tot_non_dp_rx += delta_non_dp_rx
305 delta_tx = t3_tx - t2_tx
307 delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
308 tot_non_dp_tx += delta_non_dp_tx
309 delta_dp_tx = delta_tx -delta_non_dp_tx
310 delta_dp_rx = delta_rx -delta_non_dp_rx
311 delta_dp_drop = delta_dp_tx - delta_dp_rx
312 tot_dp_drop += delta_dp_drop
313 delta_drop = t3_drop - t2_drop
314 tot_drop += delta_drop
315 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
# --- SUT core sample ---
318 t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
# NOTE(review): this divides SUT tsc deltas by the *generator's* tsc_hz instead
# of sut_tsc_hz fetched on the previous line — looks like a bug if the two
# machines have different TSC frequencies; verify against the full source.
319 single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
320 if single_sut_core_measurement_duration!= 0:
321 stored_single_sut_core_measurement_duration = single_sut_core_measurement_duration
322 tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
323 tot_sut_rx += t3_sut_rx - t2_sut_rx
324 tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
325 delta_sut_tx = t3_sut_tx - t2_sut_tx
326 tot_sut_tx += delta_sut_tx
327 delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
328 tot_sut_non_dp_tx += delta_sut_non_dp_tx
329 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
# In fixed_rate mode, print a live result row once a sample from all three
# sources (latency, generator, SUT) is available.
331 if test == 'fixed_rate':
332 if lat_avail == core_avail == sut_avail == True:
333 lat_avail = core_avail = sut_avail = False
334 pps_req_tx = (delta_tx + delta_drop - delta_rx)/stored_single_core_measurement_duration/1000000
335 pps_tx = delta_tx/stored_single_core_measurement_duration/1000000
337 pps_sut_tx = delta_sut_tx/stored_single_sut_core_measurement_duration/1000000
340 pps_rx = delta_rx/stored_single_core_measurement_duration/1000000
341 log.info(report_result(flow_number,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg_sample,lat_max_sample,delta_dp_tx,delta_dp_rx,tot_dp_drop,stored_single_core_measurement_duration))
# Stop the generator, take final (t4) counters, compute the per-iteration
# averages and decide (elided TST009 logic) whether to retry or return.
343 gensock.stop(gencores)
# Latency figures were accumulated weighted by sample duration; normalize here.
345 lat_avg = lat_avg / float(tot_lat_measurement_duration)
346 used_avg = used_avg / float(tot_lat_measurement_duration)
# Spin until PROX publishes a fresh stats snapshot (tsc changes).
348 while t4_tsc == t2_tsc:
349 t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,gentasks)
350 if test == 'fixed_rate':
351 t4_lat_tsc = t2_lat_tsc
352 while t4_lat_tsc == t2_lat_tsc:
353 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz = gensock.lat_stats(latcores)
354 lat_max = lat_max_sample
355 lat_avg = lat_avg_sample
356 delta_rx = t4_rx - t2_rx
357 delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
358 delta_tx = t4_tx - t2_tx
359 delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
360 delta_dp_tx = delta_tx -delta_non_dp_tx
361 delta_dp_rx = delta_rx -delta_non_dp_rx
364 tot_dp_drop += delta_dp_tx - delta_dp_rx
# fixed_rate exit path: report per-window drop rate and leave the retry loop.
369 drop_rate = 100.0*(dp_tx-dp_rx)/dp_tx
370 tot_core_measurement_duration = None
371 break ## Not really needed since the while loop will stop when evaluating the value of r
# Normal exit path: rates averaged over the whole measurement duration.
373 pps_req_tx = (tot_tx + tot_drop - tot_rx)/tot_core_measurement_duration/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
374 pps_tx = tot_tx/tot_core_measurement_duration/1000000.0 # tot_tx is all generated packets actually accepted by the interface
375 pps_rx = tot_rx/tot_core_measurement_duration/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
377 pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
# End-to-end dataplane totals over the full t1..t4 window.
380 dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
381 dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
382 tot_dp_drop = dp_tx - dp_rx
383 drop_rate = 100.0*tot_dp_drop/dp_tx
# TST009 acceptance: below threshold, zero-loss criterion, or too many drops.
384 if ((drop_rate < DROP_RATE_TRESHOLD) or (tot_dp_drop == DROP_RATE_TRESHOLD ==0) or (tot_dp_drop > TST009_MAXz)):
386 return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration)
# Compute the next speed to try, based on whether the previous run passed.
# TST009 mode does a binary search over the TST009_S rate table; the default
# mode (last line) bisects the [minspeed, maxspeed] interval. The branches
# updating minspeed/maxspeed and the success/failure tests are elided here.
388 def new_speed(speed,size,success):
389 if test == 'fixed_rate':
396 TST009_L = TST009_m + 1
398 TST009_R = TST009_m - 1
399 TST009_m = int ((TST009_L + TST009_R)/2)
400 return (get_percentageof10Gbs(TST009_S[TST009_m],size))
408 return ((minspeed + maxspeed)/2.0)
# Return the initial speed for a size sweep and (re)initialize the search
# state (TST009 binary-search bounds, or minspeed/maxspeed for the default
# bisection). Several initialisation lines are elided from this listing.
410 def get_start_speed_and_init(size):
411 if test == 'fixed_rate':
418 TST009_R = TST009_n - 1
419 TST009_m = int((TST009_L + TST009_R) / 2)
420 return (get_percentageof10Gbs(TST009_S[TST009_m],size))
425 maxspeed = STARTSPEED
# True when the speed search has converged: TST009 bounds have met, or the
# bisection interval is within the configured ACCURACY.
428 def resolution_achieved():
429 if test == 'fixed_rate':
432 return (TST009_L == TST009_R)
434 return ((maxspeed - minspeed) <= ACCURACY)
def get_percentageof10Gbs(pps_speed, size):
    """Convert a packet rate in pps into the corresponding % of a 10 Gb/s link.

    24 bytes are added to *size* per frame for Ethernet line overhead, matching
    the inverse conversions in get_pps() and get_speed().
    (Restored from a listing whose line-number prefixes broke the syntax.)
    """
    mpps = pps_speed / 1000000.0
    return mpps * 0.08 * (size + 24)
def get_pps(speed, size):
    """Convert a speed in % of a 10 Gb/s link into a packet rate in Mpps.

    The denominator counts 8 bits per byte over size + 24 bytes of per-frame
    line overhead; inverse of get_percentageof10Gbs().
    (Restored from a listing whose line-number prefixes broke the syntax.)
    """
    bits_per_frame = 8 * (size + 24)
    return speed * 100.0 / bits_per_frame
def get_speed(packet_speed, size):
    """Convert a packet rate in Mpps into a bit rate in Gb/s.

    Uses 8 * (size + 24) bits per frame (24 bytes of Ethernet line overhead);
    inverse direction of get_pps().
    (Restored from a listing whose line-number prefixes broke the syntax.)
    """
    frame_bits = 8 * (size + 24)
    return packet_speed / 1000.0 * frame_bits
# Main throughput test: for every packet size and flow count, search for the
# highest speed that meets the drop/latency criteria and record the result to
# CSV (and optionally a Prometheus PushGateway). Many lines are elided.
448 def run_flow_size_test(gensock,sutsock):
451 #fieldnames = ['Flows','PacketSize','Gbps','Mpps','AvgLatency','MaxLatency','PacketsDropped','PacketDropRate']
452 fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Sent','Received','Lost','LostTotal']
453 writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
455 gensock.start(latcores)
456 for size in packet_size_list:
# Program the generator's frame size and the length fields inside the packet.
458 gensock.set_size(gencores,0,size) # This is setting the frame size
459 gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
460 gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
461 # This will only work when sending UDP packets. For different protocols and ethernet types, we would need a different calculation
462 log.info("+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+")
463 log.info("| UDP, "+ '{:>5}'.format(size+4) +" bytes, different number of flows by randomizing SRC & DST UDP port                                                                                                                    |")
464 log.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
465 log.info("| Flows  | Speed requested  | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|L.Ratio|Time|")
466 log.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
# Per-flow-count speed search: randomize the UDP ports to create the requested
# number of flows, then iterate run_iteration() at bisected speeds.
467 for flow_number in flow_size_list:
469 gensock.reset_stats()
471 sutsock.reset_stats()
# flows maps a flow count to the number of randomized bits in each port field.
472 source_port,destination_port = flows[flow_number]
473 gensock.set_random(gencores,0,34,source_port,2)
474 gensock.set_random(gencores,0,36,destination_port,2)
476 speed = get_start_speed_and_init(size)
480 print(str(flow_number)+' flows: Measurement ongoing at speed: ' + str(round(speed,2)) + '%      ',end='\r')
482 # Start generating packets at requested speed (in % of a 10Gb/s link)
# Speed is spread evenly over all generator cores and tasks.
483 gensock.speed(speed / len(gencores) / len (gentasks), gencores, gentasks)
485 # Get statistics now that the generation is stable and initial ARP messages are dealt with
486 pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max, abs_tx,abs_rx,abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration = run_iteration(gensock,sutsock,float(runtime),flow_number,size,speed)
488 retry_warning = bcolors.WARNING + ' {:1} retries needed'.format(r) +  bcolors.ENDC
491 # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
492 # If the sum is lower than 95, it means that more than 5% of the latency measurements were dropped for accuracy reasons.
493 if (drop_rate + lat_used * 100) < 95:
494 lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(lat_used*100) +  bcolors.ENDC
497 # The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
498 # The drop rate success can be achieved in 2 ways: either the drop rate is below a threshold, or we want that no packet has been lost during the test
499 # This can be specified by putting 0 in the .test file
500 if test == 'fixed_rate':
# Success branch: remember the current measurement as the best-so-far ("end*"
# variables) and color the result row. Several assignments are elided.
508 endabs_dropped = abs_dropped
509 enddrop_rate = drop_rate
513 speed_prefix = lat_avg_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
# Pass criteria: drop rate under threshold (or zero-loss mode) AND both latency
# thresholds respected.
514 elif ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)) and (lat_avg< LAT_AVG_TRESHOLD) and (lat_max < LAT_MAX_TRESHOLD):
515 lat_avg_prefix = bcolors.ENDC
516 lat_max_prefix = bcolors.ENDC
517 abs_drop_rate_prefix = bcolors.ENDC
# Warn when the generator delivered noticeably less than requested (>1% off).
518 drop_rate_prefix = bcolors.ENDC
519 if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))>0.01:
520 speed_prefix = bcolors.WARNING
522 gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(get_pps(speed,size), pps_tx, abs_tx_fail) + bcolors.ENDC
524 gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(get_pps(speed,size), pps_tx) + bcolors.ENDC
526 speed_prefix = bcolors.ENDC
529 endspeed_prefix = speed_prefix
530 endpps_req_tx = pps_req_tx
532 endpps_sut_tx = pps_sut_tx
536 ##endabs_dropped = abs_dropped
537 endabs_dropped = None
538 enddrop_rate = drop_rate
541 if lat_warning or gen_warning or retry_warning:
542 endwarning = '|        | {:167.167} |'.format(retry_warning + lat_warning + gen_warning)
544 success_message=' SUCCESS'
545 log.debug(report_result(attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix)+ success_message + retry_warning + lat_warning + gen_warning)
# Failure branch: color each offending field red (bcolors.FAIL) so the log row
# shows which criterion failed, then bisect to a new speed and loop until the
# search resolution is achieved; finally report and persist the best result.
547 success_message=' FAILED'
549 abs_drop_rate_prefix = bcolors.ENDC
550 if ((abs_dropped>0) and (DROP_RATE_TRESHOLD ==0)):
551 abs_drop_rate_prefix = bcolors.FAIL
552 if (drop_rate < DROP_RATE_TRESHOLD):
553 drop_rate_prefix = bcolors.ENDC
555 drop_rate_prefix = bcolors.FAIL
556 if (lat_avg< LAT_AVG_TRESHOLD):
557 lat_avg_prefix = bcolors.ENDC
559 lat_avg_prefix = bcolors.FAIL
560 if (lat_max< LAT_MAX_TRESHOLD):
561 lat_max_prefix = bcolors.ENDC
563 lat_max_prefix = bcolors.FAIL
564 if (((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001):
565 speed_prefix = bcolors.ENDC
567 speed_prefix = bcolors.FAIL
569 log.debug(report_result(attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix)+ success_message + retry_warning + lat_warning + gen_warning)
570 speed = new_speed(speed, size, success)
571 if resolution_achieved():
# Search converged: log the best ("end*") result and write the CSV row.
574 log.info(report_result(flow_number,size,endspeed,endpps_req_tx,endpps_tx,endpps_sut_tx,endpps_rx,endlat_avg,endlat_max,endabs_tx,endabs_rx,endabs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix))
576 log.info (endwarning)
577 log.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
578 writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':get_pps(endspeed,size),'GeneratedPPS':endpps_req_tx,'SentPPS':endpps_tx,'ForwardedPPS':endpps_sut_tx,'ReceivedPPS':endpps_rx,'AvgLatencyUSEC':endlat_avg,'MaxLatencyUSEC':endlat_max,'Sent':endabs_tx,'Received':endabs_rx,'Lost':endabs_dropped,'LostTotal':endabs_dropped})
580 URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env
581 DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nSent {}\nReceived {}\nLost {}\nLostTotal {}\n'.format(flow_number,size+4,get_pps(endspeed,size),endpps_req_tx,endpps_tx,endpps_sut_tx,endpps_rx,endlat_avg,endlat_max,endabs_tx,endabs_rx,endabs_Dropped,endabs_dropped)
582 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
583 response = requests.post(url=URL, data=DATA,headers=HEADERS)
# Degenerate case: the search collapsed to (near) zero speed — report it and
# stop the latency cores before moving to the next configuration.
585 log.info('|{:>7}'.format(str(flow_number))+" | Speed 0 or close to 0")
586 gensock.stop(latcores)
# Periodically sample per-core RX/TX/drop counters from one or more PROX
# instances for `runtime` seconds, logging a table row and a CSV row per
# instance per second (and optionally pushing to a Prometheus PushGateway).
588 def run_core_stats(socks):
589 fieldnames = ['PROXID','Time','Received','Sent','NonDPReceived','NonDPSent','Delta','NonDPDelta','Dropped']
590 writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
592 log.info("+------------------------------------------------------------------------------------------------------------------+")
593 log.info("| Measuring core statistics on 1 or more PROX instances                                                            |")
594 log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
595 log.info("| PROX ID   |    Time   |    RX      |     TX     | non DP RX  | non DP TX  |   TX - RX  | nonDP TX-RX|  DROP TOT  |")
596 log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
599 duration = float(runtime)
# Per-socket counter snapshots: old_* is the previous sample, new_* the current.
601 old_rx = []; old_non_dp_rx = []; old_tx = []; old_non_dp_tx = []; old_drop = []; old_tx_fail = []; old_tsc = []
602 new_rx = []; new_non_dp_rx = []; new_tx = []; new_non_dp_tx = []; new_drop = []; new_tx_fail = []; new_tsc = []
603 sockets_to_go = len (socks)
# Take the initial baseline for every PROX instance.
604 for i,sock in enumerate(socks,start=0):
606 old_rx.append(0); old_non_dp_rx.append(0); old_tx.append(0); old_non_dp_tx.append(0); old_drop.append(0); old_tx_fail.append(0); old_tsc.append(0)
607 old_rx[-1], old_non_dp_rx[-1], old_tx[-1], old_non_dp_tx[-1], old_drop[-1], old_tx_fail[-1], old_tsc[-1], tsc_hz = sock.core_stats(cores[i],tasks)
608 new_rx.append(0); new_non_dp_rx.append(0); new_tx.append(0); new_non_dp_tx.append(0); new_drop.append(0); new_tx_fail.append(0); new_tsc.append(0)
609 while (duration > 0):
611 # Get statistics after some execution time
612 for i,sock in enumerate(socks,start=0):
613 new_rx[i], new_non_dp_rx[i], new_tx[i], new_non_dp_tx[i], new_drop[i], new_tx_fail[i], new_tsc[i], tsc_hz = sock.core_stats(cores[i],tasks)
# Deltas since the previous sample of this instance.
614 drop = new_drop[i]-old_drop[i]
615 rx = new_rx[i] - old_rx[i]
616 tx = new_tx[i] - old_tx[i]
617 non_dp_rx = new_non_dp_rx[i] - old_non_dp_rx[i]
618 non_dp_tx = new_non_dp_tx[i] - old_non_dp_tx[i]
619 tsc = new_tsc[i] - old_tsc[i]
623 old_drop[i] = new_drop[i]
624 old_rx[i] = new_rx[i]
625 old_tx[i] = new_tx[i]
626 old_non_dp_rx[i] = new_non_dp_rx[i]
627 old_non_dp_tx[i] = new_non_dp_tx[i]
628 old_tsc[i] = new_tsc[i]
629 tot_drop[i] = tot_drop[i] + tx - rx
630 log.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
631 writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NonDPReceived':non_dp_rx,'NonDPSent':non_dp_tx,'Delta':tx-rx,'NonDPDelta':non_dp_tx-non_dp_rx,'Dropped':tot_drop[i]})
633 URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env + str(i)
# NOTE(review): the metric name '\n Received' carries a stray leading space —
# runtime string, left untouched in this doc-only edit; verify PushGateway
# accepts/intends it.
634 DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNonDPReceived {}\nNonDPSent {}\nDelta {}\nNonDPDelta {}\nDropped {}\n'.format(i,duration,rx,tx,non_dp_rx,non_dp_tx,tx-rx,non_dp_tx-non_dp_rx,tot_drop[i])
635 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
636 response = requests.post(url=URL, data=DATA,headers=HEADERS)
# Count down one second once every instance has reported for this interval.
637 if sockets_to_go == 0:
638 duration = duration - 1
639 sockets_to_go = len (socks)
640 log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
# Same sampling loop as run_core_stats(), but for per-port NIC counters
# (RX, TX, mbuf allocation failures, ierrors+imissed) via multi_port_stats().
642 def run_port_stats(socks):
643 fieldnames = ['PROXID','Time','Received','Sent','NoMbufs','iErrMiss']
644 writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
646 log.info("+---------------------------------------------------------------------------+")
647 log.info("| Measuring port statistics on 1 or more PROX instances                     |")
648 log.info("+-----------+-----------+------------+------------+------------+------------+")
649 log.info("| PROX ID   |    Time   |    RX      |     TX     | no MBUFS   | ierr&imiss |")
650 log.info("+-----------+-----------+------------+------------+------------+------------+")
653 duration = float(runtime)
654 old_rx = []; old_tx = []; old_no_mbufs = []; old_errors = []; old_tsc = []
655 new_rx = []; new_tx = []; new_no_mbufs = []; new_errors = []; new_tsc = []
656 sockets_to_go = len (socks)
# Take the initial baseline for every PROX instance.
657 for i,sock in enumerate(socks,start=0):
658 old_rx.append(0); old_tx.append(0); old_no_mbufs.append(0); old_errors.append(0); old_tsc.append(0)
659 old_rx[-1], old_tx[-1], old_no_mbufs[-1], old_errors[-1], old_tsc[-1] = sock.multi_port_stats(ports[i])
660 new_rx.append(0); new_tx.append(0); new_no_mbufs.append(0); new_errors.append(0); new_tsc.append(0)
661 while (duration > 0):
663 # Get statistics after some execution time
664 for i,sock in enumerate(socks,start=0):
665 new_rx[i], new_tx[i], new_no_mbufs[i], new_errors[i], new_tsc[i] = sock.multi_port_stats(ports[i])
# Deltas since the previous sample of this instance.
666 rx = new_rx[i] - old_rx[i]
667 tx = new_tx[i] - old_tx[i]
668 no_mbufs = new_no_mbufs[i] - old_no_mbufs[i]
669 errors = new_errors[i] - old_errors[i]
670 tsc = new_tsc[i] - old_tsc[i]
674 old_rx[i] = new_rx[i]
675 old_tx[i] = new_tx[i]
676 old_no_mbufs[i] = new_no_mbufs[i]
677 old_errors[i] = new_errors[i]
678 old_tsc[i] = new_tsc[i]
679 log.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(no_mbufs)+' | '+'{:>10.0f}'.format(errors)+' |')
680 writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NoMbufs':no_mbufs,'iErrMiss':errors})
682 URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env + str(i)
683 DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNoMbufs {}\niErrMiss {}\n'.format(i,duration,rx,tx,no_mbufs,errors)
684 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
685 response = requests.post(url=URL, data=DATA,headers=HEADERS)
# Count down one second once every instance has reported for this interval.
686 if sockets_to_go == 0:
687 duration = duration - 1
688 sockets_to_go = len (socks)
689 log.info("+-----------+-----------+------------+------------+------------+------------+")
# Interrupt test: for each PROX instance, snapshot the per-core IRQ duration
# histogram, run for `runtime` seconds, then print how often each core was
# interrupted per bucket (interruptions/second).
691 def run_irqtest(socks):
692 log.info("+----------------------------------------------------------------------------------------------------------------------------")
693 log.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic   ")
694 log.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and    ")
695 log.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was      ")
696 log.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout   ")
697 log.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 ")
698 log.info("+----------------------------------------------------------------------------------------------------------------------------")
700 for sock_index,sock in enumerate(socks,start=0):
701 buckets=socks[sock_index].show_irq_buckets(1)
702 print('Measurement ongoing ... ',end='\r')
703 socks[sock_index].stop(cores[sock_index])
# (cores+1) x (buckets+1) tables: row/column 0 hold labels, the rest counters.
704 old_irq = [[0 for x in range(len(buckets)+1)] for y in range(len(cores[sock_index])+1)]
705 irq = [[0 for x in range(len(buckets)+1)] for y in range(len(cores[sock_index])+1)]
706 irq[0][0] = 'bucket us'
707 for j,bucket in enumerate(buckets,start=1):
708 irq[0][j] = '<'+ bucket
709 irq[0][-1] = '>'+ buckets [-2]
710 socks[sock_index].start(cores[sock_index])
# Baseline snapshot of every core's bucket counters before the timed run.
712 for j,bucket in enumerate(buckets,start=1):
713 for i,irqcore in enumerate(cores[sock_index],start=1):
714 old_irq[i][j] = socks[sock_index].irq_stats(irqcore,j-1)
715 time.sleep(float(runtime))
716 socks[sock_index].stop(cores[sock_index])
717 for i,irqcore in enumerate(cores[sock_index],start=1):
718 irq[i][0]='core %s '%irqcore
719 for j,bucket in enumerate(buckets,start=1):
# Normalize the counter delta to interrupts per second over the run.
720 diff =  socks[sock_index].irq_stats(irqcore,j-1) - old_irq[i][j]
724 irq[i][j] = str(round(diff/float(runtime), 2))
725 log.info('Results for PROX instance %s'%sock_index)
727 log.info(''.join(['{:>12}'.format(item) for item in row]))
729 def run_impairtest(gensock,sutsock):
# Send UDP traffic through an impairment gateway (which drops/delays packets)
# at a fixed requested speed, log one result row per iteration, append it to
# the CSV report and push the same metrics to a Prometheus PushGateway.
# Uses module globals: FLOWSIZE, size (presumably PACKETSIZE-derived), speed,
# gencores/latcores/gentasks, runtime, data_csv_file, PushGateway, TestName.
# NOTE(review): original lines 732-733, 739, 748, 750-751, 753-754, 758 and
# 760-761 are absent from this chunk -- the initialisation of `speed` and
# `attempts`, the enclosing iteration loop and the default `lat_warning = ''`
# are therefore not visible; documented only where the code shows it.
730 fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Dropped','DropRate']
731 writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
734 log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+")
735 log.info("| Generator is sending UDP ("+'{:>5}'.format(FLOWSIZE)+" flow) packets ("+ '{:>5}'.format(size+4) +" bytes) to SUT via GW dropping and delaying packets. SUT sends packets back. Use ctrl-c to stop the test |")
736 log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
737 log.info("| Test | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio |")
738 log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
740 gensock.set_size(gencores,0,size) # This is setting the frame size
741 gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
742 gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
743 # This will only work when using sending UDP packets. For different protocols and ethernet types, we would need a different calculation
# Bit-mask templates selecting how many source/destination port bits are
# randomized; see the `flows` table defined at module level.
744 source_port,destination_port = flows[FLOWSIZE]
745 gensock.set_random(gencores,0,34,source_port,2)
746 gensock.set_random(gencores,0,36,destination_port,2)
747 gensock.start(latcores)
# Requested speed is split evenly over all generator cores and tasks.
749 gensock.speed(speed / len(gencores) / len(gentasks), gencores, gentasks)
752 print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
755 # Get statistics now that the generation is stable and NO ARP messages any more
756 pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx_fail, abs_tx, lat_min, lat_used, r, actual_duration = run_iteration(gensock,sutsock,runtime)
757 drop_rate = 100.0*abs_dropped/abs_tx
# Warn when too small a fraction of packets carried usable latency data.
759 lat_warning = bcolors.FAIL + ' Potential latency accuracy problem: {:>3.0f}%'.format(lat_used*100) + bcolors.ENDC
762 log.info('|{:>7}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(pps_tx) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(pps_rx)+' Mpps | '+ '{:>9.0f}'.format(lat_avg)+' us | '+ '{:>9.0f}'.format(lat_max)+' us | '+ '{:>14d}'.format(abs_dropped)+ ' |''{:>9.2f}'.format(drop_rate)+ '% |'+lat_warning)
763 writer.writerow({'Flows':FLOWSIZE,'PacketSize':(size+4),'RequestedPPS':get_pps(speed,size),'GeneratedPPS':pps_req_tx,'SentPPS':pps_tx,'ForwardedPPS':pps_sut_tx_str,'ReceivedPPS':pps_rx,'AvgLatencyUSEC':lat_avg,'MaxLatencyUSEC':lat_max,'Dropped':abs_dropped,'DropRate':drop_rate})
# Push the same row to the Prometheus PushGateway (plain text exposition,
# one 'Name value' pair per line, job = test name, instance = environment).
765 URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env
766 DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nDropped {}\nDropRate {}\n'.format(FLOWSIZE,size+4,get_pps(speed,size),pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max,abs_dropped,drop_rate)
767 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
768 response = requests.post(url=URL, data=DATA,headers=HEADERS)
770 def run_warmuptest(gensock):
# Warm up the data path before a measurement: generate traffic at WARMUPSPEED
# for two WARMUPTIME windows so ARP resolution and switch tables settle.
# Uses module globals: WARMUPSPEED, WARMUPTIME, size, FLOWSIZE, flows,
# gencores, gentasks, genstatcores.
771 # Running at low speed to make sure the ARP messages can get through.
772 # If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
773 # Note however that if we would run the test steps during a very long time, the ARP would expire in the switch.
774 # PROX will send a new ARP request every second so chances are very low that they will all fail to get through
775 gensock.speed(WARMUPSPEED / len(gencores) /len (gentasks), gencores, gentasks)
777 gensock.set_size(gencores,0,size) # This is setting the frame size
778 gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
779 gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
# Write a 1-byte value 1 at packet offset 56 for the first warm-up pass;
# reset to 50 below. NOTE(review): the semantics of this field are not
# visible in this file -- confirm against the PROX gen config.
780 gensock.set_value(gencores,0,56,1,1)
781 # This will only work when using sending UDP packets. For different protocols and ethernet types, we would need a different calculation
# Port-randomization bit masks for the requested flow count (see `flows`).
782 source_port,destination_port = flows[FLOWSIZE]
783 gensock.set_random(gencores,0,34,source_port,2)
784 gensock.set_random(gencores,0,36,destination_port,2)
# First warm-up window: run, wait, stop.
785 gensock.start(genstatcores)
786 time.sleep(WARMUPTIME)
787 gensock.stop(genstatcores)
# Second warm-up window with the offset-56 byte restored to 50.
788 gensock.set_value(gencores,0,56,50,1)
789 time.sleep(WARMUPTIME)
791 # To generate a desired number of flows, PROX will randomize the bits in source and destination ports, as specified by the bit masks in the flows variable.
# Each entry maps a flow count (a power of two) to a pair of 16-bit mask
# templates [source_port, destination_port]; an 'X' marks a bit PROX will
# randomize, so the number of X bits across both masks is log2(flow count).
# NOTE(review): the dict opener (e.g. `flows = {`) sits on an original line
# not visible in this chunk.
793 1: ['1000000000000000','1000000000000000'],\
794 2: ['1000000000000000','100000000000000X'],\
795 4: ['100000000000000X','100000000000000X'],\
796 8: ['100000000000000X','10000000000000XX'],\
797 16: ['10000000000000XX','10000000000000XX'],\
798 32: ['10000000000000XX','1000000000000XXX'],\
799 64: ['1000000000000XXX','1000000000000XXX'],\
800 128: ['1000000000000XXX','100000000000XXXX'],\
801 256: ['100000000000XXXX','100000000000XXXX'],\
802 512: ['100000000000XXXX','10000000000XXXXX'],\
803 1024: ['10000000000XXXXX','10000000000XXXXX'],\
804 2048: ['10000000000XXXXX','1000000000XXXXXX'],\
805 4096: ['1000000000XXXXXX','1000000000XXXXXX'],\
806 8192: ['1000000000XXXXXX','100000000XXXXXXX'],\
807 16384: ['100000000XXXXXXX','100000000XXXXXXX'],\
808 32768: ['100000000XXXXXXX','10000000XXXXXXXX'],\
809 65536: ['10000000XXXXXXXX','10000000XXXXXXXX'],\
810 131072: ['10000000XXXXXXXX','1000000XXXXXXXXX'],\
811 262144: ['1000000XXXXXXXXX','1000000XXXXXXXXX'],\
812 524288: ['1000000XXXXXXXXX','100000XXXXXXXXXX'],\
813 1048576:['100000XXXXXXXXXX','100000XXXXXXXXXX'],}
# ---- Top-level setup: open the CSV report and parse the three config files
# (test file, environment file, machine map), then collect per-VM data.
832 data_file = 'RUN{}.{}.csv'.format(env,test_file)
833 data_csv_file = open(data_file,'w')
# Test definition (which tests to run, thresholds, per-TestM sections).
834 testconfig = ConfigParser.RawConfigParser()
835 testconfig.read(test_file)
836 required_number_of_test_machines = testconfig.get('DEFAULT', 'total_number_of_test_machines')
837 TestName = testconfig.get('DEFAULT', 'name')
# Optional Prometheus PushGateway endpoint for publishing results.
838 if testconfig.has_option('DEFAULT', 'PushGateway'):
839 PushGateway = testconfig.get('DEFAULT', 'PushGateway')
840 log.info('Measurements will be pushed to %s'%PushGateway)
# Environment description (ssh credentials, per-machine M%d sections).
# NOTE(review): the corresponding config.read(...) call is on an original
# line not visible in this chunk.
843 config = ConfigParser.RawConfigParser()
# Mapping from logical TestM%d machines to physical M%d machines.
845 machine_map = ConfigParser.RawConfigParser()
846 machine_map.read(machine_map_file)
847 key = config.get('ssh', 'key')
848 user = config.get('ssh', 'user')
849 total_number_of_machines = config.get('rapid', 'total_number_of_machines')
# Fail early if the test requires more machines than the environment has.
850 if int(required_number_of_test_machines) > int(total_number_of_machines):
851 log.exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines))
852 raise Exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines))
# Collect admin IP, dataplane MAC/IP and a space-separated hex form of the
# dataplane IP (used in the generated PROX lua parameter files) per machine.
853 for vm in range(1, int(total_number_of_machines)+1):
854 vmAdminIP.append(config.get('M%d'%vm, 'admin_ip'))
855 vmDPmac.append(config.get('M%d'%vm, 'dp_mac'))
856 vmDPIP.append(config.get('M%d'%vm, 'dp_ip'))
857 ip = vmDPIP[-1].split('.')
858 hexDPIP.append(hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2))
# ---- Per-test-machine preparation: resolve the machine mapping, read each
# PROX config file, classify the machine by its cfg filename (generator,
# gateway generator, swap/SUT, secgw, ...) and write a parameters .lua file.
# NOTE(review): several original lines are absent from this chunk (859, 870,
# 879-880, 884-885, 930, 933) -- e.g. the `else` arms after the cores/ports
# options and the final `else` for unmatched cfg names are not fully visible.
860 for vm in range(1, int(required_number_of_test_machines)+1):
# machine_map holds 1-based physical indices; store them 0-based.
861 machine_index.append(int(machine_map.get('TestM%d'%vm, 'machine_index'))-1)
862 prox_socket.append(testconfig.getboolean('TestM%d'%vm, 'prox_socket'))
863 for vm in range(1, int(required_number_of_test_machines)+1):
864 if prox_socket[vm-1]:
865 prox_launch_exit.append(testconfig.getboolean('TestM%d'%vm, 'prox_launch_exit'))
866 config_file.append(testconfig.get('TestM%d'%vm, 'config_file'))
867 # Looking for all task definitions in the PROX cfg files. Constructing a list of all tasks used
868 textfile = open (config_file[-1], 'r')
869 filetext = textfile.read()
871 tasks_for_this_cfg = set(re.findall("task\s*=\s*(\d+)",filetext))
# Generate the lua parameter file PROX will read on this machine.
872 with open('{}_{}_parameters{}.lua'.format(env,test_file,vm), "w") as f:
873 f.write('name="%s"\n'% testconfig.get('TestM%d'%vm, 'name'))
874 f.write('local_ip="%s"\n'% vmDPIP[machine_index[vm-1]])
875 f.write('local_hex_ip="%s"\n'% hexDPIP[machine_index[vm-1]])
876 if testconfig.has_option('TestM%d'%vm, 'cores'):
877 cores.append(ast.literal_eval(testconfig.get('TestM%d'%vm, 'cores')))
878 f.write('cores="%s"\n'% ','.join(map(str, cores[-1])))
881 if testconfig.has_option('TestM%d'%vm, 'ports'):
882 ports.append(ast.literal_eval(testconfig.get('TestM%d'%vm, 'ports')))
883 f.write('ports="%s"\n'% ','.join(map(str, ports[-1])))
# Classify by cfg filename. Generator without gateway: needs dest_* params.
886 if re.match('(l2){0,1}gen(_bare){0,1}.*\.cfg',config_file[-1]):
887 gencores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'gencores'))
888 latcores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'latcores'))
889 genstatcores = gencores + latcores
890 gentasks = tasks_for_this_cfg
# Generators are started explicitly by the tests, not at PROX launch.
891 auto_start.append(False)
892 mach_type.append('gen')
893 f.write('gencores="%s"\n'% ','.join(map(str, gencores)))
894 f.write('latcores="%s"\n'% ','.join(map(str, latcores)))
895 destVMindex = int(testconfig.get('TestM%d'%vm, 'dest_vm'))-1
896 f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
897 f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
898 f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
# Generator sending through a gateway VM: also needs gw_* params.
899 elif re.match('(l2){0,1}gen_gw.*\.cfg',config_file[-1]):
900 gencores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'gencores'))
901 latcores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'latcores'))
902 genstatcores = gencores + latcores
903 gentasks = tasks_for_this_cfg
904 auto_start.append(False)
905 mach_type.append('gen')
906 f.write('gencores="%s"\n'% ','.join(map(str, gencores)))
907 f.write('latcores="%s"\n'% ','.join(map(str, latcores)))
908 gwVMindex = int(testconfig.get('TestM%d'%vm, 'gw_vm')) -1
909 f.write('gw_ip="%s"\n'% vmDPIP[machine_index[gwVMindex]])
910 f.write('gw_hex_ip="%s"\n'% hexDPIP[machine_index[gwVMindex]])
911 destVMindex = int(testconfig.get('TestM%d'%vm, 'dest_vm'))-1
912 f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
913 f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
914 f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
# Swap config: this machine is the system under test.
915 elif re.match('(l2){0,1}swap.*\.cfg',config_file[-1]):
916 sutstatcores = cores[-1]
917 auto_start.append(True)
918 mach_type.append('sut')
# secgw1: ingress security gateway, forwards to dest_vm; no stats role.
919 elif re.match('secgw1.*\.cfg',config_file[-1]):
920 auto_start.append(True)
921 mach_type.append('none')
922 destVMindex = int(testconfig.get('TestM%d'%vm, 'dest_vm'))-1
923 f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
924 f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
925 f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
# secgw2: egress security gateway, acts as SUT for stats purposes.
926 elif re.match('secgw2.*\.cfg',config_file[-1]):
927 sutstatcores = cores[-1]
928 auto_start.append(True)
929 mach_type.append('sut')
931 auto_start.append(True)
932 mach_type.append('none')
# Accumulate the union of task ids seen across all cfg files.
934 tasks = tasks_for_this_cfg.union(tasks)
935 log.debug("Tasks detected in all PROX config files %r"%tasks)
936 #####################################################################################
# Cleanup handler registered with atexit: quit the PROX sockets this run
# started and close the ssh client connections.
# NOTE(review): the `def exit_handler():` line and parts of the body (original
# lines 937, 941, 943-946) are not visible in this chunk.
938 log.debug ('exit cleanup')
939 for index, sock in enumerate(socks):
# Only quit PROX instances that this script launched (see socks_control).
940 if socks_control[index]:
942 for client in clients:
947 atexit.register(exit_handler)
# ---- Connect to each test machine, upload a devbind script + PROX config +
# parameter file, optionally launch PROX, and open the control sockets.
# NOTE(review): this chunk is missing original lines 950, 957, 959-961, 963,
# 965-967, 969-973, 982, 985, 987, 993 -- in particular the f.write(newText)
# calls between the newText assignments and the if/else around the two prox
# command variants are not visible.
949 for vm in range(0, int(required_number_of_test_machines)):
951 clients.append(prox_ctrl(vmAdminIP[machine_index[vm]], key,user))
952 connect_client(clients[-1])
953 # Creating script to bind the right network interface to the poll mode driver
954 devbindfile = '{}_{}_devbindvm{}.sh'.format(env,test_file, vm+1)
955 with open(devbindfile, "w") as f:
# Find the kernel netdev that owns this VM's dataplane MAC.
956 newText= 'link="$(ip -o link | grep '+vmDPmac[machine_index[vm]]+' |cut -d":" -f 2)"\n'
# If a netdev is found it is not yet bound to DPDK: bind it with igb_uio.
958 newText= 'if [ -n "$link" ];\n'
962 newText= ' echo Need to bind\n'
964 newText= ' sudo ' + rundir + '/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $('+rundir+'/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)\n'
968 newText= ' echo Assuming port is already bound to DPDK\n'
# Make the generated script executable, ship it and run it on the VM.
974 st = os.stat(devbindfile)
975 os.chmod(devbindfile, st.st_mode | stat.S_IEXEC)
976 clients[-1].scp_put('./%s'%devbindfile, rundir+'/devbind.sh')
977 cmd = 'sudo ' + rundir+ '/devbind.sh'
978 clients[-1].run_cmd(cmd)
979 log.debug("devbind.sh running on VM%d"%(vm+1))
# Upload the PROX config and the generated lua parameters for this VM.
980 clients[-1].scp_put('./%s'%config_file[vm], rundir+'/%s'%config_file[vm])
981 clients[-1].scp_put('./{}_{}_parameters{}.lua'.format(env,test_file, vm+1), rundir + '/parameters.lua')
983 if prox_launch_exit[vm]:
984 log.debug("Starting PROX on VM%d"%(vm+1))
# Two launch variants; the selecting condition (original 985/987) is not
# visible here -- presumably keyed on configonly/auto_start.
986 cmd = 'sudo ' +rundir + '/prox/build/prox -t -o cli -f ' + rundir + '/%s'%config_file[vm]
988 cmd = 'sudo ' +rundir + '/prox/build/prox -e -t -o cli -f ' + rundir + '/%s'%config_file[vm]
989 clients[-1].fork_cmd(cmd, 'PROX Testing on TestM%d'%(vm+1))
# Remember whether we launched PROX here so exit_handler knows to quit it.
990 socks_control.append(prox_launch_exit[vm])
991 socks.append(connect_socket(clients[-1]))
992 sock_type.append(mach_type[vm])
994 def get_BinarySearchParams() :
# Load the thresholds and start speed for a binary-search throughput test
# from the [BinarySearchParams] section of the test config into globals.
# NOTE(review): original lines 998-1002 and 1008-1011 are absent -- the
# `global` declarations for ACCURACY and STARTSPEED (and possibly defaults)
# are not visible in this chunk.
995 global DROP_RATE_TRESHOLD
996 global LAT_AVG_TRESHOLD
997 global LAT_MAX_TRESHOLD
1003 DROP_RATE_TRESHOLD = float(testconfig.get('BinarySearchParams', 'drop_rate_threshold'))
1004 LAT_AVG_TRESHOLD = float(testconfig.get('BinarySearchParams', 'lat_avg_threshold'))
1005 LAT_MAX_TRESHOLD = float(testconfig.get('BinarySearchParams', 'lat_max_threshold'))
# Binary-search termination accuracy and initial speed (percent of line rate).
1006 ACCURACY = float(testconfig.get('BinarySearchParams', 'accuracy'))
1007 STARTSPEED = float(testconfig.get('BinarySearchParams', 'startspeed'))
1012 def get_FixedRateParams() :
# Configure a fixed-rate run: disable all pass/fail thresholds (set to inf,
# so every iteration "passes") and read packet sizes, flow counts and the
# fixed speed from the current [test%d] section.
# NOTE(review): original lines 1018-1021, 1025-1027 and 1031 are absent --
# e.g. the `global STARTSPEED` declaration is not visible in this chunk.
1013 global DROP_RATE_TRESHOLD
1014 global LAT_AVG_TRESHOLD
1015 global LAT_MAX_TRESHOLD
1016 global flow_size_list
1017 global packet_size_list
1022 DROP_RATE_TRESHOLD = inf
1023 LAT_AVG_TRESHOLD = inf
1024 LAT_MAX_TRESHOLD = inf
1028 packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
1029 flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
1030 STARTSPEED = float(testconfig.get('test%d'%test_nr, 'speed'))
1032 def get_TST009SearchParams() :
# Load parameters for an ETSI TST 009 style throughput search from the
# [TST009SearchParams] section: only the drop rate is thresholded (latency
# thresholds are inf) and the search rates S are precomputed in StepSize
# increments up to MAXFramesPerSecondAllIngress.
# NOTE(review): original lines 1036-1038, 1041-1044, 1047, 1056-1057 and
# 1061-1063 are absent -- e.g. the `global` declarations for TST009_MAXr,
# TST009_MAXz, TST009_n, TST009_R, TST009_S are not visible in this chunk.
1033 global DROP_RATE_TRESHOLD
1034 global LAT_AVG_TRESHOLD
1035 global LAT_MAX_TRESHOLD
1039 global TST009_MAXFramesAllIngress
1040 global TST009_StepSize
# Drop-rate threshold is optional; default to 0 (no loss tolerated).
1045 if testconfig.has_option('TST009SearchParams', 'drop_rate_threshold'):
1046 DROP_RATE_TRESHOLD = float(testconfig.get('TST009SearchParams', 'drop_rate_threshold'))
1048 DROP_RATE_TRESHOLD = 0
1049 LAT_AVG_TRESHOLD = inf
1050 LAT_MAX_TRESHOLD = inf
1051 TST009_MAXr = float(testconfig.get('TST009SearchParams', 'MAXr'))
1052 TST009_MAXz = float(testconfig.get('TST009SearchParams', 'MAXz'))
1053 TST009_MAXFramesAllIngress = int(testconfig.get('TST009SearchParams', 'MAXFramesPerSecondAllIngress'))
1054 TST009_StepSize = int(testconfig.get('TST009SearchParams', 'StepSize'))
# Number of discrete rates in the search; R is the highest (0-based) index.
1055 TST009_n = int(ceil(TST009_MAXFramesAllIngress / TST009_StepSize))
1058 TST009_R = TST009_n - 1
1059 for m in range(0, TST009_n):
1060 TST009_S.append((m+1) * TST009_StepSize)
1064 ####################################################
1066 # Best to run the flow test at the end since otherwise the tests coming after might be influenced by the big number of entries in the switch flow tables
1067 ####################################################
# ---- Main dispatch: locate the generator and SUT control sockets (first
# machine of each type; -1 if absent), then run each [test%d] section in
# order, branching on its 'test' key.
1068 gensock_index = sock_type.index('gen') if 'gen' in sock_type else -1
1069 sutsock_index = sock_type.index('sut') if 'sut' in sock_type else -1
1070 number_of_tests = testconfig.get('DEFAULT', 'number_of_tests')
1071 for test_nr in range(1, int(number_of_tests)+1):
1072 test=testconfig.get('test%d'%test_nr,'test')
# Binary-search flow/size sweep.
1074 if test == 'flowsizetest':
1075 get_BinarySearchParams()
1076 packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
1077 flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
1078 run_flow_size_test(socks[gensock_index],socks[sutsock_index])
# Same sweep driven by TST 009 search parameters.
1079 elif test == 'TST009test':
1080 get_TST009SearchParams()
1081 packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
1082 flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
1083 run_flow_size_test(socks[gensock_index],socks[sutsock_index])
# Fixed-rate run: thresholds disabled, single configured speed.
1084 elif test == 'fixed_rate':
1085 get_FixedRateParams()
1086 run_flow_size_test(socks[gensock_index],socks[sutsock_index])
1087 elif test == 'corestats':
1088 run_core_stats(socks)
1089 elif test == 'portstats':
1090 run_port_stats(socks)
# Impairment-gateway test reuses the binary-search thresholds.
1091 elif test == 'impairtest':
1092 get_BinarySearchParams()
1093 PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
1094 FLOWSIZE = int(testconfig.get('test%d'%test_nr, 'flowsize'))
1095 run_impairtest(socks[gensock_index],socks[sutsock_index])
1096 elif test == 'irqtest':
# NOTE(review): this branch's body (original line 1097, presumably
# `run_irqtest(socks)`) is not visible in this chunk.
1098 elif test == 'warmuptest':
1099 PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
1100 FLOWSIZE = int(testconfig.get('test%d'%test_nr, 'flowsize'))
1101 WARMUPSPEED = int(testconfig.get('test%d'%test_nr, 'warmupspeed'))
1102 WARMUPTIME = int(testconfig.get('test%d'%test_nr, 'warmuptime'))
1103 run_warmuptest(socks[gensock_index])
1104 ####################################################