4 ## Copyright (c) 2010-2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
10 ## http://www.apache.org/licenses/LICENSE-2.0
12 ## Unless required by applicable law or agreed to in writing, software
13 ## distributed under the License is distributed on an "AS IS" BASIS,
14 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 ## See the License for the specific language governing permissions and
16 ## limitations under the License.
19 from __future__ import print_function
29 from logging.handlers import RotatingFileHandler
30 from logging import handlers
31 from prox_ctrl import prox_ctrl
# Default values for the runrapid settings; each of these can be overridden
# from the command line (see the getopt handling further down).
41 env = "rapid.env" #Default string for environment
42 test_file = "basicrapid.test" #Default string for test
43 machine_map_file = "machine.map" #Default string for machine map file
44 loglevel="DEBUG" # sets log level for writing to file
45 screenloglevel="INFO" # sets log level for writing to screen
46 runtime=10 # time in seconds for 1 test run
47 configonly = False # If True, the system will upload all the necessary config files to the VMs, but not start PROX and the actual testing
48 rundir = "/home/centos" # Directory where to find the tools in the machines running PROX
51 print("usage: runrapid [--version] [-v]")
52 print(" [--env ENVIRONMENT_NAME]")
53 print(" [--test TEST_NAME]")
54 print(" [--map MACHINE_MAP_FILE]")
55 print(" [--runtime TIME_FOR_TEST]")
56 print(" [--configonly False|True]")
57 print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
58 print(" [-h] [--help]")
60 print("Command-line interface to runrapid")
62 print("optional arguments:")
63 print(" -v, --version Show program's version number and exit")
64 print(" --env ENVIRONMENT_NAME Parameters will be read from ENVIRONMENT_NAME. Default is %s."%env)
65 print(" --test TEST_NAME Test cases will be read from TEST_NAME. Default is %s."%test_file)
66 print(" --map MACHINE_MAP_FILE Machine mapping will be read from MACHINE_MAP_FILE. Default is %s."%machine_map_file)
67 print(" --runtime Specify time in seconds for 1 test run")
68 print(" --configonly If this option is specified, only upload all config files to the VMs, do not run the tests")
69 print(" --log Specify logging level for log file output, default is DEBUG")
70 print(" --screenlog Specify logging level for screen output, default is INFO")
71 print(" -h, --help Show help message and exit.")
# Command-line option parsing (module-level script code).
# NOTE(review): interior lines are elided in this extract — the enclosing
# `try:` (the `except getopt.GetoptError` below implies one), several
# elif/else branches and the assignments for --env/--test/--runtime etc.
# are missing. Confirm against the full source before editing.
75 opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "env=", "test=", "map=", "runtime=","configonly","log=","screenlog="])
76 except getopt.GetoptError as err:
77 print("===========================================")
79 print("===========================================")
86 if opt in ["-h", "--help"]:
89 if opt in ["-v", "--version"]:
90 print("Rapid Automated Performance Indication for Dataplane "+version)
97 machine_map_file = arg
98 if opt in ["--runtime"]:
100 if opt in ["--configonly"]:
102 print('No actual runs, only uploading configuration files')
105 print ("Log level: "+ loglevel)
106 if opt in ["--screenlog"]:
108 print ("Screen Log level: "+ screenloglevel)
# Echo the effective configuration so the operator can verify the run setup.
110 print ("Using '"+env+"' as name for the environment")
111 print ("Using '"+test_file+"' for test case definition")
112 print ("Using '"+machine_map_file+"' for machine mapping")
113 print ("Runtime: "+ str(runtime))
123 UNDERLINE = '\033[4m'
# Logging setup: one root logger with two handlers — a rotating file handler
# (full detail, level from --log) and a console handler (level from
# --screenlog). NOTE(review): some interior lines are elided in this extract.
126 screen_formatter = logging.Formatter("%(message)s")
127 file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
129 # get a top-level logger,
131 # BUT PREVENT IT from propagating messages to the root logger
133 log = logging.getLogger()
# Translate the loglevel string (e.g. "DEBUG") into logging's numeric level;
# getattr returns None for an unknown name, which the isinstance check rejects.
134 numeric_level = getattr(logging, loglevel.upper(), None)
135 if not isinstance(numeric_level, int):
136 raise ValueError('Invalid log level: %s' % loglevel)
137 log.setLevel(numeric_level)
140 # create a console handler
141 # and set its log level to the command-line option
143 console_handler = logging.StreamHandler(sys.stdout)
144 #console_handler.setLevel(logging.INFO)
145 numeric_screenlevel = getattr(logging, screenloglevel.upper(), None)
146 if not isinstance(numeric_screenlevel, int):
147 raise ValueError('Invalid screenlog level: %s' % screenloglevel)
148 console_handler.setLevel(numeric_screenlevel)
149 console_handler.setFormatter(screen_formatter)
151 # create a file handler
152 # and set its log level
154 log_file = 'RUN{}.{}.log'.format(env,test_file)
155 file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
156 #file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5)
157 file_handler.setLevel(numeric_level)
158 file_handler.setFormatter(file_formatter)
160 # add handlers to the logger
162 log.addHandler(file_handler)
163 log.addHandler(console_handler)
165 # Check if log exists and should therefore be rolled
166 needRoll = os.path.isfile(log_file)
169 # This is a stale log, so roll it
172 log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
174 # Roll over on application start
# handlers[0] is the file handler — it was added to the logger first (see
# addHandler order above); doRollover() starts a fresh RUN*.log file.
175 log.handlers[0].doRollover()
178 log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
180 log.debug("runrapid.py version: "+version)
181 #========================================================================
# Open the PROX control socket on a machine where PROX was just launched,
# retrying until it answers. NOTE(review): the retry loop / try-except /
# sleep bookkeeping (original lines between these) are elided in this
# extract — the structure is inferred, not visible; confirm against the
# full source.
182 def connect_socket(client):
184     log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
187     sock = client.prox_sock()
# Reached when all attempts were exhausted: log and abort the test run.
192     log.exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
193     raise Exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
195     log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
196     log.info("Connected to PROX on %s" % client.ip())
# Establish the SSH/control connection to a freshly launched VM, retrying
# until it is reachable. NOTE(review): interior lines are elided in this
# extract (the retry loop, the connect call inside a try, sleeps).
# NOTE(review): 'except RuntimeWarning, ex:' below is Python 2-only syntax
# and is a SyntaxError under Python 3 (should be 'except RuntimeWarning as
# ex:'); the getopt section elsewhere in this file already uses the
# Python 3 'as err' form — confirm the intended interpreter.
199 def connect_client(client):
201 log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
206 except RuntimeWarning, ex:
209 log.exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
210 raise Exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
212 log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
213 log.debug("Connected to VM on %s" % client.ip())
# Format one result row for the ASCII result table that is logged during a
# test. Any measurement passed as None is rendered as 'NA'. The *_prefix
# arguments are ANSI color escape strings (bcolors.*) used to highlight
# pass/warn/fail fields; lat_perc_max=True renders the percentile latency
# with a '>' marker (value exceeded the highest histogram bucket).
# NOTE(review): the 'else:' lines of each None-check are elided in this
# extract; each formatted branch below pairs with an elided counterpart.
215 def report_result(flow_number,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,tx,rx,tot_drop,elapsed_time,speed_prefix='',lat_avg_prefix='',lat_perc_prefix='',lat_max_prefix='',abs_drop_rate_prefix='',drop_rate_prefix=''):
216 if pps_req_tx == None:
217 pps_req_tx_str = '{0: >14}'.format(' NA |')
219 pps_req_tx_str = '{:>7.3f} Mpps |'.format(pps_req_tx)
221 pps_tx_str = '{0: >14}'.format(' NA |')
223 pps_tx_str = '{:>7.3f} Mpps |'.format(pps_tx)
224 if pps_sut_tx == None:
225 pps_sut_tx_str = '{0: >14}'.format(' NA |')
227 pps_sut_tx_str = '{:>7.3f} Mpps |'.format(pps_sut_tx)
229 pps_rx_str = '{0: >24}'.format('NA ')
231 pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps '.format(get_speed(pps_rx,size),pps_rx) + bcolors.ENDC
233 tot_drop_str = ' | NA | '
235 tot_drop_str = ' | {:>9.0f} | '.format(tot_drop)
237 lat_perc_str = ' |{:^10.10}|'.format('NA')
238 elif lat_perc_max == True:
239 lat_perc_str = ' |>{}{:>5.0f} us{} |'.format(lat_perc_prefix,float(lat_perc), bcolors.ENDC)
241 lat_perc_str = ' | {}{:>5.0f} us{} |'.format(lat_perc_prefix,float(lat_perc), bcolors.ENDC)
242 if elapsed_time == None:
243 elapsed_time_str = ' NA |'
245 elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
# Assemble the complete '|'-separated table row. NOTE(review): the drop
# ratio term (tx-rx)/tx divides by tx — callers must not pass tx == 0.
246 return('|{:>7}'.format(flow_number)+' |' + '{:>5.1f}'.format(speed) + '% '+speed_prefix +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+ pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str + pps_rx_str +lat_avg_prefix+ '| {:>5.0f}'.format(lat_avg)+' us'+lat_perc_str+lat_max_prefix+'{:>6.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) + ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) + tot_drop_str +drop_rate_prefix+ '{:>5.2f}'.format(float(tx-rx)/tx) +bcolors.ENDC+' |' + elapsed_time_str)
# Run one measurement iteration at a given speed: start the generator,
# sample generator/SUT core stats and latency histograms repeatedly for
# requested_duration seconds, then stop and aggregate into rates, the
# configured latency percentile and the dataplane drop rate. Retries up to
# TST009_MAXr times (outer while loop). Returns a 17-tuple (see the final
# return statement).
# NOTE(review): a large number of interior lines are elided in this
# extract (loop initialisation such as r/sleep_time, try/else branches,
# 'tx = t2_tx - t1_tx' style assignments). Comments below only describe
# what is visible; confirm anything structural against the full source.
248 def run_iteration(gensock, sutsock, requested_duration,flow_number,size,speed):
251 while (r < TST009_MAXr):
252 time.sleep(sleep_time)
253 # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
# Baseline (t1) counters before the generator is started; dp = dataplane
# traffic only (total minus non-dataplane such as ARP).
254 t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,gentasks)
255 t1_dp_rx = t1_rx - t1_non_dp_rx
256 t1_dp_tx = t1_tx - t1_non_dp_tx
257 gensock.start(gencores)
258 time.sleep(2) ## Needs to be 2 seconds since this the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
260 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
261 ##t2_sut_rx = t2_sut_rx - t2_sut_non_dp_rx
262 ##t2_sut_tx = t2_sut_tx - t2_sut_non_dp_tx
263 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
# NOTE(review): 'tx' here is presumably assigned on an elided line
# (original line 264, likely 'tx = t2_tx - t1_tx') — confirm.
265 dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx )
266 dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
267 tot_dp_drop = dp_tx - dp_rx
# Sanity checks: abort when nothing (or only non-dataplane traffic) was sent.
269 log.critical("TX = 0. Test interrupted since no packet has been sent.")
270 raise Exception("TX = 0")
272 log.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
273 raise Exception("Only non-dataplane packets (e.g. ARP) sent")
274 # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
275 # Measure latency statistics per second
276 lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = gensock.lat_stats(latcores)
277 lat_samples = sum(buckets)
# Walk the latency histogram until LAT_PERCENTILE of the samples is
# covered; the bucket index converts to microseconds further down.
279 for sample_percentile, bucket in enumerate(buckets,start=1):
280 sample_count += bucket
281 if sample_count > (lat_samples * LAT_PERCENTILE):
283 if sample_percentile == len(buckets):
284 percentile_max = True
286 percentile_max = False
# Convert bucket index to microseconds: each bucket is 2**BUCKET_SIZE_EXP
# TSC ticks wide, divided by the latency clock in MHz.
287 sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (float(lat_hz)/float(10**6))
288 if test == 'fixed_rate':
289 log.info(report_result(flow_number,size,speed,None,None,None,None,lat_avg,sample_percentile,percentile_max,lat_max, dp_tx, dp_rx , None, None))
# Reset the aggregation state for the timed measurement loop below.
290 tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
291 lat_avg = used_avg = 0
292 buckets_total = [0] * 128
294 tot_lat_measurement_duration = float(0)
295 tot_core_measurement_duration = float(0)
296 tot_sut_core_measurement_duration = float(0)
297 tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
298 lat_avail = core_avail = sut_avail = False
# Keep sampling until all three measurement clocks (gen cores, SUT cores,
# latency) have each covered the requested duration.
299 while (tot_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_sut_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
301 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = gensock.lat_stats(latcores)
302 single_lat_measurement_duration = (t3_lat_tsc - t2_lat_tsc) * 1.0 / lat_hz # time difference between the 2 measurements, expressed in seconds.
303 # Get statistics after some execution time
304 if single_lat_measurement_duration != 0:
305 # A second has passed in between to lat_stats requests. Hence we need to process the results
306 tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
307 if lat_min > lat_min_sample:
308 lat_min = lat_min_sample
309 if lat_max < lat_max_sample:
310 lat_max = lat_max_sample
311 lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
312 used_avg = used_avg + used_sample * single_lat_measurement_duration # and give it more weigth.
313 lat_samples = sum(buckets)
314 tot_lat_samples += lat_samples
316 for sample_percentile, bucket in enumerate(buckets,start=1):
317 sample_count += bucket
318 if sample_count > lat_samples * LAT_PERCENTILE:
320 if sample_percentile == len(buckets):
321 percentile_max = True
323 percentile_max = False
324 sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (float(lat_hz)/float(10**6))
# Accumulate the per-sample histogram into the run-wide histogram.
325 buckets_total = [buckets_total[i] + buckets[i] for i in range(len(buckets_total))]
326 t2_lat_tsc = t3_lat_tsc
# Generator core counters: accumulate deltas since the previous sample.
328 t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
329 single_core_measurement_duration = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
330 if single_core_measurement_duration!= 0:
331 stored_single_core_measurement_duration = single_core_measurement_duration
332 tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
333 delta_rx = t3_rx - t2_rx
335 delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
336 tot_non_dp_rx += delta_non_dp_rx
337 delta_tx = t3_tx - t2_tx
339 delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
340 tot_non_dp_tx += delta_non_dp_tx
341 delta_dp_tx = delta_tx -delta_non_dp_tx
342 delta_dp_rx = delta_rx -delta_non_dp_rx
343 delta_dp_drop = delta_dp_tx - delta_dp_rx
344 tot_dp_drop += delta_dp_drop
345 delta_drop = t3_drop - t2_drop
346 tot_drop += delta_drop
347 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
# Same accumulation for the SUT core counters.
350 t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
351 single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
352 if single_sut_core_measurement_duration!= 0:
353 stored_single_sut_core_measurement_duration = single_sut_core_measurement_duration
354 tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
355 tot_sut_rx += t3_sut_rx - t2_sut_rx
356 tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
357 delta_sut_tx = t3_sut_tx - t2_sut_tx
358 tot_sut_tx += delta_sut_tx
359 delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
360 tot_sut_non_dp_tx += delta_sut_non_dp_tx
361 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
# For fixed_rate tests, print an intermediate result row once a full set
# of latency + gen + SUT samples is available.
363 if test == 'fixed_rate':
364 if lat_avail == core_avail == sut_avail == True:
365 lat_avail = core_avail = sut_avail = False
366 pps_req_tx = (delta_tx + delta_drop - delta_rx)/stored_single_core_measurement_duration/1000000
367 pps_tx = delta_tx/stored_single_core_measurement_duration/1000000
369 pps_sut_tx = delta_sut_tx/stored_single_sut_core_measurement_duration/1000000
372 pps_rx = delta_rx/stored_single_core_measurement_duration/1000000
373 log.info(report_result(flow_number,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg_sample,sample_percentile,percentile_max,lat_max_sample,delta_dp_tx,delta_dp_rx,tot_dp_drop,stored_single_core_measurement_duration))
375 gensock.stop(gencores)
# Time-weighted averages over the whole measurement window.
377 lat_avg = lat_avg / float(tot_lat_measurement_duration)
378 used_avg = used_avg / float(tot_lat_measurement_duration)
# Final (t4) counters; poll until the TSC has actually advanced.
380 while t4_tsc == t2_tsc:
381 t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,gentasks)
382 if test == 'fixed_rate':
383 t4_lat_tsc = t2_lat_tsc
384 while t4_lat_tsc == t2_lat_tsc:
385 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = gensock.lat_stats(latcores)
387 lat_samples = sum(buckets)
388 for percentile, bucket in enumerate(buckets,start=1):
389 sample_count += bucket
390 if sample_count > lat_samples * LAT_PERCENTILE:
392 if percentile == len(buckets):
393 percentile_max = True
395 percentile_max = False
396 percentile = percentile * float(2 ** BUCKET_SIZE_EXP) / (float(lat_hz)/float(10**6))
397 lat_max = lat_max_sample
398 lat_avg = lat_avg_sample
399 delta_rx = t4_rx - t2_rx
400 delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
401 delta_tx = t4_tx - t2_tx
402 delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
403 delta_dp_tx = delta_tx -delta_non_dp_tx
404 delta_dp_rx = delta_rx -delta_non_dp_rx
407 tot_dp_drop += delta_dp_tx - delta_dp_rx
412 drop_rate = 100.0*(dp_tx-dp_rx)/dp_tx
413 tot_core_measurement_duration = None
414 break ## Not really needed since the while loop will stop when evaluating the value of r
# Non-fixed_rate path: percentile over the accumulated run-wide histogram.
417 for percentile, bucket in enumerate(buckets_total,start=1):
418 sample_count += bucket
419 if sample_count > tot_lat_samples * LAT_PERCENTILE:
421 if percentile == len(buckets):
422 percentile_max = True
424 percentile_max = False
425 percentile = percentile * float(2 ** BUCKET_SIZE_EXP) / (float(lat_hz)/float(10**6))
426 pps_req_tx = (tot_tx + tot_drop - tot_rx)/tot_core_measurement_duration/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
427 pps_tx = tot_tx/tot_core_measurement_duration/1000000.0 # tot_tx is all generated packets actually accepted by the interface
428 pps_rx = tot_rx/tot_core_measurement_duration/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
430 pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
# End-to-end dataplane totals across the whole iteration (t1 -> t4).
433 dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
434 dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
435 tot_dp_drop = dp_tx - dp_rx
436 drop_rate = 100.0*tot_dp_drop/dp_tx
# Stop retrying when the drop rate is acceptable, when a zero-loss run was
# required and achieved, or when losses exceed the TST009 z threshold.
437 if ((drop_rate < DROP_RATE_TRESHOLD) or (tot_dp_drop == DROP_RATE_TRESHOLD ==0) or (tot_dp_drop > TST009_MAXz)):
439 return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,percentile,percentile_max,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration)
# Choose the next speed to try based on whether the previous trial at
# `speed` succeeded. NOTE(review): interior lines are elided in this
# extract. The visible TST009_* code appears to implement the ETSI
# NFV-TST 009 style binary search over the TST009_S rate table (narrowing
# the [TST009_L, TST009_R] index window); the final return is the plain
# binary search midpoint between minspeed and maxspeed — confirm against
# the full source.
441 def new_speed(speed,size,success):
442 if test == 'fixed_rate':
449 TST009_L = TST009_m + 1
451 TST009_R = max(TST009_m - 1, TST009_L)
452 TST009_m = int ((TST009_L + TST009_R)/2)
453 return (get_percentageof10Gbs(TST009_S[TST009_m],size))
461 return ((minspeed + maxspeed)/2.0)
# Return the initial speed (in % of 10Gb/s) for a test at the given frame
# size and reset the search state. NOTE(review): interior lines are elided
# in this extract (the TST009_L initialisation, the non-fixed_rate branch
# setting minspeed and the final return).
463 def get_start_speed_and_init(size):
464 if test == 'fixed_rate':
471 TST009_R = TST009_n - 1
472 TST009_m = int((TST009_L + TST009_R) / 2)
473 return (get_percentageof10Gbs(TST009_S[TST009_m],size))
478 maxspeed = STARTSPEED
# True when the speed search has converged: for the TST009-style search,
# when the index window has collapsed; otherwise, when the binary-search
# interval is within the configured ACCURACY. NOTE(review): the elif/else
# lines between these branches are elided in this extract.
481 def resolution_achieved():
482 if test == 'fixed_rate':
485 return (TST009_L == TST009_R)
487 return ((maxspeed - minspeed) <= ACCURACY)
def get_percentageof10Gbs(pps_speed,size):
    """Convert a packet rate in pps into a percentage of a 10 Gb/s link.

    Every frame costs (size + 24) bytes of line time (the extra 24 bytes
    presumably cover CRC, preamble and inter-frame gap — standard Ethernet
    wire overhead; confirm against PROX conventions).
    """
    mpps = pps_speed / 1000000.0
    # mpps * 8 * (size + 24) Mbit/s, expressed as a fraction of 10000 Mbit/s.
    return mpps * 0.08 * (size + 24)
def get_pps(speed,size):
    """Convert a speed given as % of a 10 Gb/s link into Mpps.

    Inverse of the wire-time calculation used elsewhere in this file:
    each frame occupies 8 * (size + 24) bits on the wire.
    """
    wire_bits_per_frame = 8 * (size + 24)
    # speed% of 10 Gb/s is speed * 100 Mbit/s; dividing by bits-per-frame
    # yields millions of packets per second.
    return speed * 100.0 / wire_bits_per_frame
def get_speed(packet_speed,size):
    """Convert a packet rate in Mpps into a line rate in Gb/s.

    Uses the same per-frame wire cost of 8 * (size + 24) bits as the other
    speed helpers in this file.
    """
    bits_per_frame = 8 * (size + 24)
    # Mpps / 1000 gives Gpackets/s scaled such that multiplying by the
    # per-frame bit count lands directly in Gb/s.
    return packet_speed / 1000.0 * bits_per_frame
# Main throughput test: for every packet size and flow count, search for the
# highest speed that meets the drop-rate/latency criteria (via run_iteration
# and new_speed), logging a result table, writing a CSV row and pushing the
# result to a Prometheus PushGateway. NOTE(review): many interior lines are
# elided in this extract (loop/else branches, endspeed/endpps assignments,
# warning resets); comments below describe only what is visible.
501 def run_flow_size_test(gensock,sutsock):
504 #fieldnames = ['Flows','PacketSize','Gbps','Mpps','AvgLatency','MaxLatency','PacketsDropped','PacketDropRate']
505 fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Sent','Received','Lost','LostTotal']
506 writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
508 gensock.start(latcores)
509 for size in packet_size_list:
511 gensock.set_size(gencores,0,size) # This is setting the frame size
512 gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
513 gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
514 # This will only work when using sending UDP packets. For different protocls and ethernet types, we would need a different calculation
518 log.info("+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+")
516 log.info("| UDP, "+ '{:>5}'.format(size+4) +" bytes, different number of flows by randomizing SRC & DST UDP port |")
517 log.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
518 log.info("| Flows  | Speed requested  | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|" + '{:.0f} '.format(LAT_PERCENTILE*100) +"Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|L.Ratio|Time|")
519 log.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
520 for flow_number in flow_size_list:
522 gensock.reset_stats()
524 sutsock.reset_stats()
# Flow count is controlled by randomizing bits of the UDP src/dst ports
# (offsets 34 and 36 in the frame).
525 source_port,destination_port = flows[flow_number]
526 gensock.set_random(gencores,0,34,source_port,2)
527 gensock.set_random(gencores,0,36,destination_port,2)
529 speed = get_start_speed_and_init(size)
533 print(str(flow_number)+' flows: Measurement ongoing at speed: ' + str(round(speed,2)) + '%      ',end='\r')
535 # Start generating packets at requested speed (in % of a 10Gb/s link)
536 gensock.speed(speed / len(gencores) / len (gentasks), gencores, gentasks)
538 # Get statistics now that the generation is stable and initial ARP messages are dealt with
539 pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc , lat_perc_max, lat_max, abs_tx,abs_rx,abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration = run_iteration(gensock,sutsock,float(runtime),flow_number,size,speed)
541 retry_warning = bcolors.WARNING + ' {:1} retries needed'.format(r) +  bcolors.ENDC
544 # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
545 # If the some is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
546 if (drop_rate + lat_used * 100) < 95:
547 lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(lat_used*100) +  bcolors.ENDC
550 if test == 'fixed_rate':
557 endlat_perc = lat_perc
558 endlat_perc_max = lat_perc_max
560 endabs_dropped = abs_dropped
561 enddrop_rate = drop_rate
564 if lat_warning or gen_warning or retry_warning:
565 endwarning = '|        | {:177.177} |'.format(retry_warning + lat_warning + gen_warning)
567 speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
568 # The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
569 # The drop rate success can be achieved in 2 ways: either the drop rate is below a treshold, either we want that no packet has been lost during the test
570 # This can be specified by putting 0 in the .test file
571 elif ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)) and (lat_avg< LAT_AVG_TRESHOLD) and (lat_perc< LAT_PERC_TRESHOLD) and (lat_max < LAT_MAX_TRESHOLD):
572 lat_avg_prefix = bcolors.ENDC
573 lat_perc_prefix = bcolors.ENDC
574 lat_max_prefix = bcolors.ENDC
575 abs_drop_rate_prefix = bcolors.ENDC
576 drop_rate_prefix = bcolors.ENDC
# Warn when the generator delivered noticeably (>1%) less than requested.
577 if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))>0.01:
578 speed_prefix = bcolors.WARNING
580 gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(get_pps(speed,size), pps_tx, abs_tx_fail) + bcolors.ENDC
582 gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(get_pps(speed,size), pps_tx) + bcolors.ENDC
584 speed_prefix = bcolors.ENDC
587 endspeed_prefix = speed_prefix
588 endpps_req_tx = pps_req_tx
590 endpps_sut_tx = pps_sut_tx
593 endlat_perc = lat_perc
594 endlat_perc_max = lat_perc_max
596 endabs_dropped = None
597 enddrop_rate = drop_rate
600 if lat_warning or gen_warning or retry_warning:
601 endwarning = '|        | {:177.177} |'.format(retry_warning + lat_warning + gen_warning)
603 success_message=' SUCCESS'
# NOTE(review): this report_result call passes only 5 prefix arguments
# (lat_perc_prefix is missing), so lat_max_prefix lands in the
# lat_perc_prefix slot and the remaining prefixes shift by one — compare
# with the 6-prefix call in the FAILED branch below. Likely a bug; confirm
# against the full source.
604 log.debug(report_result(-attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix)+ success_message + retry_warning + lat_warning + gen_warning)
606 success_message=' FAILED'
608 abs_drop_rate_prefix = bcolors.ENDC
609 if ((abs_dropped>0) and (DROP_RATE_TRESHOLD ==0)):
610 abs_drop_rate_prefix = bcolors.FAIL
611 if (drop_rate < DROP_RATE_TRESHOLD):
612 drop_rate_prefix = bcolors.ENDC
614 drop_rate_prefix = bcolors.FAIL
615 if (lat_avg< LAT_AVG_TRESHOLD):
616 lat_avg_prefix = bcolors.ENDC
618 lat_avg_prefix = bcolors.FAIL
619 if (lat_perc< LAT_PERC_TRESHOLD):
620 lat_perc_prefix = bcolors.ENDC
622 lat_perc_prefix = bcolors.FAIL
623 if (lat_max< LAT_MAX_TRESHOLD):
624 lat_max_prefix = bcolors.ENDC
626 lat_max_prefix = bcolors.FAIL
627 if (((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001):
628 speed_prefix = bcolors.ENDC
630 speed_prefix = bcolors.FAIL
632 log.debug(report_result(-attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_perc_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix)+ success_message + retry_warning + lat_warning + gen_warning)
633 speed = new_speed(speed, size, success)
634 if resolution_achieved():
637 log.info(report_result(flow_number,size,endspeed,endpps_req_tx,endpps_tx,endpps_sut_tx,endpps_rx,endlat_avg,endlat_perc,endlat_perc_max,endlat_max,endabs_tx,endabs_rx,endabs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_perc_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix))
639 log.info (endwarning)
640 log.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
641 writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':get_pps(endspeed,size),'GeneratedPPS':endpps_req_tx,'SentPPS':endpps_tx,'ForwardedPPS':endpps_sut_tx,'ReceivedPPS':endpps_rx,'AvgLatencyUSEC':endlat_avg,'MaxLatencyUSEC':endlat_max,'Sent':endabs_tx,'Received':endabs_rx,'Lost':endabs_dropped,'LostTotal':endabs_dropped})
643 URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env
# NOTE(review): 'endabs_Dropped' in the format arguments below looks like a
# typo for 'endabs_dropped' (used on the CSV line above) and would raise
# NameError when PushGateway reporting runs — confirm against the full
# source before fixing.
644 DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nSent {}\nReceived {}\nLost {}\nLostTotal {}\n'.format(flow_number,size+4,get_pps(endspeed,size),endpps_req_tx,endpps_tx,endpps_sut_tx,endpps_rx,endlat_avg,endlat_max,endabs_tx,endabs_rx,endabs_Dropped,endabs_dropped)
645 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
646 response = requests.post(url=URL, data=DATA,headers=HEADERS)
648 log.info('|{:>7}'.format(str(flow_number))+" | Speed 0 or close to 0")
649 gensock.stop(latcores)
# Continuously sample per-core statistics from one or more PROX instances
# for `runtime` seconds, logging a table row, writing a CSV row and pushing
# each sample to the PushGateway. NOTE(review): some interior lines are
# elided in this extract (e.g. the tot_drop list initialisation and the
# sockets_to_go decrement); comments describe only what is visible.
651 def run_core_stats(socks):
652 fieldnames = ['PROXID','Time','Received','Sent','NonDPReceived','NonDPSent','Delta','NonDPDelta','Dropped']
653 writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
655 log.info("+------------------------------------------------------------------------------------------------------------------+")
656 log.info("| Measuring core statistics on 1 or more PROX instances                                                            |")
657 log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
658 log.info("| PROX ID   |    Time   |    RX      |     TX     | non DP RX  | non DP TX  |   TX - RX  | nonDP TX-RX|  DROP TOT  |")
659 log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
662 duration = float(runtime)
# Parallel per-socket lists holding the previous (old_*) and current
# (new_*) counter snapshots; deltas between them give the per-interval
# statistics.
664 old_rx = []; old_non_dp_rx = []; old_tx = []; old_non_dp_tx = []; old_drop = []; old_tx_fail = []; old_tsc = []
665 new_rx = []; new_non_dp_rx = []; new_tx = []; new_non_dp_tx = []; new_drop = []; new_tx_fail = []; new_tsc = []
666 sockets_to_go = len (socks)
667 for i,sock in enumerate(socks,start=0):
669 old_rx.append(0); old_non_dp_rx.append(0); old_tx.append(0); old_non_dp_tx.append(0); old_drop.append(0); old_tx_fail.append(0); old_tsc.append(0)
670 old_rx[-1], old_non_dp_rx[-1], old_tx[-1], old_non_dp_tx[-1], old_drop[-1], old_tx_fail[-1], old_tsc[-1], tsc_hz = sock.core_stats(cores[i],tasks)
671 new_rx.append(0); new_non_dp_rx.append(0); new_tx.append(0); new_non_dp_tx.append(0); new_drop.append(0); new_tx_fail.append(0); new_tsc.append(0)
672 while (duration > 0):
674 # Get statistics after some execution time
675 for i,sock in enumerate(socks,start=0):
676 new_rx[i], new_non_dp_rx[i], new_tx[i], new_non_dp_tx[i], new_drop[i], new_tx_fail[i], new_tsc[i], tsc_hz = sock.core_stats(cores[i],tasks)
677 drop = new_drop[i]-old_drop[i]
678 rx = new_rx[i] - old_rx[i]
679 tx = new_tx[i] - old_tx[i]
680 non_dp_rx = new_non_dp_rx[i] - old_non_dp_rx[i]
681 non_dp_tx = new_non_dp_tx[i] - old_non_dp_tx[i]
682 tsc = new_tsc[i] - old_tsc[i]
# Roll the current snapshot into the 'old' baseline for the next interval.
686 old_drop[i] = new_drop[i]
687 old_rx[i] = new_rx[i]
688 old_tx[i] = new_tx[i]
689 old_non_dp_rx[i] = new_non_dp_rx[i]
690 old_non_dp_tx[i] = new_non_dp_tx[i]
691 old_tsc[i] = new_tsc[i]
# NOTE(review): tot_drop must be initialised on an elided line — confirm.
692 tot_drop[i] = tot_drop[i] + tx - rx
693 log.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
694 writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NonDPReceived':non_dp_rx,'NonDPSent':non_dp_tx,'Delta':tx-rx,'NonDPDelta':non_dp_tx-non_dp_rx,'Dropped':tot_drop[i]})
696 URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env + str(i)
697 DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNonDPReceived {}\nNonDPSent {}\nDelta {}\nNonDPDelta {}\nDropped {}\n'.format(i,duration,rx,tx,non_dp_rx,non_dp_tx,tx-rx,non_dp_tx-non_dp_rx,tot_drop[i])
698 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
699 response = requests.post(url=URL, data=DATA,headers=HEADERS)
# Count down one second only after every socket has reported its sample.
700 if sockets_to_go == 0:
701 duration = duration - 1
702 sockets_to_go = len (socks)
703 log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
# Continuously sample per-port statistics (RX/TX, mbuf allocation failures,
# ierrors+imissed) from one or more PROX instances for `runtime` seconds,
# with the same log/CSV/PushGateway reporting pattern as run_core_stats.
# NOTE(review): some interior lines are elided in this extract (e.g. the
# sockets_to_go decrement).
705 def run_port_stats(socks):
706 fieldnames = ['PROXID','Time','Received','Sent','NoMbufs','iErrMiss']
707 writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
709 log.info("+---------------------------------------------------------------------------+")
710 log.info("| Measuring port statistics on 1 or more PROX instances                     |")
711 log.info("+-----------+-----------+------------+------------+------------+------------+")
712 log.info("| PROX ID   |    Time   |    RX      |     TX     | no MBUFS   | ierr&imiss |")
713 log.info("+-----------+-----------+------------+------------+------------+------------+")
716 duration = float(runtime)
# Parallel per-socket lists of previous/current counter snapshots.
717 old_rx = []; old_tx = []; old_no_mbufs = []; old_errors = []; old_tsc = []
718 new_rx = []; new_tx = []; new_no_mbufs = []; new_errors = []; new_tsc = []
719 sockets_to_go = len (socks)
720 for i,sock in enumerate(socks,start=0):
721 old_rx.append(0); old_tx.append(0); old_no_mbufs.append(0); old_errors.append(0); old_tsc.append(0)
722 old_rx[-1], old_tx[-1], old_no_mbufs[-1], old_errors[-1], old_tsc[-1] = sock.multi_port_stats(ports[i])
723 new_rx.append(0); new_tx.append(0); new_no_mbufs.append(0); new_errors.append(0); new_tsc.append(0)
724 while (duration > 0):
726 # Get statistics after some execution time
727 for i,sock in enumerate(socks,start=0):
728 new_rx[i], new_tx[i], new_no_mbufs[i], new_errors[i], new_tsc[i] = sock.multi_port_stats(ports[i])
729 rx = new_rx[i] - old_rx[i]
730 tx = new_tx[i] - old_tx[i]
731 no_mbufs = new_no_mbufs[i] - old_no_mbufs[i]
732 errors = new_errors[i] - old_errors[i]
733 tsc = new_tsc[i] - old_tsc[i]
# Roll the current snapshot into the 'old' baseline for the next interval.
737 old_rx[i] = new_rx[i]
738 old_tx[i] = new_tx[i]
739 old_no_mbufs[i] = new_no_mbufs[i]
740 old_errors[i] = new_errors[i]
741 old_tsc[i] = new_tsc[i]
742 log.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(no_mbufs)+' | '+'{:>10.0f}'.format(errors)+' |')
743 writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NoMbufs':no_mbufs,'iErrMiss':errors})
745 URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env + str(i)
746 DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNoMbufs {}\niErrMiss {}\n'.format(i,duration,rx,tx,no_mbufs,errors)
747 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
748 response = requests.post(url=URL, data=DATA,headers=HEADERS)
# Count down one second only after every socket has reported its sample.
749 if sockets_to_go == 0:
750 duration = duration - 1
751 sockets_to_go = len (socks)
752 log.info("+-----------+-----------+------------+------------+------------+------------+")
def run_irqtest(socks):
    """Measure per-core interrupt time on every PROX instance.

    For each instance, collects the interrupt-duration histogram buckets,
    runs for ``runtime`` seconds, and logs how many times per second each
    core was interrupted for a duration falling in each bucket.
    """
    log.info("+----------------------------------------------------------------------------------------------------------------------------")
    log.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic ")
    log.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and ")
    log.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was ")
    log.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout ")
    log.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 ")
    log.info("+----------------------------------------------------------------------------------------------------------------------------")
    for sock_index, sock in enumerate(socks):
        # Use the enumerated 'sock' directly instead of re-indexing socks[sock_index].
        buckets = sock.show_irq_buckets(1)
        print('Measurement ongoing ... ',end='\r')
        sock.stop(cores[sock_index])
        irq_cores = cores[sock_index]
        # Row 0 / column 0 hold labels; the rest hold per-core, per-bucket counts.
        old_irq = [[0 for _ in range(len(buckets)+1)] for _ in range(len(irq_cores)+1)]
        irq = [[0 for _ in range(len(buckets)+1)] for _ in range(len(irq_cores)+1)]
        irq[0][0] = 'bucket us'
        for j, bucket in enumerate(buckets, start=1):
            irq[0][j] = '<'+ bucket
        irq[0][-1] = '>'+ buckets[-2]
        sock.start(irq_cores)
        # Baseline counters before the measurement window.
        for j, bucket in enumerate(buckets, start=1):
            for i, irqcore in enumerate(irq_cores, start=1):
                old_irq[i][j] = sock.irq_stats(irqcore, j-1)
        time.sleep(float(runtime))
        sock.stop(irq_cores)
        for i, irqcore in enumerate(irq_cores, start=1):
            irq[i][0] = 'core %s '%irqcore
            for j, bucket in enumerate(buckets, start=1):
                diff = sock.irq_stats(irqcore, j-1) - old_irq[i][j]
                if diff == 0:
                    # NOTE(review): zero-branch elided in this excerpt — confirm '0' placeholder
                    irq[i][j] = '0'
                else:
                    irq[i][j] = str(round(diff/float(runtime), 2))
        log.info('Results for PROX instance %s'%sock_index)
        for row in irq:
            log.info(''.join(['{:>12}'.format(item) for item in row]))
def run_impairtest(gensock, sutsock):
    """Send fixed-rate UDP traffic through an impairing gateway, forever.

    Uses the PACKETSIZE and FLOWSIZE module globals set by the test
    dispatcher. Each iteration's statistics are logged, written to the
    global CSV file and optionally pushed to the PushGateway. The loop has
    no exit condition; stop the test with ctrl-c.
    """
    fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Dropped','DropRate']
    writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
    writer.writeheader()
    size = PACKETSIZE - 4  # frame size excluding the 4-byte FCS — elided in excerpt, confirm
    attempts = 0
    log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+")
    log.info("| Generator is sending UDP ("+'{:>5}'.format(FLOWSIZE)+" flow) packets ("+ '{:>5}'.format(size+4) +" bytes) to SUT via GW dropping and delaying packets. SUT sends packets back. Use ctrl-c to stop the test |")
    log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
    log.info("| Test | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio |")
    log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
    gensock.set_size(gencores,0,size) # This is setting the frame size
    gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
    gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
    # This will only work when using sending UDP packets. For different protocols and ethernet types, we would need a different calculation
    source_port, destination_port = flows[FLOWSIZE]
    gensock.set_random(gencores,0,34,source_port,2)
    gensock.set_random(gencores,0,36,destination_port,2)
    gensock.start(latcores)
    speed = STARTSPEED  # NOTE(review): initial speed elided in this excerpt — confirm
    gensock.speed(speed / len(gencores) / len(gentasks), gencores, gentasks)
    while True:
        attempts += 1
        print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
        sys.stdout.flush()
        time.sleep(1)
        # Get statistics now that the generation is stable and NO ARP messages any more
        pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg, lat_perc, lat_perc_max, lat_max, abs_dropped, abs_tx_fail, abs_tx, lat_min, lat_used, r, actual_duration = run_iteration(gensock,sutsock,runtime)
        # Guard the drop-rate division: abs_tx can be 0 right after start-up.
        drop_rate = 100.0*abs_dropped/abs_tx if abs_tx else 0.0
        if lat_used < 0.95:
            lat_warning = bcolors.FAIL + ' Potential latency accuracy problem: {:>3.0f}%'.format(lat_used*100) + bcolors.ENDC
        else:
            lat_warning = ''
        log.info('|{:>7}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(pps_tx) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(pps_rx)+' Mpps | '+ '{:>9.0f}'.format(lat_avg)+' us | '+ '{:>9.0f}'.format(lat_max)+' us | '+ '{:>14d}'.format(abs_dropped)+ ' |''{:>9.2f}'.format(drop_rate)+ '% |'+lat_warning)
        writer.writerow({'Flows':FLOWSIZE,'PacketSize':(size+4),'RequestedPPS':get_pps(speed,size),'GeneratedPPS':pps_req_tx,'SentPPS':pps_tx,'ForwardedPPS':pps_sut_tx_str,'ReceivedPPS':pps_rx,'AvgLatencyUSEC':lat_avg,'MaxLatencyUSEC':lat_max,'Dropped':abs_dropped,'DropRate':drop_rate})
        if PushGateway:  # push is optional — guard elided in this excerpt, confirm
            URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env
            DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nDropped {}\nDropRate {}\n'.format(FLOWSIZE,size+4,get_pps(speed,size),pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max,abs_dropped,drop_rate)
            HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
            response = requests.post(url=URL, data=DATA,headers=HEADERS)
def run_warmuptest(gensock):
    """Generate low-rate warm-up traffic so ARP resolution completes.

    Without this warm-up, an ARP message dropped by an overloaded switch
    would distort the measurements that follow. Note however that during a
    very long run the switch's ARP entries could still expire; PROX sends a
    new ARP request every second, so the chance that all of them are lost
    is very low.
    """
    warmup_rate = WARMUPSPEED / len(gencores) / len(gentasks)
    gensock.speed(warmup_rate, gencores, gentasks)
    # Frame layout: set the frame size, then patch the IP total-length field
    # (frame minus MACs + ethertype + FCS = 18 bytes, offset 16) and the UDP
    # length field (frame minus 18 minus the 20-byte IP header, offset 38).
    gensock.set_size(gencores, 0, size)
    gensock.set_value(gencores, 0, 16, size - 14, 2)
    gensock.set_value(gencores, 0, 38, size - 34, 2)
    gensock.set_value(gencores, 0, 56, 1, 1)
    # Only valid for UDP over IPv4; other protocols/ethertypes would need
    # different offsets and length calculations.
    src_mask, dst_mask = flows[FLOWSIZE]
    gensock.set_random(gencores, 0, 34, src_mask, 2)
    gensock.set_random(gencores, 0, 36, dst_mask, 2)
    # Two warm-up bursts: first with byte 56 set to 1, then set to 50.
    gensock.start(genstatcores)
    time.sleep(WARMUPTIME)
    gensock.stop(genstatcores)
    gensock.set_value(gencores, 0, 56, 50, 1)
    time.sleep(WARMUPTIME)
# To generate a desired number of flows, PROX will randomize the bits in
# source and destination ports, as specified by the bit masks in the flows
# variable. For 2**k flows, the k randomized bits ('X') are split between
# the source-port mask (k//2 of them) and the destination-port mask
# ((k+1)//2 of them). Backslash continuations are unnecessary inside a
# braced literal and have been removed.
flows = {
    1:       ['1000000000000000', '1000000000000000'],
    2:       ['1000000000000000', '100000000000000X'],
    4:       ['100000000000000X', '100000000000000X'],
    8:       ['100000000000000X', '10000000000000XX'],
    16:      ['10000000000000XX', '10000000000000XX'],
    32:      ['10000000000000XX', '1000000000000XXX'],
    64:      ['1000000000000XXX', '1000000000000XXX'],
    128:     ['1000000000000XXX', '100000000000XXXX'],
    256:     ['100000000000XXXX', '100000000000XXXX'],
    512:     ['100000000000XXXX', '10000000000XXXXX'],
    1024:    ['10000000000XXXXX', '10000000000XXXXX'],
    2048:    ['10000000000XXXXX', '1000000000XXXXXX'],
    4096:    ['1000000000XXXXXX', '1000000000XXXXXX'],
    8192:    ['1000000000XXXXXX', '100000000XXXXXXX'],
    16384:   ['100000000XXXXXXX', '100000000XXXXXXX'],
    32768:   ['100000000XXXXXXX', '10000000XXXXXXXX'],
    65536:   ['10000000XXXXXXXX', '10000000XXXXXXXX'],
    131072:  ['10000000XXXXXXXX', '1000000XXXXXXXXX'],
    262144:  ['1000000XXXXXXXXX', '1000000XXXXXXXXX'],
    524288:  ['1000000XXXXXXXXX', '100000XXXXXXXXXX'],
    1048576: ['100000XXXXXXXXXX', '100000XXXXXXXXXX'],
}
# Open the per-run CSV results file and load the test, environment and
# machine-map configurations.
data_file = 'RUN{}.{}.csv'.format(env,test_file)
data_csv_file = open(data_file,'w')
testconfig = ConfigParser.RawConfigParser()
testconfig.read(test_file)
required_number_of_test_machines = testconfig.get('DEFAULT', 'total_number_of_test_machines')
TestName = testconfig.get('DEFAULT', 'name')
if testconfig.has_option('DEFAULT', 'PushGateway'):
    PushGateway = testconfig.get('DEFAULT', 'PushGateway')
    log.info('Measurements will be pushed to %s'%PushGateway)
if testconfig.has_option('DEFAULT', 'lat_percentile'):
    LAT_PERCENTILE = float(testconfig.get('DEFAULT', 'lat_percentile')) /100.0
else:
    # Default: report the 99th latency percentile.
    LAT_PERCENTILE = 0.99
log.info('Latency percentile measured at {:.0f}%'.format(LAT_PERCENTILE*100))
config = ConfigParser.RawConfigParser()
config.read(env)  # NOTE(review): read call elided in this excerpt — confirm
machine_map = ConfigParser.RawConfigParser()
machine_map.read(machine_map_file)
key = config.get('ssh', 'key')
user = config.get('ssh', 'user')
total_number_of_machines = config.get('rapid', 'total_number_of_machines')
if int(required_number_of_test_machines) > int(total_number_of_machines):
    # Build the message once instead of duplicating the literal.
    vm_shortage_msg = "Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines)
    log.exception(vm_shortage_msg)
    raise Exception(vm_shortage_msg)
for vm in range(1, int(total_number_of_machines)+1):
    vmAdminIP.append(config.get('M%d'%vm, 'admin_ip'))
    vmDPmac.append(config.get('M%d'%vm, 'dp_mac'))
    vmDPIP.append(config.get('M%d'%vm, 'dp_ip'))
    octets = vmDPIP[-1].split('.')
    # Space-separated, zero-padded lowercase hex form of the dataplane IP,
    # e.g. "c0 a8 01 02" — same output as hex(int(o))[2:].zfill(2) per octet.
    hexDPIP.append(' '.join('{:02x}'.format(int(octet)) for octet in octets))
# First pass: map every test machine to a physical machine and record
# whether we should open a PROX socket to it.
for vm in range(1, int(required_number_of_test_machines)+1):
    machine_index.append(int(machine_map.get('TestM%d'%vm, 'machine_index'))-1)
    prox_socket.append(testconfig.getboolean('TestM%d'%vm, 'prox_socket'))
# Second pass: generate the per-machine parameters.lua and classify each
# machine (generator / SUT / none) from its PROX config file name.
for vm in range(1, int(required_number_of_test_machines)+1):
    if prox_socket[vm-1]:
        section = 'TestM%d'%vm  # all per-machine settings live in this section
        prox_launch_exit.append(testconfig.getboolean(section, 'prox_launch_exit'))
        config_file.append(testconfig.get(section, 'config_file'))
        # Looking for all task definitions in the PROX cfg files. Constructing a list of all tasks used
        # 'with' fixes the leaked file handle of the original open()/read().
        with open(config_file[-1], 'r') as textfile:
            filetext = textfile.read()
        tasks_for_this_cfg = set(re.findall("task\s*=\s*(\d+)",filetext))
        with open('{}_{}_parameters{}.lua'.format(env,test_file,vm), "w") as f:
            f.write('name="%s"\n'% testconfig.get(section, 'name'))
            f.write('local_ip="%s"\n'% vmDPIP[machine_index[vm-1]])
            f.write('local_hex_ip="%s"\n'% hexDPIP[machine_index[vm-1]])
            if testconfig.has_option(section, 'cores'):
                cores.append(ast.literal_eval(testconfig.get(section, 'cores')))
                f.write('cores="%s"\n'% ','.join(map(str, cores[-1])))
            else:
                # NOTE(review): else-branch elided in this excerpt — confirm placeholder
                cores.append(None)
            if testconfig.has_option(section, 'ports'):
                ports.append(ast.literal_eval(testconfig.get(section, 'ports')))
                f.write('ports="%s"\n'% ','.join(map(str, ports[-1])))
            else:
                # NOTE(review): else-branch elided in this excerpt — confirm placeholder
                ports.append(None)
            if re.match('(l2){0,1}gen(_bare){0,1}.*\.cfg',config_file[-1]):
                # Pure traffic generator machine.
                gencores = ast.literal_eval(testconfig.get(section, 'gencores'))
                latcores = ast.literal_eval(testconfig.get(section, 'latcores'))
                genstatcores = gencores + latcores
                gentasks = tasks_for_this_cfg
                auto_start.append(False)
                mach_type.append('gen')
                f.write('gencores="%s"\n'% ','.join(map(str, gencores)))
                f.write('latcores="%s"\n'% ','.join(map(str, latcores)))
                destVMindex = int(testconfig.get(section, 'dest_vm'))-1
                f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
                f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
                f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
                if testconfig.has_option(section, 'bucket_size_exp'):
                    BUCKET_SIZE_EXP = int(testconfig.get(section, 'bucket_size_exp'))
                else:
                    BUCKET_SIZE_EXP = 11  # NOTE(review): default elided in excerpt — confirm
                f.write('bucket_size_exp="%s"\n'% BUCKET_SIZE_EXP)
            elif re.match('(l2){0,1}gen_gw.*\.cfg',config_file[-1]):
                # Generator sending via a gateway machine. The duplicated
                # bucket_size_exp lookup that preceded the gencores lines was
                # dead code (its result was always overwritten below) and has
                # been removed.
                gencores = ast.literal_eval(testconfig.get(section, 'gencores'))
                latcores = ast.literal_eval(testconfig.get(section, 'latcores'))
                genstatcores = gencores + latcores
                gentasks = tasks_for_this_cfg
                auto_start.append(False)
                mach_type.append('gen')
                f.write('gencores="%s"\n'% ','.join(map(str, gencores)))
                f.write('latcores="%s"\n'% ','.join(map(str, latcores)))
                gwVMindex = int(testconfig.get(section, 'gw_vm')) -1
                f.write('gw_ip="%s"\n'% vmDPIP[machine_index[gwVMindex]])
                f.write('gw_hex_ip="%s"\n'% hexDPIP[machine_index[gwVMindex]])
                destVMindex = int(testconfig.get(section, 'dest_vm'))-1
                f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
                f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
                f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
                if testconfig.has_option(section, 'bucket_size_exp'):
                    BUCKET_SIZE_EXP = int(testconfig.get(section, 'bucket_size_exp'))
                else:
                    BUCKET_SIZE_EXP = 11  # NOTE(review): default elided in excerpt — confirm
                f.write('bucket_size_exp="%s"\n'% BUCKET_SIZE_EXP)
            elif re.match('(l2){0,1}swap.*\.cfg',config_file[-1]):
                # System under test running the swap workload.
                sutstatcores = cores[-1]
                auto_start.append(True)
                mach_type.append('sut')
            elif re.match('secgw1.*\.cfg',config_file[-1]):
                # First security-gateway hop: forwards towards dest_vm.
                auto_start.append(True)
                mach_type.append('none')
                destVMindex = int(testconfig.get(section, 'dest_vm'))-1
                f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
                f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
                f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
            elif re.match('secgw2.*\.cfg',config_file[-1]):
                # Second security-gateway hop acts as the SUT for statistics.
                sutstatcores = cores[-1]
                auto_start.append(True)
                mach_type.append('sut')
            else:
                # NOTE(review): fallback branch elided in this excerpt — confirm
                auto_start.append(True)
                mach_type.append('none')
        tasks = tasks_for_this_cfg.union(tasks)
log.debug("Tasks detected in all PROX config files %r"%tasks)
#####################################################################################
def exit_handler():
    """atexit hook: quit PROX on controlled sockets and close SSH clients and the CSV file."""
    log.debug('exit cleanup')
    for index, sock in enumerate(socks):
        if socks_control[index]:
            sock.quit()  # NOTE(review): call elided in this excerpt — confirm method name
    for client in clients:
        client.close()
    # Ensure close() is actually called (a bare 'data_csv_file.close' would be a no-op).
    data_csv_file.close()

atexit.register(exit_handler)
# Provision each test machine: connect over SSH, install and run the
# devbind script, upload the PROX config and parameters, and (optionally)
# launch PROX and open its control socket.
for vm in range(0, int(required_number_of_test_machines)):
    if prox_socket[vm]:  # NOTE(review): guard elided in this excerpt — confirm
        clients.append(prox_ctrl(vmAdminIP[machine_index[vm]], key,user))
        client = clients[-1]
        connect_client(client)
        # Creating script to bind the right network interface to the poll mode driver
        devbindfile = '{}_{}_devbindvm{}.sh'.format(env,test_file, vm+1)
        # Assemble the script once and write it in a single call instead of
        # alternating newText=/f.write pairs.
        script_lines = [
            'link="$(ip -o link | grep '+vmDPmac[machine_index[vm]]+' |cut -d":" -f 2)"\n',
            'if [ -n "$link" ];\n',
            'then\n',
            ' echo Need to bind\n',
            ' sudo ' + rundir + '/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $('+rundir+'/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)\n',
            'else\n',
            ' echo Assuming port is already bound to DPDK\n',
            'fi\n',
        ]
        with open(devbindfile, "w") as f:
            f.writelines(script_lines)
        # Make the script executable, ship it to the VM and run it.
        st = os.stat(devbindfile)
        os.chmod(devbindfile, st.st_mode | stat.S_IEXEC)
        client.scp_put('./%s'%devbindfile, rundir+'/devbind.sh')
        cmd = 'sudo ' + rundir+ '/devbind.sh'
        client.run_cmd(cmd)
        log.debug("devbind.sh running on VM%d"%(vm+1))
        client.scp_put('./%s'%config_file[vm], rundir+'/%s'%config_file[vm])
        client.scp_put('./{}_{}_parameters{}.lua'.format(env,test_file, vm+1), rundir + '/parameters.lua')
        if not configonly:  # NOTE(review): configonly guard elided in excerpt — confirm
            if prox_launch_exit[vm]:
                log.debug("Starting PROX on VM%d"%(vm+1))
                if auto_start[vm]:  # NOTE(review): branch condition elided — confirm
                    cmd = 'sudo ' +rundir + '/prox/build/prox -t -o cli -f ' + rundir + '/%s'%config_file[vm]
                else:
                    cmd = 'sudo ' +rundir + '/prox/build/prox -e -t -o cli -f ' + rundir + '/%s'%config_file[vm]
                client.fork_cmd(cmd, 'PROX Testing on TestM%d'%(vm+1))
            socks_control.append(prox_launch_exit[vm])
            socks.append(connect_socket(client))
            sock_type.append(mach_type[vm])
def get_BinarySearchParams():
    """Load binary-search thresholds from the [BinarySearchParams] section
    of the test file into module globals used by run_flow_size_test.
    """
    global DROP_RATE_TRESHOLD
    global LAT_AVG_TRESHOLD
    global LAT_PERC_TRESHOLD
    global LAT_MAX_TRESHOLD
    # Fix: ACCURACY and STARTSPEED are assigned below — without these
    # declarations the assignments would create function-local variables.
    global ACCURACY
    global STARTSPEED
    DROP_RATE_TRESHOLD = float(testconfig.get('BinarySearchParams', 'drop_rate_threshold'))
    LAT_AVG_TRESHOLD = float(testconfig.get('BinarySearchParams', 'lat_avg_threshold'))
    LAT_PERC_TRESHOLD = float(testconfig.get('BinarySearchParams', 'lat_perc_threshold'))
    LAT_MAX_TRESHOLD = float(testconfig.get('BinarySearchParams', 'lat_max_threshold'))
    ACCURACY = float(testconfig.get('BinarySearchParams', 'accuracy'))
    STARTSPEED = float(testconfig.get('BinarySearchParams', 'startspeed'))
def get_FixedRateParams():
    """Configure a fixed-rate run: disable all pass/fail thresholds and read
    packet sizes, flow counts and the fixed speed from the current test
    section (test%d with the global test_nr).
    """
    global DROP_RATE_TRESHOLD
    global LAT_AVG_TRESHOLD
    global LAT_PERC_TRESHOLD
    global LAT_MAX_TRESHOLD
    global flow_size_list
    global packet_size_list
    # Fix: STARTSPEED is assigned below and must be declared global too.
    global STARTSPEED
    # Fixed-rate runs never fail on drops or latency: thresholds are infinite.
    DROP_RATE_TRESHOLD = inf
    LAT_AVG_TRESHOLD = inf
    LAT_PERC_TRESHOLD = inf
    LAT_MAX_TRESHOLD = inf
    packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
    flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
    STARTSPEED = float(testconfig.get('test%d'%test_nr, 'speed'))
def get_TST009SearchParams():
    """Load ETSI GS NFV-TST 009 throughput-search parameters from the
    [TST009SearchParams] test section into module globals.
    """
    global DROP_RATE_TRESHOLD
    global LAT_AVG_TRESHOLD
    global LAT_PERC_TRESHOLD
    global LAT_MAX_TRESHOLD
    global TST009_MAXFramesAllIngress
    global TST009_StepSize
    # Fix: every name assigned below needs a global declaration; some were
    # missing/elided and the assignments would otherwise stay local.
    global TST009_MAXr
    global TST009_MAXz
    global TST009_n
    global TST009_L
    global TST009_R
    global TST009_S
    if testconfig.has_option('TST009SearchParams', 'drop_rate_threshold'):
        DROP_RATE_TRESHOLD = float(testconfig.get('TST009SearchParams', 'drop_rate_threshold'))
    else:
        DROP_RATE_TRESHOLD = 0
    # TST009 searches only on drop rate; latency thresholds are disabled.
    LAT_AVG_TRESHOLD = inf
    LAT_PERC_TRESHOLD = inf
    LAT_MAX_TRESHOLD = inf
    TST009_MAXr = float(testconfig.get('TST009SearchParams', 'MAXr'))
    TST009_MAXz = float(testconfig.get('TST009SearchParams', 'MAXz'))
    TST009_MAXFramesAllIngress = int(testconfig.get('TST009SearchParams', 'MAXFramesPerSecondAllIngress'))
    TST009_StepSize = int(testconfig.get('TST009SearchParams', 'StepSize'))
    TST009_n = int(ceil(TST009_MAXFramesAllIngress / TST009_StepSize))
    TST009_L = 0
    TST009_R = TST009_n - 1
    # Rebind the candidate-rate list instead of appending, so calling this
    # function more than once cannot accumulate duplicate entries.
    TST009_S = [(m + 1) * TST009_StepSize for m in range(TST009_n)]
####################################################
# Dispatch each configured test to its runner.
# Best to run the flow test at the end since otherwise the tests coming after might be influenced by the big number of entries in the switch flow tables
####################################################
gensock_index = sock_type.index('gen') if 'gen' in sock_type else -1
sutsock_index = sock_type.index('sut') if 'sut' in sock_type else -1
number_of_tests = testconfig.get('DEFAULT', 'number_of_tests')
for test_nr in range(1, int(number_of_tests)+1):
    test = testconfig.get('test%d'%test_nr,'test')
    if test == 'flowsizetest':
        get_BinarySearchParams()
        packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
        flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
        run_flow_size_test(socks[gensock_index],socks[sutsock_index])
    elif test == 'TST009test':
        get_TST009SearchParams()
        packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
        flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
        run_flow_size_test(socks[gensock_index],socks[sutsock_index])
    elif test == 'fixed_rate':
        get_FixedRateParams()
        run_flow_size_test(socks[gensock_index],socks[sutsock_index])
    elif test == 'corestats':
        run_core_stats(socks)
    elif test == 'portstats':
        run_port_stats(socks)
    elif test == 'impairtest':
        get_BinarySearchParams()
        PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
        FLOWSIZE = int(testconfig.get('test%d'%test_nr, 'flowsize'))
        run_impairtest(socks[gensock_index],socks[sutsock_index])
    elif test == 'irqtest':
        # Body restored: this branch was empty in the excerpt (syntax error).
        run_irqtest(socks)
    elif test == 'warmuptest':
        PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
        FLOWSIZE = int(testconfig.get('test%d'%test_nr, 'flowsize'))
        WARMUPSPEED = int(testconfig.get('test%d'%test_nr, 'warmupspeed'))
        WARMUPTIME = int(testconfig.get('test%d'%test_nr, 'warmuptime'))
        run_warmuptest(socks[gensock_index])
####################################################