4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
24 from past.utils import old_div
25 from rapid_log import RapidLog
26 from rapid_log import bcolors
28 from datetime import datetime as dt
30 class RapidTest(object):
32 Class to manage the testing
    def __init__(self, test_param, runtime, testname, environment_file ):
        """Store the test configuration and load the result format definition.

        test_param: dict with the test configuration; augmented in place
            with the runtime, testname and environment_file values below.
        runtime: requested duration for the test; stored in self.test.
        testname: name of the test; stored in self.test for reporting.
        environment_file: path of the environment file; stored for reporting.
        """
        self.test = test_param
        self.test['runtime'] = runtime
        self.test['testname'] = testname
        self.test['environment_file'] = environment_file
        if 'maxr' not in self.test.keys():
            # NOTE(review): the default assignment for 'maxr' was lost when
            # this chunk was extracted — confirm against the full file.
        if 'maxz' not in self.test.keys():
            # No limit on maximum dropped packets unless explicitly set.
            self.test['maxz'] = inf
        # format.yaml describes how results are rendered and posted
        # (see post_data / parse_data_format_dict).
        with open('format.yaml') as f:
            self.data_format = yaml.load(f, Loader=yaml.FullLoader)
47 def get_percentageof10Gbps(pps_speed,size):
48 # speed is given in pps, returning % of 10Gb/s
49 # 12 bytes is the inter packet gap
50 # pre-amble is 7 bytes
51 # SFD (start of frame delimiter) is 1 byte
52 # Total of 20 bytes overhead per packet
53 return (pps_speed / 1000000.0 * 0.08 * (size+20))
56 def get_pps(speed,size):
57 # speed is given in % of 10Gb/s, returning Mpps
58 # 12 bytes is the inter packet gap
59 # pre-amble is 7 bytes
60 # SFD (start of frame delimiter) is 1 byte
61 # Total of 20 bytes overhead per packet
62 return (speed * 100.0 / (8*(size+20)))
65 def get_speed(packet_speed,size):
66 # return speed in Gb/s
67 # 12 bytes is the inter packet gap
68 # pre-amble is 7 bytes
69 # SFD (start of frame delimiter) is 1 byte
70 # Total of 20 bytes overhead per packet
71 return (packet_speed / 1000.0 * (8*(size+20)))
74 def set_background_flows(background_machines, number_of_flows):
75 for machine in background_machines:
76 _ = machine.set_flows(number_of_flows)
79 def set_background_speed(background_machines, speed):
80 for machine in background_machines:
81 machine.set_generator_speed(speed)
84 def set_background_size(background_machines, imix):
85 # imixs is a list of packet sizes
86 for machine in background_machines:
87 machine.set_udp_packet_size(imix)
    def start_background_traffic(background_machines):
        """Start traffic generation on every background machine."""
        for machine in background_machines:
            # NOTE(review): the loop body was lost when this chunk was
            # extracted — presumably machine.start(); confirm against the
            # full file.
    def stop_background_traffic(background_machines):
        """Stop traffic generation on every background machine."""
        for machine in background_machines:
            # NOTE(review): the loop body was lost when this chunk was
            # extracted — presumably machine.stop(); confirm against the
            # full file.
    def parse_data_format_dict(data_format, variables):
        """Recursively walk *data_format* and replace, in place, every leaf
        value that names a key of *variables* with that key's value.

        NOTE(review): the branch introducers (dict vs. leaf) were lost when
        this chunk was extracted — confirm nesting against the full file.
        """
        for k, v in data_format.items():
            # Recurse into nested dicts ...
                RapidTest.parse_data_format_dict(v, variables)
            # ... and substitute leaf values that are known variable names.
                if v in variables.keys():
                    data_format[k] = variables[v]
    def post_data(self, variables):
        """Render the result section for this test type and POST it to the
        configured endpoint(s).

        variables: dict of measured values substituted into the format
            template (see parse_data_format_dict).
        Returns the rendered dict for this test type (var[test_type]).
        """
        # The section of format.yaml to use is keyed by the concrete
        # subclass name.
        test_type = type(self).__name__
        # Deep-copy so the template in self.data_format is not mutated.
        var = copy.deepcopy(self.data_format)
        self.parse_data_format_dict(var, variables)
        # Only post when the format defines URL, Format and a section for
        # this test type.
        if var.keys() >= {'URL', test_type, 'Format'}:
            # NOTE(review): the line(s) building URL from the iterated
            # values were lost when this chunk was extracted — confirm
            # against the full file.
            for value in var['URL'].values():
                HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
                if var['Format'] == 'PushGateway':
                    # PushGateway expects newline-separated "key value" text.
                    data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
                    response = requests.post(url=URL, data=data,headers=HEADERS)
                elif var['Format'] == 'Xtesting':
                    # Xtesting accepts the section as a JSON body.
                    data = var[test_type]
                    response = requests.post(url=URL, json=data)
                if (response.status_code >= 300):
                    RapidLog.info('Cannot send metrics to {}'.format(URL))
        return (var[test_type])
    def report_result(flow_number, size, data, prefix):
        """Return one fully formatted result line for the log output.

        flow_number: number of flows; negative values are rendered between
            parentheses.
        size: packet size in bytes, used to convert between rates.
        data: dict of measured statistics (pps_*, lat_*, abs_*, ...);
            None entries are rendered as 'NA'.
        prefix: dict of per-column formatting/colour prefixes.

        NOTE(review): several 'if'/'else' introducer lines were lost when
        this chunk was extracted; the paired assignments below are the
        then/else branches of those conditionals — confirm against the
        full file.
        """
        # Flow-number column: parentheses mark a negative flow number.
            flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
            flow_number_str = '|{:>7} |'.format(flow_number)
        if data['pps_req_tx'] is None:
            pps_req_tx_str = '{0: >14}'.format(' NA |')
            pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
        if data['pps_tx'] is None:
            pps_tx_str = '{0: >14}'.format(' NA |')
            pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
        if data['pps_sut_tx'] is None:
            pps_sut_tx_str = '{0: >14}'.format(' NA |')
            pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
        if data['pps_rx'] is None:
            pps_rx_str = '{0: >25}'.format('NA |')
            # RX column shows both Gb/s (derived from pps and size) and Mpps.
            pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
                RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
        if data['abs_dropped'] is None:
            tot_drop_str = ' | NA | '
            tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
        if data['lat_perc'] is None:
            lat_perc_str = '|{:^10.10}|'.format('NA')
        elif data['lat_perc_max'] == True:
            # '>' marks that the percentile fell in the last bucket, i.e.
            # the true percentile latency is at least this value.
            lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
                float(data['lat_perc']), bcolors.ENDC)
            lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
                float(data['lat_perc']), bcolors.ENDC)
        if data['actual_duration'] is None:
            elapsed_time_str = ' NA |'
            elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
        # Assemble the full line, column by column.
        return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
            + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
            pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
            pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
            ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
            + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
            ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
            tot_drop_str + prefix['drop_rate'] +
            '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + bcolors.ENDC +
            ' |' + elapsed_time_str)
    def run_iteration(self, requested_duration, flow_number, size, speed):
        """Run up to self.test['maxr'] measurement iterations at the given
        generator speed, flow count and packet size.

        requested_duration: measurement duration (seconds) per iteration.
        flow_number: number of flows used (for reporting).
        size: packet size in bytes (excluding wire overhead).
        speed: generator speed, in % of 10 Gb/s.
        Returns iteration_data, a dict with all measured statistics.

        NOTE(review): many lines (variable initialisations, 'else:'
        introducers, loop 'break's) were lost when this chunk was
        extracted; hedged comments below mark the visible gaps — confirm
        every gap against the full file before changing logic.
        """
        BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
        LAT_PERCENTILE = self.test['lat_percentile']
        # NOTE(review): initialisation of iteration_data, time_loop_data
        # and sleep_time is not visible in this chunk.
        iteration_data['r'] = 0;
        while (iteration_data['r'] < self.test['maxr']):
            self.gen_machine.start_latency_cores()
            time.sleep(sleep_time)
            # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests might still be in flight
            t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
            t1_dp_rx = t1_rx - t1_non_dp_rx
            t1_dp_tx = t1_tx - t1_non_dp_tx
            # Start generating at speed 0, then (optionally) ramp up in
            # 'ramp_step' increments towards the requested speed.
            self.gen_machine.set_generator_speed(0)
            self.gen_machine.start_gen_cores()
            self.set_background_speed(self.background_machines, 0)
            self.start_background_traffic(self.background_machines)
            if 'ramp_step' in self.test.keys():
                ramp_speed = self.test['ramp_step']
            # NOTE(review): the 'else:' branch (no ramp_step configured)
            # was lost in extraction.
            while ramp_speed < speed:
                self.gen_machine.set_generator_speed(ramp_speed)
                self.set_background_speed(self.background_machines, ramp_speed)
                ramp_speed = ramp_speed + self.test['ramp_step']
            self.gen_machine.set_generator_speed(speed)
            self.set_background_speed(self.background_machines, speed)
            iteration_data['speed'] = speed
            time_loop_data['speed'] = speed
            time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
            # Snapshot the background generators' counters so the average
            # background rate can be derived at the end of the iteration.
            start_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
                # NOTE(review): the opening of the bg_gen_stat dict literal
                # (and some of its entries) were lost in extraction.
                        "bg_dp_rx" : bg_rx - bg_non_dp_rx,
                        "bg_dp_tx" : bg_tx - bg_non_dp_tx,
                start_bg_gen_stats.append(dict(bg_gen_stat))
            if self.sut_machine!= None:
                t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
            # NOTE(review): the computation of 'tx' (presumably
            # t2_tx - t1_tx) was lost in extraction.
            iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
            iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
            iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
            # NOTE(review): the guard around the next line (a zero-TX
            # check) was lost in extraction.
                RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
            if iteration_data['abs_tx'] == 0:
                RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
            # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
            # Measure latency statistics per second
            iteration_data.update(self.gen_machine.lat_stats())
            t2_lat_tsc = iteration_data['lat_tsc']
            # Scan the histogram for the bucket holding the requested
            # percentile. NOTE(review): sample_count initialisation and the
            # loop's 'break' were lost in extraction.
            for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                sample_count += bucket
                if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
            # lat_perc_max means the percentile fell in the last bucket,
            # i.e. the reported value is only a lower bound.
            iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
            # Bucket width in micro-seconds: 2**BUCKET_SIZE_EXP clock
            # cycles divided by the latency clock in MHz.
            iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
            time_loop_data['bucket_size'] = iteration_data['bucket_size']
            iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
            if self.test['test'] == 'fixed_rate':
                # Fixed-rate tests print per-second results in the loop
                # below; blank the rate columns in this header line.
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['pps_rx'] = None
                iteration_data['lat_perc'] = None
                iteration_data['actual_duration'] = None
                # NOTE(review): several entries of this prefix dict were
                # lost in extraction.
                iteration_prefix = {'speed' : '',
                        'abs_drop_rate' : '',
                RapidLog.info(self.report_result(flow_number, size,
                    iteration_data, iteration_prefix ))
            # Totals accumulated over the whole measurement loop.
            tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
            iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
            tot_lat_measurement_duration = float(0)
            iteration_data['actual_duration'] = float(0)
            tot_sut_core_measurement_duration = float(0)
            tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
            lat_avail = core_avail = sut_avail = False
            # Keep sampling until both core stats and latency stats cover
            # at least the requested duration.
            while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
                time_loop_data.update(self.gen_machine.lat_stats())
                # Get statistics after some execution time
                if time_loop_data['lat_tsc'] != t2_lat_tsc:
                    single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
                    # A second has passed in between two lat_stats requests. Hence we need to process the results
                    tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                    if iteration_data['lat_min'] > time_loop_data['lat_min']:
                        iteration_data['lat_min'] = time_loop_data['lat_min']
                    if iteration_data['lat_max'] < time_loop_data['lat_max']:
                        iteration_data['lat_max'] = time_loop_data['lat_max']
                    iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                    iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weight.
                    # Per-second percentile scan. NOTE(review): sample_count
                    # reset and the loop's 'break' were lost in extraction.
                    for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
                        sample_count += bucket
                        if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
                    time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
                    time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
                    # Merge this second's histogram into the iteration total.
                    iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
                    t2_lat_tsc = time_loop_data['lat_tsc']
                t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
                # NOTE(review): the guard checking that core stats advanced
                # (presumably t3_tsc != t2_tsc) was lost in extraction.
                    time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
                    iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
                    delta_rx = t3_rx - t2_rx
                    # NOTE(review): a 'tot_rx += delta_rx' style line
                    # appears lost here — confirm against the full file.
                    delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
                    tot_non_dp_rx += delta_non_dp_rx
                    delta_tx = t3_tx - t2_tx
                    # NOTE(review): a 'tot_tx += delta_tx' style line
                    # appears lost here — confirm against the full file.
                    delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
                    tot_non_dp_tx += delta_non_dp_tx
                    delta_dp_tx = delta_tx -delta_non_dp_tx
                    delta_dp_rx = delta_rx -delta_non_dp_rx
                    time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
                    iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
                    delta_drop = t3_drop - t2_drop
                    tot_drop += delta_drop
                    # Slide the window: end of this sample becomes the
                    # start of the next one.
                    t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                if self.sut_machine!=None:
                    t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
                    if t3_sut_tsc != t2_sut_tsc:
                        single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
                        tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                        tot_sut_rx += t3_sut_rx - t2_sut_rx
                        tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                        delta_sut_tx = t3_sut_tx - t2_sut_tx
                        tot_sut_tx += delta_sut_tx
                        delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                        tot_sut_non_dp_tx += delta_sut_non_dp_tx
                        t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                if self.test['test'] == 'fixed_rate':
                    # Fixed-rate tests report and post every sample for
                    # which both latency and core stats were refreshed.
                    if lat_avail == core_avail == True:
                        lat_avail = core_avail = False
                        time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
                        time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
                        if self.sut_machine != None and sut_avail:
                            time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
                        # NOTE(review): the 'else:' introducer (and any
                        # sut_avail reset) was lost in extraction.
                            time_loop_data['pps_sut_tx'] = None
                        time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
                        time_loop_data['abs_tx'] = delta_dp_tx
                        time_loop_data['abs_rx'] = delta_dp_rx
                        # NOTE(review): several entries of this prefix dict
                        # were lost in extraction.
                        time_loop_prefix = {'speed' : '',
                                'abs_drop_rate' : '',
                        RapidLog.info(self.report_result(flow_number, size, time_loop_data,
                        time_loop_data['test'] = self.test['testname']
                        time_loop_data['environment_file'] = self.test['environment_file']
                        time_loop_data['Flows'] = flow_number
                        time_loop_data['Size'] = size
                        time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
                        _ = self.post_data(time_loop_data)
            # Snapshot the background counters again and derive the average
            # background traffic rate over the iteration.
            end_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
                bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
                        "bg_dp_tx" : bg_tx - bg_non_dp_tx,
                end_bg_gen_stats.append(dict(bg_gen_stat))
            self.stop_background_traffic(self.background_machines)
            # NOTE(review): initialisation of i and bg_rates, and the i
            # increment inside the loop, were lost in extraction.
            while i < len(end_bg_gen_stats):
                # Mpps: dataplane RX delta divided by the elapsed seconds
                # (tsc delta over the machine's tsc frequency).
                bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
                    start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
                    start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
            # NOTE(review): the guard around the next two lines (presumably
            # 'if len(bg_rates):') was lost in extraction.
                iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
                RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
            # NOTE(review): 'else:' lost in extraction.
                iteration_data['avg_bg_rate'] = None
            self.gen_machine.stop_gen_cores()
            iteration_data['r'] += 1
            # Convert the duration-weighted sums into time-averaged values.
            iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
            iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
            # Wait for one final core_stats sample that differs from the
            # last loop sample. NOTE(review): the t4_tsc initialisation was
            # lost in extraction.
            while t4_tsc == t2_tsc:
                t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
            if self.test['test'] == 'fixed_rate':
                # Wait for one more latency sample, then recompute the
                # percentile from the fresh histogram.
                iteration_data['lat_tsc'] = t2_lat_tsc
                while iteration_data['lat_tsc'] == t2_lat_tsc:
                    iteration_data.update(self.gen_machine.lat_stats())
                # NOTE(review): sample_count reset and the loop's 'break'
                # were lost in extraction.
                for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                    sample_count += bucket
                    if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
                iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
                delta_rx = t4_rx - t2_rx
                delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
                delta_tx = t4_tx - t2_tx
                delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
                delta_dp_tx = delta_tx -delta_non_dp_tx
                delta_dp_rx = delta_rx -delta_non_dp_rx
                iteration_data['abs_tx'] = delta_dp_tx
                iteration_data['abs_rx'] = delta_dp_rx
                iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
                iteration_data['actual_duration'] = None
                break ## Not really needed since the while loop will stop when evaluating the value of r
            # NOTE(review): the 'else:' introducer (non fixed-rate tests)
            # was lost in extraction; the lines below derive the
            # iteration-level rates from the loop totals.
                for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                    sample_count += bucket
                    if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
                iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
                iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
                iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
                iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
                if self.sut_machine != None and sut_avail:
                    iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
                # NOTE(review): 'else:' lost in extraction.
                    iteration_data['pps_sut_tx'] = None
                iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
                iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
                iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
                iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
                # Stop iterating when the drop rate is under the threshold,
                # the zero-drop criterion is met, or 'maxz' is exceeded.
                # NOTE(review): the guarded body (presumably 'break') was
                # lost in extraction.
                if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
        self.gen_machine.stop_latency_cores()
        iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
        return (iteration_data)