4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
24 from past.utils import old_div
25 from rapid_log import RapidLog
26 from rapid_log import bcolors
28 from datetime import datetime as dt
30 class RapidTest(object):
32 Class to manage the testing
34 def __init__(self, test_param, runtime, testname, environment_file ):
35 self.test = test_param
36 self.test['runtime'] = runtime
37 self.test['testname'] = testname
38 self.test['environment_file'] = environment_file
39 if 'maxr' not in self.test.keys():
41 if 'maxz' not in self.test.keys():
42 self.test['maxz'] = inf
43 with open('format.yaml') as f:
44 self.data_format = yaml.load(f, Loader=yaml.FullLoader)
47 def get_percentageof10Gbps(pps_speed,size):
48 # speed is given in pps, returning % of 10Gb/s
49 # 12 bytes is the inter packet gap
50 # pre-amble is 7 bytes
51 # SFD (start of frame delimiter) is 1 byte
52 # Total of 20 bytes overhead per packet
53 return (pps_speed / 1000000.0 * 0.08 * (size+20))
56 def get_pps(speed,size):
57 # speed is given in % of 10Gb/s, returning Mpps
58 # 12 bytes is the inter packet gap
59 # pre-amble is 7 bytes
60 # SFD (start of frame delimiter) is 1 byte
61 # Total of 20 bytes overhead per packet
62 return (speed * 100.0 / (8*(size+20)))
65 def get_speed(packet_speed,size):
66 # return speed in Gb/s
67 # 12 bytes is the inter packet gap
68 # pre-amble is 7 bytes
69 # SFD (start of frame delimiter) is 1 byte
70 # Total of 20 bytes overhead per packet
71 return (packet_speed / 1000.0 * (8*(size+20)))
74 def set_background_flows(background_machines, number_of_flows):
75 for machine in background_machines:
76 _ = machine.set_flows(number_of_flows)
79 def set_background_speed(background_machines, speed):
80 for machine in background_machines:
81 machine.set_generator_speed(speed)
84 def set_background_size(background_machines, imix):
85 # imixs is a list of packet sizes
86 for machine in background_machines:
87 machine.set_udp_packet_size(imix)
90 def start_background_traffic(background_machines):
91 for machine in background_machines:
95 def stop_background_traffic(background_machines):
96 for machine in background_machines:
100 def parse_data_format_dict(data_format, variables):
101 for k, v in data_format.items():
103 RapidTest.parse_data_format_dict(v, variables)
105 if v in variables.keys():
106 data_format[k] = variables[v]
108 def record_start_time(self):
109 self.start = dt.now().strftime('%Y-%m-%d %H:%M:%S')
111 def record_stop_time(self):
112 self.stop = dt.now().strftime('%Y-%m-%d %H:%M:%S')
114 def post_data(self, test, variables):
    # Publish the results of one test iteration to the reporting target
    # described by self.data_format (loaded from format.yaml): either a
    # PushGateway-style plain-text post or an Xtesting JSON post.
    # NOTE(review): several original lines are missing from this chunk
    # (the early 'return's, the URL assembly, the Xtesting payload);
    # comments below only describe what the visible code demonstrates.
115 var = copy.deepcopy(self.data_format)
    # Replace placeholder names in the (copied) format dict with the
    # measured values supplied in 'variables'.
116 self.parse_data_format_dict(var, variables)
117 if 'URL' not in var.keys():
119 if test not in var.keys():
    # The target URL is built from all parts listed under var['URL'].
122 for value in var['URL'].values():
124 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
125 if 'Format' in var.keys():
126 if var['Format'] == 'PushGateway':
    # PushGateway expects one "<metric> <value>" pair per line.
127 data = "\n".join("{} {}".format(k, v) for k, v in var[test].items()) + "\n"
128 response = requests.post(url=URL, data=data,headers=HEADERS)
129 elif var['Format'] == 'Xtesting':
131 response = requests.post(url=URL, json=data)
    # Anything other than HTTP 200/202 is logged as a failed metrics push;
    # the test itself is not interrupted.
136 if (response.status_code != 202) and (response.status_code != 200):
137 RapidLog.info('Cannot send metrics to {}'.format(URL))
142 def report_result(flow_number, size, speed, pps_req_tx, pps_tx, pps_sut_tx,
143 pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, tx, rx, tot_drop,
144 elapsed_time,speed_prefix='', lat_avg_prefix='', lat_perc_prefix='',
145 lat_max_prefix='', abs_drop_rate_prefix='', drop_rate_prefix=''):
    # Build one formatted result row (a '|'-separated table line) from the
    # measurements of a single iteration. Each *_prefix argument carries an
    # ANSI color escape (see bcolors) used to highlight that field.
    # NOTE(review): the if/else lines wrapping most of the alternatives
    # below are missing from this chunk; each pair of assignments is
    # presumably an if/else on the corresponding value being None —
    # confirm against the full file.
    # A negative flow_number is rendered in parentheses.
147 flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
149 flow_number_str = '|{:>7} |'.format(flow_number)
    # Fields not measured this iteration are printed as 'NA'.
150 if pps_req_tx is None:
151 pps_req_tx_str = '{0: >14}'.format(' NA |')
153 pps_req_tx_str = '{:>7.3f} Mpps |'.format(pps_req_tx)
155 pps_tx_str = '{0: >14}'.format(' NA |')
157 pps_tx_str = '{:>7.3f} Mpps |'.format(pps_tx)
158 if pps_sut_tx is None:
159 pps_sut_tx_str = '{0: >14}'.format(' NA |')
161 pps_sut_tx_str = '{:>7.3f} Mpps |'.format(pps_sut_tx)
163 pps_rx_str = '{0: >25}'.format('NA |')
    # The RX column shows both Gb/s (derived from pps and frame size)
    # and Mpps, highlighted in blue.
165 pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
166 RapidTest.get_speed(pps_rx,size),pps_rx,bcolors.ENDC)
168 tot_drop_str = ' | NA | '
170 tot_drop_str = ' | {:>9.0f} | '.format(tot_drop)
172 lat_perc_str = ' |{:^10.10}|'.format('NA')
    # lat_perc_max means the percentile fell in the last latency bucket,
    # so the real value is only known to be '>' the reported one.
173 elif lat_perc_max == True:
174 lat_perc_str = '|>{}{:>5.0f} us{} |'.format(lat_perc_prefix,
175 float(lat_perc), bcolors.ENDC)
177 lat_perc_str = '| {}{:>5.0f} us{} |'.format(lat_perc_prefix,
178 float(lat_perc), bcolors.ENDC)
179 if elapsed_time is None:
180 elapsed_time_str = ' NA |'
182 elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
    # Assemble the full row: requested speed (% and Mpps), the three TX
    # counters, RX, latencies, absolute packet counts and drop statistics.
183 return(flow_number_str + '{:>5.1f}'.format(speed) + '% ' + speed_prefix
184 + '{:>6.3f}'.format(RapidTest.get_pps(speed,size)) + ' Mpps|' +
185 pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
186 pps_rx_str + lat_avg_prefix + ' {:>6.0f}'.format(lat_avg) +
187 ' us' + lat_perc_str +lat_max_prefix+'{:>6.0f}'.format(lat_max)
188 + ' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) +
189 ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) +
190 tot_drop_str +drop_rate_prefix +
191 '{:>5.2f}'.format(100*old_div(float(tx-rx),tx)) + bcolors.ENDC +
192 ' |' + elapsed_time_str)
194 def run_iteration(self, requested_duration, flow_number, size, speed):
    # Run one measurement iteration at the given speed/size/flow count:
    # start the generator cores, sample latency and core statistics until
    # requested_duration has elapsed, stop the cores, and return the
    # aggregated rates, latency percentiles and drop statistics.
    # NOTE(review): many lines of this method are missing from this chunk
    # (initialisation of r, sleep_time, tot_lat_samples, sample_count,
    # several if/else and break lines); comments below only describe what
    # the visible code demonstrates.
195 BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
196 LAT_PERCENTILE = self.test['lat_percentile']
    # Retry loop: up to self.test['maxr'] attempts of the same iteration.
199 while (r < self.test['maxr']):
200 time.sleep(sleep_time)
201 # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
    # t1_*: generator counters before traffic generation starts; the
    # 'non_dp' counters exclude non-dataplane packets (e.g. ARP).
202 t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
203 t1_dp_rx = t1_rx - t1_non_dp_rx
204 t1_dp_tx = t1_tx - t1_non_dp_tx
205 self.gen_machine.start_gen_cores()
206 time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
207 if self.sut_machine!= None:
208 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
    # t2_*: second snapshot, taken after generation has started.
209 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
211 dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx )
212 dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
213 tot_dp_drop = dp_tx - dp_rx
    # Abort when nothing (or only non-dataplane traffic) was sent.
215 RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
217 RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
218 # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
219 # Measure latency statistics per second
220 lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
221 lat_samples = sum(buckets)
    # Walk the latency histogram until LAT_PERCENTILE of all samples is
    # reached; the bucket index is then converted to microseconds.
223 for sample_percentile, bucket in enumerate(buckets,start=1):
224 sample_count += bucket
225 if sample_count > (lat_samples * LAT_PERCENTILE):
227 percentile_max = (sample_percentile == len(buckets))
228 sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
229 if self.test['test'] == 'fixed_rate':
230 RapidLog.info(self.report_result(flow_number,size,speed,None,None,None,None,lat_avg,sample_percentile,percentile_max,lat_max, dp_tx, dp_rx , None, None))
    # Reset the aggregation totals for the main measurement loop.
231 tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
232 lat_avg = used_avg = 0
233 buckets_total = [0] * 128
235 tot_lat_measurement_duration = float(0)
236 tot_core_measurement_duration = float(0)
237 tot_sut_core_measurement_duration = float(0)
238 tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
239 lat_avail = core_avail = sut_avail = False
    # Main loop: keep sampling until both the core and the latency
    # measurements cover at least the requested duration.
240 while (tot_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
242 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
243 # Get statistics after some execution time
244 if t3_lat_tsc != t2_lat_tsc:
245 single_lat_measurement_duration = (t3_lat_tsc - t2_lat_tsc) * 1.0 / lat_hz # time difference between the 2 measurements, expressed in seconds.
246 # A second has passed in between to lat_stats requests. Hence we need to process the results
247 tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
248 if lat_min > lat_min_sample:
249 lat_min = lat_min_sample
250 if lat_max < lat_max_sample:
251 lat_max = lat_max_sample
252 lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
253 used_avg = used_avg + used_sample * single_lat_measurement_duration # and give it more weigth.
254 lat_samples = sum(buckets)
255 tot_lat_samples += lat_samples
    # Per-sample percentile over this interval's histogram.
257 for sample_percentile, bucket in enumerate(buckets,start=1):
258 sample_count += bucket
259 if sample_count > lat_samples * LAT_PERCENTILE:
261 percentile_max = (sample_percentile == len(buckets))
262 sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
263 buckets_total = [buckets_total[i] + buckets[i] for i in range(len(buckets_total))]
264 t2_lat_tsc = t3_lat_tsc
    # t3_*: next generator core-statistics snapshot; deltas vs t2_* are
    # accumulated into the tot_* counters.
266 t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
268 single_core_measurement_duration = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
269 tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
270 delta_rx = t3_rx - t2_rx
272 delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
273 tot_non_dp_rx += delta_non_dp_rx
274 delta_tx = t3_tx - t2_tx
276 delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
277 tot_non_dp_tx += delta_non_dp_tx
278 delta_dp_tx = delta_tx -delta_non_dp_tx
279 delta_dp_rx = delta_rx -delta_non_dp_rx
280 delta_dp_drop = delta_dp_tx - delta_dp_rx
281 tot_dp_drop += delta_dp_drop
282 delta_drop = t3_drop - t2_drop
283 tot_drop += delta_drop
284 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
    # Same sampling/accumulation for the system under test, if present.
286 if self.sut_machine!=None:
287 t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
288 if t3_sut_tsc != t2_sut_tsc:
289 single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
290 tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
291 tot_sut_rx += t3_sut_rx - t2_sut_rx
292 tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
293 delta_sut_tx = t3_sut_tx - t2_sut_tx
294 tot_sut_tx += delta_sut_tx
295 delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
296 tot_sut_non_dp_tx += delta_sut_non_dp_tx
297 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
    # For fixed_rate tests, report and post each per-interval result.
299 if self.test['test'] == 'fixed_rate':
300 if lat_avail == core_avail == True:
301 lat_avail = core_avail = False
302 pps_req_tx = (delta_tx + delta_drop - delta_rx)/single_core_measurement_duration/1000000
303 pps_tx = delta_tx/single_core_measurement_duration/1000000
304 if self.sut_machine != None and sut_avail:
305 pps_sut_tx = delta_sut_tx/single_sut_core_measurement_duration/1000000
309 pps_rx = delta_rx/single_core_measurement_duration/1000000
310 RapidLog.info(self.report_result(flow_number, size,
311 speed, pps_req_tx, pps_tx, pps_sut_tx, pps_rx,
312 lat_avg_sample, sample_percentile, percentile_max,
313 lat_max_sample, delta_dp_tx, delta_dp_rx,
314 tot_dp_drop, single_core_measurement_duration))
316 'Flows': flow_number,
318 'RequestedSpeed': self.get_pps(speed,size),
319 'CoreGenerated': pps_req_tx,
321 'FwdBySUT': pps_sut_tx,
323 'AvgLatency': lat_avg_sample,
324 'PCTLatency': sample_percentile,
325 'MaxLatency': lat_max_sample,
326 'PacketsSent': delta_dp_tx,
327 'PacketsReceived': delta_dp_rx,
328 'PacketsLost': tot_dp_drop}
329 self.post_data('rapid_flowsizetest', variables)
331 self.gen_machine.stop_gen_cores()
    # Normalise the duration-weighted latency sums into averages.
333 lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
334 used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
    # t4_*: final snapshot; spin until PROX refreshed its stats.
336 while t4_tsc == t2_tsc:
337 t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
338 if self.test['test'] == 'fixed_rate':
339 t4_lat_tsc = t2_lat_tsc
340 while t4_lat_tsc == t2_lat_tsc:
341 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
343 lat_samples = sum(buckets)
344 for percentile, bucket in enumerate(buckets,start=1):
345 sample_count += bucket
346 if sample_count > lat_samples * LAT_PERCENTILE:
348 percentile_max = (percentile == len(buckets))
349 percentile = percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
350 lat_max = lat_max_sample
351 lat_avg = lat_avg_sample
352 delta_rx = t4_rx - t2_rx
353 delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
354 delta_tx = t4_tx - t2_tx
355 delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
356 delta_dp_tx = delta_tx -delta_non_dp_tx
357 delta_dp_rx = delta_rx -delta_non_dp_rx
360 tot_dp_drop += delta_dp_tx - delta_dp_rx
365 drop_rate = 100.0*(dp_tx-dp_rx)/dp_tx
366 tot_core_measurement_duration = None
367 break ## Not really needed since the while loop will stop when evaluating the value of r
    # Overall percentile computed over the summed histograms of the run.
370 for percentile, bucket in enumerate(buckets_total,start=1):
371 sample_count += bucket
372 if sample_count > tot_lat_samples * LAT_PERCENTILE:
374 percentile_max = (percentile == len(buckets_total))
375 percentile = percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
376 pps_req_tx = (tot_tx + tot_drop - tot_rx)/tot_core_measurement_duration/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
377 pps_tx = tot_tx/tot_core_measurement_duration/1000000.0 # tot_tx is all generated packets actually accepted by the interface
378 pps_rx = tot_rx/tot_core_measurement_duration/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
379 if self.sut_machine != None and sut_avail:
380 pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
    # Dataplane totals across the whole run (t1 -> t4) and the resulting
    # drop rate in percent.
383 dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
384 dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
385 tot_dp_drop = dp_tx - dp_rx
386 drop_rate = 100.0*tot_dp_drop/dp_tx
    # Success criterion: drop rate under the threshold, or a zero-loss
    # requirement met exactly, or loss beyond maxz (give up retrying).
387 if ((drop_rate < self.test['drop_rate_threshold']) or (tot_dp_drop == self.test['drop_rate_threshold'] ==0) or (tot_dp_drop > self.test['maxz'])):
389 return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,percentile,percentile_max,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration)