These changes are the raw update to qemu-2.6.
[kvmfornfv.git] / qemu / target-ppc / fpu_helper.c
1 /*
2  *  PowerPC floating point and SPE emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "exec/helper-proto.h"
22
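/* Converting a signalling NaN to the corresponding quiet NaN only requires
 * setting the most-significant fraction bit (IEEE 754-2008 quieting), which
 * is what these masks do for the double and single formats. */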
23 #define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
24 #define float32_snan_to_qnan(x) ((x) | 0x00400000)
25
26 /*****************************************************************************/
27 /* Floating point operations helpers */
28 uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
29 {
30     CPU_FloatU f;
31     CPU_DoubleU d;
32
33     f.l = arg;
34     d.d = float32_to_float64(f.f, &env->fp_status);
35     return d.ll;
36 }
37
38 uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
39 {
40     CPU_FloatU f;
41     CPU_DoubleU d;
42
43     d.ll = arg;
44     f.f = float64_to_float32(d.d, &env->fp_status);
45     return f.l;
46 }
47
48 static inline int isden(float64 d)
49 {
50     CPU_DoubleU u;
51
52     u.d = d;
53
54     return ((u.ll >> 52) & 0x7FF) == 0;
55 }
56
57 static inline int ppc_float32_get_unbiased_exp(float32 f)
58 {
59     return ((f >> 23) & 0xFF) - 127;
60 }
61
62 static inline int ppc_float64_get_unbiased_exp(float64 f)
63 {
64     return ((f >> 52) & 0x7FF) - 1023;
65 }
66
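/* FPSCR[FPRF] is a 5-bit result-class field: the C bit plus the four FPCC
 * bits (FL, FG, FE, FU).  The constants set below follow the Power ISA
 * encoding, e.g. 0x11 = quiet NaN, 0x05/0x09 = +/- infinity,
 * 0x02/0x12 = +/- zero, 0x14/0x18 = +/- denormal, 0x04/0x08 = +/- normal. */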
67 void helper_compute_fprf(CPUPPCState *env, uint64_t arg)
68 {
69     CPU_DoubleU farg;
70     int isneg;
71     int fprf;
72
73     farg.ll = arg;
74     isneg = float64_is_neg(farg.d);
75     if (unlikely(float64_is_any_nan(farg.d))) {
76         if (float64_is_signaling_nan(farg.d)) {
77             /* Signaling NaN: flags are undefined */
78             fprf = 0x00;
79         } else {
80             /* Quiet NaN */
81             fprf = 0x11;
82         }
83     } else if (unlikely(float64_is_infinity(farg.d))) {
84         /* +/- infinity */
85         if (isneg) {
86             fprf = 0x09;
87         } else {
88             fprf = 0x05;
89         }
90     } else {
91         if (float64_is_zero(farg.d)) {
92             /* +/- zero */
93             if (isneg) {
94                 fprf = 0x12;
95             } else {
96                 fprf = 0x02;
97             }
98         } else {
99             if (isden(farg.d)) {
100                 /* Denormalized numbers */
101                 fprf = 0x10;
102             } else {
103                 /* Normalized numbers */
104                 fprf = 0x00;
105             }
106             if (isneg) {
107                 fprf |= 0x08;
108             } else {
109                 fprf |= 0x04;
110             }
111         }
112     }
113     /* We update FPSCR_FPRF */
114     env->fpscr &= ~(0x1F << FPSCR_FPRF);
115     env->fpscr |= fprf << FPSCR_FPRF;
116 }
117
118 /* Floating-point invalid operations exception */
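/* Sets the relevant FPSCR[VX*] bit for the invalid operation 'op'.  For the
 * arithmetic invalid operations it returns the default quiet NaN when
 * FPSCR[VE] is clear; when the exception is enabled and MSR[FE0|FE1] allow
 * it, a program interrupt is raised instead.  VXVC is special-cased so the
 * target FPR and CR can be updated before the interrupt is delivered. */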
119 static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
120                                              int set_fpcc)
121 {
122     CPUState *cs = CPU(ppc_env_get_cpu(env));
123     uint64_t ret = 0;
124     int ve;
125
126     ve = fpscr_ve;
127     switch (op) {
128     case POWERPC_EXCP_FP_VXSNAN:
129         env->fpscr |= 1 << FPSCR_VXSNAN;
130         break;
131     case POWERPC_EXCP_FP_VXSOFT:
132         env->fpscr |= 1 << FPSCR_VXSOFT;
133         break;
134     case POWERPC_EXCP_FP_VXISI:
135         /* Magnitude subtraction of infinities */
136         env->fpscr |= 1 << FPSCR_VXISI;
137         goto update_arith;
138     case POWERPC_EXCP_FP_VXIDI:
139         /* Division of infinity by infinity */
140         env->fpscr |= 1 << FPSCR_VXIDI;
141         goto update_arith;
142     case POWERPC_EXCP_FP_VXZDZ:
143         /* Division of zero by zero */
144         env->fpscr |= 1 << FPSCR_VXZDZ;
145         goto update_arith;
146     case POWERPC_EXCP_FP_VXIMZ:
147         /* Multiplication of zero by infinity */
148         env->fpscr |= 1 << FPSCR_VXIMZ;
149         goto update_arith;
150     case POWERPC_EXCP_FP_VXVC:
151         /* Ordered comparison of NaN */
152         env->fpscr |= 1 << FPSCR_VXVC;
153         if (set_fpcc) {
154             env->fpscr &= ~(0xF << FPSCR_FPCC);
155             env->fpscr |= 0x11 << FPSCR_FPCC;
156         }
157         /* We must update the target FPR before raising the exception */
158         if (ve != 0) {
159             cs->exception_index = POWERPC_EXCP_PROGRAM;
160             env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
161             /* Update the floating-point enabled exception summary */
162             env->fpscr |= 1 << FPSCR_FEX;
163             /* Exception is deferred */
164             ve = 0;
165         }
166         break;
167     case POWERPC_EXCP_FP_VXSQRT:
168         /* Square root of a negative number */
169         env->fpscr |= 1 << FPSCR_VXSQRT;
170     update_arith:
171         env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
172         if (ve == 0) {
173             /* Set the result to quiet NaN */
174             ret = 0x7FF8000000000000ULL;
175             if (set_fpcc) {
176                 env->fpscr &= ~(0xF << FPSCR_FPCC);
177                 env->fpscr |= 0x11 << FPSCR_FPCC;
178             }
179         }
180         break;
181     case POWERPC_EXCP_FP_VXCVI:
182         /* Invalid conversion */
183         env->fpscr |= 1 << FPSCR_VXCVI;
184         env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
185         if (ve == 0) {
186             /* Set the result to quiet NaN */
187             ret = 0x7FF8000000000000ULL;
188             if (set_fpcc) {
189                 env->fpscr &= ~(0xF << FPSCR_FPCC);
190                 env->fpscr |= 0x11 << FPSCR_FPCC;
191             }
192         }
193         break;
194     }
195     /* Update the floating-point invalid operation summary */
196     env->fpscr |= 1 << FPSCR_VX;
197     /* Update the floating-point exception summary */
198     env->fpscr |= FP_FX;
199     if (ve != 0) {
200         /* Update the floating-point enabled exception summary */
201         env->fpscr |= 1 << FPSCR_FEX;
202         if (msr_fe0 != 0 || msr_fe1 != 0) {
203             helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
204                                        POWERPC_EXCP_FP | op);
205         }
206     }
207     return ret;
208 }
209
210 static inline void float_zero_divide_excp(CPUPPCState *env)
211 {
212     env->fpscr |= 1 << FPSCR_ZX;
213     env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
214     /* Update the floating-point exception summary */
215     env->fpscr |= FP_FX;
216     if (fpscr_ze != 0) {
217         /* Update the floating-point enabled exception summary */
218         env->fpscr |= 1 << FPSCR_FEX;
219         if (msr_fe0 != 0 || msr_fe1 != 0) {
220             helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
221                                        POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
222         }
223     }
224 }
225
226 static inline void float_overflow_excp(CPUPPCState *env)
227 {
228     CPUState *cs = CPU(ppc_env_get_cpu(env));
229
230     env->fpscr |= 1 << FPSCR_OX;
231     /* Update the floating-point exception summary */
232     env->fpscr |= FP_FX;
233     if (fpscr_oe != 0) {
234         /* XXX: should adjust the result */
235         /* Update the floating-point enabled exception summary */
236         env->fpscr |= 1 << FPSCR_FEX;
237         /* We must update the target FPR before raising the exception */
238         cs->exception_index = POWERPC_EXCP_PROGRAM;
239         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
240     } else {
241         env->fpscr |= 1 << FPSCR_XX;
242         env->fpscr |= 1 << FPSCR_FI;
243     }
244 }
245
246 static inline void float_underflow_excp(CPUPPCState *env)
247 {
248     CPUState *cs = CPU(ppc_env_get_cpu(env));
249
250     env->fpscr |= 1 << FPSCR_UX;
251     /* Update the floating-point exception summary */
252     env->fpscr |= FP_FX;
253     if (fpscr_ue != 0) {
254         /* XXX: should adjust the result */
255         /* Update the floating-point enabled exception summary */
256         env->fpscr |= 1 << FPSCR_FEX;
257         /* We must update the target FPR before raising the exception */
258         cs->exception_index = POWERPC_EXCP_PROGRAM;
259         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
260     }
261 }
262
263 static inline void float_inexact_excp(CPUPPCState *env)
264 {
265     CPUState *cs = CPU(ppc_env_get_cpu(env));
266
267     env->fpscr |= 1 << FPSCR_XX;
268     /* Update the floating-point exception summary */
269     env->fpscr |= FP_FX;
270     if (fpscr_xe != 0) {
271         /* Update the floating-point enabled exception summary */
272         env->fpscr |= 1 << FPSCR_FEX;
273         /* We must update the target FPR before raising the exception */
274         cs->exception_index = POWERPC_EXCP_PROGRAM;
275         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
276     }
277 }
278
279 static inline void fpscr_set_rounding_mode(CPUPPCState *env)
280 {
281     int rnd_type;
282
283     /* Set rounding mode */
284     switch (fpscr_rn) {
285     case 0:
286         /* Best approximation (round to nearest) */
287         rnd_type = float_round_nearest_even;
288         break;
289     case 1:
290         /* Smaller magnitude (round toward zero) */
291         rnd_type = float_round_to_zero;
292         break;
293     case 2:
294         /* Round toward +infinity */
295         rnd_type = float_round_up;
296         break;
297     default:
298     case 3:
299         /* Round toward -infinity */
300         rnd_type = float_round_down;
301         break;
302     }
303     set_float_rounding_mode(rnd_type, &env->fp_status);
304 }
305
306 void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
307 {
308     int prev;
309
310     prev = (env->fpscr >> bit) & 1;
311     env->fpscr &= ~(1 << bit);
312     if (prev == 1) {
313         switch (bit) {
314         case FPSCR_RN1:
315         case FPSCR_RN:
316             fpscr_set_rounding_mode(env);
317             break;
318         default:
319             break;
320         }
321     }
322 }
323
324 void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
325 {
326     CPUState *cs = CPU(ppc_env_get_cpu(env));
327     int prev;
328
329     prev = (env->fpscr >> bit) & 1;
330     env->fpscr |= 1 << bit;
331     if (prev == 0) {
332         switch (bit) {
333         case FPSCR_VX:
334             env->fpscr |= FP_FX;
335             if (fpscr_ve) {
336                 goto raise_ve;
337             }
338             break;
339         case FPSCR_OX:
340             env->fpscr |= FP_FX;
341             if (fpscr_oe) {
342                 goto raise_oe;
343             }
344             break;
345         case FPSCR_UX:
346             env->fpscr |= FP_FX;
347             if (fpscr_ue) {
348                 goto raise_ue;
349             }
350             break;
351         case FPSCR_ZX:
352             env->fpscr |= FP_FX;
353             if (fpscr_ze) {
354                 goto raise_ze;
355             }
356             break;
357         case FPSCR_XX:
358             env->fpscr |= FP_FX;
359             if (fpscr_xe) {
360                 goto raise_xe;
361             }
362             break;
363         case FPSCR_VXSNAN:
364         case FPSCR_VXISI:
365         case FPSCR_VXIDI:
366         case FPSCR_VXZDZ:
367         case FPSCR_VXIMZ:
368         case FPSCR_VXVC:
369         case FPSCR_VXSOFT:
370         case FPSCR_VXSQRT:
371         case FPSCR_VXCVI:
372             env->fpscr |= 1 << FPSCR_VX;
373             env->fpscr |= FP_FX;
374             if (fpscr_ve != 0) {
375                 goto raise_ve;
376             }
377             break;
378         case FPSCR_VE:
379             if (fpscr_vx != 0) {
380             raise_ve:
381                 env->error_code = POWERPC_EXCP_FP;
382                 if (fpscr_vxsnan) {
383                     env->error_code |= POWERPC_EXCP_FP_VXSNAN;
384                 }
385                 if (fpscr_vxisi) {
386                     env->error_code |= POWERPC_EXCP_FP_VXISI;
387                 }
388                 if (fpscr_vxidi) {
389                     env->error_code |= POWERPC_EXCP_FP_VXIDI;
390                 }
391                 if (fpscr_vxzdz) {
392                     env->error_code |= POWERPC_EXCP_FP_VXZDZ;
393                 }
394                 if (fpscr_vximz) {
395                     env->error_code |= POWERPC_EXCP_FP_VXIMZ;
396                 }
397                 if (fpscr_vxvc) {
398                     env->error_code |= POWERPC_EXCP_FP_VXVC;
399                 }
400                 if (fpscr_vxsoft) {
401                     env->error_code |= POWERPC_EXCP_FP_VXSOFT;
402                 }
403                 if (fpscr_vxsqrt) {
404                     env->error_code |= POWERPC_EXCP_FP_VXSQRT;
405                 }
406                 if (fpscr_vxcvi) {
407                     env->error_code |= POWERPC_EXCP_FP_VXCVI;
408                 }
409                 goto raise_excp;
410             }
411             break;
412         case FPSCR_OE:
413             if (fpscr_ox != 0) {
414             raise_oe:
415                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
416                 goto raise_excp;
417             }
418             break;
419         case FPSCR_UE:
420             if (fpscr_ux != 0) {
421             raise_ue:
422                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
423                 goto raise_excp;
424             }
425             break;
426         case FPSCR_ZE:
427             if (fpscr_zx != 0) {
428             raise_ze:
429                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
430                 goto raise_excp;
431             }
432             break;
433         case FPSCR_XE:
434             if (fpscr_xx != 0) {
435             raise_xe:
436                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
437                 goto raise_excp;
438             }
439             break;
440         case FPSCR_RN1:
441         case FPSCR_RN:
442             fpscr_set_rounding_mode(env);
443             break;
444         default:
445             break;
446         raise_excp:
447             /* Update the floating-point enabled exception summary */
448             env->fpscr |= 1 << FPSCR_FEX;
449             /* We have to update Rc1 before raising the exception */
450             cs->exception_index = POWERPC_EXCP_PROGRAM;
451             break;
452         }
453     }
454 }
455
456 void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
457 {
458     CPUState *cs = CPU(ppc_env_get_cpu(env));
459     target_ulong prev, new;
460     int i;
461
462     prev = env->fpscr;
463     new = (target_ulong)arg;
464     new &= ~0x60000000LL;
465     new |= prev & 0x60000000LL;
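    /* Each set bit i of 'mask' selects the 4-bit FPSCR field at bits
     * 4*i..4*i+3 of the new value; unselected fields are left untouched. */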
466     for (i = 0; i < sizeof(target_ulong) * 2; i++) {
467         if (mask & (1 << i)) {
468             env->fpscr &= ~(0xFLL << (4 * i));
469             env->fpscr |= new & (0xFLL << (4 * i));
470         }
471     }
472     /* Update VX and FEX */
473     if (fpscr_ix != 0) {
474         env->fpscr |= 1 << FPSCR_VX;
475     } else {
476         env->fpscr &= ~(1 << FPSCR_VX);
477     }
478     if ((fpscr_ex & fpscr_eex) != 0) {
479         env->fpscr |= 1 << FPSCR_FEX;
480         cs->exception_index = POWERPC_EXCP_PROGRAM;
481         /* XXX: we should compute it properly */
482         env->error_code = POWERPC_EXCP_FP;
483     } else {
484         env->fpscr &= ~(1 << FPSCR_FEX);
485     }
486     fpscr_set_rounding_mode(env);
487 }
488
489 void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
490 {
491     helper_store_fpscr(env, arg, mask);
492 }
493
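/* Translate the accrued softfloat exception flags into the corresponding
 * FPSCR status bits, then deliver any program interrupt that an earlier
 * helper queued up (deferred so that the target FPR is written first). */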
494 void helper_float_check_status(CPUPPCState *env)
495 {
496     CPUState *cs = CPU(ppc_env_get_cpu(env));
497     int status = get_float_exception_flags(&env->fp_status);
498
499     if (status & float_flag_divbyzero) {
500         float_zero_divide_excp(env);
501     } else if (status & float_flag_overflow) {
502         float_overflow_excp(env);
503     } else if (status & float_flag_underflow) {
504         float_underflow_excp(env);
505     } else if (status & float_flag_inexact) {
506         float_inexact_excp(env);
507     }
508
509     if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
510         (env->error_code & POWERPC_EXCP_FP)) {
511         /* Deferred floating-point exception after target FPR update */
512         if (msr_fe0 != 0 || msr_fe1 != 0) {
513             helper_raise_exception_err(env, cs->exception_index,
514                                        env->error_code);
515         }
516     }
517 }
518
519 void helper_reset_fpstatus(CPUPPCState *env)
520 {
521     set_float_exception_flags(0, &env->fp_status);
522 }
523
524 /* fadd - fadd. */
525 uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
526 {
527     CPU_DoubleU farg1, farg2;
528
529     farg1.ll = arg1;
530     farg2.ll = arg2;
531
532     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
533                  float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
534         /* Magnitude subtraction of infinities */
535         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
536     } else {
537         if (unlikely(float64_is_signaling_nan(farg1.d) ||
538                      float64_is_signaling_nan(farg2.d))) {
539             /* sNaN addition */
540             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
541         }
542         farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
543     }
544
545     return farg1.ll;
546 }
547
548 /* fsub - fsub. */
549 uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
550 {
551     CPU_DoubleU farg1, farg2;
552
553     farg1.ll = arg1;
554     farg2.ll = arg2;
555
556     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
557                  float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
558         /* Magnitude subtraction of infinities */
559         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
560     } else {
561         if (unlikely(float64_is_signaling_nan(farg1.d) ||
562                      float64_is_signaling_nan(farg2.d))) {
563             /* sNaN subtraction */
564             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
565         }
566         farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
567     }
568
569     return farg1.ll;
570 }
571
572 /* fmul - fmul. */
573 uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
574 {
575     CPU_DoubleU farg1, farg2;
576
577     farg1.ll = arg1;
578     farg2.ll = arg2;
579
580     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
581                  (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
582         /* Multiplication of zero by infinity */
583         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
584     } else {
585         if (unlikely(float64_is_signaling_nan(farg1.d) ||
586                      float64_is_signaling_nan(farg2.d))) {
587             /* sNaN multiplication */
588             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
589         }
590         farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
591     }
592
593     return farg1.ll;
594 }
595
596 /* fdiv - fdiv. */
597 uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
598 {
599     CPU_DoubleU farg1, farg2;
600
601     farg1.ll = arg1;
602     farg2.ll = arg2;
603
604     if (unlikely(float64_is_infinity(farg1.d) &&
605                  float64_is_infinity(farg2.d))) {
606         /* Division of infinity by infinity */
607         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
608     } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
609         /* Division of zero by zero */
610         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
611     } else {
612         if (unlikely(float64_is_signaling_nan(farg1.d) ||
613                      float64_is_signaling_nan(farg2.d))) {
614             /* sNaN division */
615             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
616         }
617         farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
618     }
619
620     return farg1.ll;
621 }
622
623
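/* Float-to-integer conversions (fctiw/fctid and friends).  On a NaN input
 * the result is forced to 'nanval' (the most negative integer for signed
 * conversions, zero for unsigned ones) and VXCVI/VXSNAN are raised; any
 * other invalid conversion just raises VXCVI. */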
624 #define FPU_FCTI(op, cvt, nanval)                                      \
625 uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
626 {                                                                      \
627     CPU_DoubleU farg;                                                  \
628                                                                        \
629     farg.ll = arg;                                                     \
630     farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
631                                                                        \
632     if (unlikely(env->fp_status.float_exception_flags)) {              \
633         if (float64_is_any_nan(arg)) {                                 \
634             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
635             if (float64_is_signaling_nan(arg)) {                       \
636                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
637             }                                                          \
638             farg.ll = nanval;                                          \
639         } else if (env->fp_status.float_exception_flags &              \
640                    float_flag_invalid) {                               \
641             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
642         }                                                              \
643         helper_float_check_status(env);                                \
644     }                                                                  \
645     return farg.ll;                                                    \
646 }
647
648 FPU_FCTI(fctiw, int32, 0x80000000U)
649 FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
650 FPU_FCTI(fctiwu, uint32, 0x00000000U)
651 FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
652 FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
653 FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
654 FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
655 FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
656
657 #define FPU_FCFI(op, cvtr, is_single)                      \
658 uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
659 {                                                          \
660     CPU_DoubleU farg;                                      \
661                                                            \
662     if (is_single) {                                       \
663         float32 tmp = cvtr(arg, &env->fp_status);          \
664         farg.d = float32_to_float64(tmp, &env->fp_status); \
665     } else {                                               \
666         farg.d = cvtr(arg, &env->fp_status);               \
667     }                                                      \
668     helper_float_check_status(env);                        \
669     return farg.ll;                                        \
670 }
671
672 FPU_FCFI(fcfid, int64_to_float64, 0)
673 FPU_FCFI(fcfids, int64_to_float32, 1)
674 FPU_FCFI(fcfidu, uint64_to_float64, 0)
675 FPU_FCFI(fcfidus, uint64_to_float32, 1)
676
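/* Common code for the frin/friz/frip/frim round-to-integer helpers: round in
 * the requested mode, then restore the FPSCR rounding mode.  The pre-existing
 * inexact flag is sampled first because fri* must not set FPSCR[XX], so an
 * inexact raised by this rounding alone is discarded. */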
677 static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
678                               int rounding_mode)
679 {
680     CPU_DoubleU farg;
681
682     farg.ll = arg;
683
684     if (unlikely(float64_is_signaling_nan(farg.d))) {
685         /* sNaN round */
686         fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
687         farg.ll = float64_snan_to_qnan(arg);
688     } else {
689         int inexact = get_float_exception_flags(&env->fp_status) &
690                       float_flag_inexact;
691         set_float_rounding_mode(rounding_mode, &env->fp_status);
692         farg.ll = float64_round_to_int(farg.d, &env->fp_status);
693         /* Restore rounding mode from FPSCR */
694         fpscr_set_rounding_mode(env);
695
696         /* fri* does not set FPSCR[XX] */
697         if (!inexact) {
698             env->fp_status.float_exception_flags &= ~float_flag_inexact;
699         }
700     }
701     helper_float_check_status(env);
702     return farg.ll;
703 }
704
705 uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
706 {
707     return do_fri(env, arg, float_round_ties_away);
708 }
709
710 uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
711 {
712     return do_fri(env, arg, float_round_to_zero);
713 }
714
715 uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
716 {
717     return do_fri(env, arg, float_round_up);
718 }
719
720 uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
721 {
722     return do_fri(env, arg, float_round_down);
723 }
724
725 /* fmadd - fmadd. */
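/* The product is formed in 128-bit precision and only the final add/sub
 * result is rounded back to float64, which gives the single-rounding
 * behaviour the architecture requires for the fused multiply-add family. */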
726 uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
727                       uint64_t arg3)
728 {
729     CPU_DoubleU farg1, farg2, farg3;
730
731     farg1.ll = arg1;
732     farg2.ll = arg2;
733     farg3.ll = arg3;
734
735     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
736                  (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
737         /* Multiplication of zero by infinity */
738         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
739     } else {
740         if (unlikely(float64_is_signaling_nan(farg1.d) ||
741                      float64_is_signaling_nan(farg2.d) ||
742                      float64_is_signaling_nan(farg3.d))) {
743             /* sNaN operation */
744             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
745         }
746         /* This is the way the PowerPC specification defines it */
747         float128 ft0_128, ft1_128;
748
749         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
750         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
751         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
752         if (unlikely(float128_is_infinity(ft0_128) &&
753                      float64_is_infinity(farg3.d) &&
754                      float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
755             /* Magnitude subtraction of infinities */
756             farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
757         } else {
758             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
759             ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
760             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
761         }
762     }
763
764     return farg1.ll;
765 }
766
767 /* fmsub - fmsub. */
768 uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
769                       uint64_t arg3)
770 {
771     CPU_DoubleU farg1, farg2, farg3;
772
773     farg1.ll = arg1;
774     farg2.ll = arg2;
775     farg3.ll = arg3;
776
777     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
778                  (float64_is_zero(farg1.d) &&
779                   float64_is_infinity(farg2.d)))) {
780         /* Multiplication of zero by infinity */
781         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
782     } else {
783         if (unlikely(float64_is_signaling_nan(farg1.d) ||
784                      float64_is_signaling_nan(farg2.d) ||
785                      float64_is_signaling_nan(farg3.d))) {
786             /* sNaN operation */
787             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
788         }
789         /* This is the way the PowerPC specification defines it */
790         float128 ft0_128, ft1_128;
791
792         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
793         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
794         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
795         if (unlikely(float128_is_infinity(ft0_128) &&
796                      float64_is_infinity(farg3.d) &&
797                      float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
798             /* Magnitude subtraction of infinities */
799             farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
800         } else {
801             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
802             ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
803             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
804         }
805     }
806     return farg1.ll;
807 }
808
809 /* fnmadd - fnmadd. */
810 uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
811                        uint64_t arg3)
812 {
813     CPU_DoubleU farg1, farg2, farg3;
814
815     farg1.ll = arg1;
816     farg2.ll = arg2;
817     farg3.ll = arg3;
818
819     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
820                  (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
821         /* Multiplication of zero by infinity */
822         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
823     } else {
824         if (unlikely(float64_is_signaling_nan(farg1.d) ||
825                      float64_is_signaling_nan(farg2.d) ||
826                      float64_is_signaling_nan(farg3.d))) {
827             /* sNaN operation */
828             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
829         }
830         /* This is the way the PowerPC specification defines it */
831         float128 ft0_128, ft1_128;
832
833         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
834         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
835         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
836         if (unlikely(float128_is_infinity(ft0_128) &&
837                      float64_is_infinity(farg3.d) &&
838                      float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
839             /* Magnitude subtraction of infinities */
840             farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
841         } else {
842             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
843             ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
844             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
845         }
846         if (likely(!float64_is_any_nan(farg1.d))) {
847             farg1.d = float64_chs(farg1.d);
848         }
849     }
850     return farg1.ll;
851 }
852
853 /* fnmsub - fnmsub. */
854 uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
855                        uint64_t arg3)
856 {
857     CPU_DoubleU farg1, farg2, farg3;
858
859     farg1.ll = arg1;
860     farg2.ll = arg2;
861     farg3.ll = arg3;
862
863     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
864                  (float64_is_zero(farg1.d) &&
865                   float64_is_infinity(farg2.d)))) {
866         /* Multiplication of zero by infinity */
867         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
868     } else {
869         if (unlikely(float64_is_signaling_nan(farg1.d) ||
870                      float64_is_signaling_nan(farg2.d) ||
871                      float64_is_signaling_nan(farg3.d))) {
872             /* sNaN operation */
873             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
874         }
875         /* This is the way the PowerPC specification defines it */
876         float128 ft0_128, ft1_128;
877
878         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
879         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
880         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
881         if (unlikely(float128_is_infinity(ft0_128) &&
882                      float64_is_infinity(farg3.d) &&
883                      float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
884             /* Magnitude subtraction of infinities */
885             farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
886         } else {
887             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
888             ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
889             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
890         }
891         if (likely(!float64_is_any_nan(farg1.d))) {
892             farg1.d = float64_chs(farg1.d);
893         }
894     }
895     return farg1.ll;
896 }
897
898 /* frsp - frsp. */
899 uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
900 {
901     CPU_DoubleU farg;
902     float32 f32;
903
904     farg.ll = arg;
905
906     if (unlikely(float64_is_signaling_nan(farg.d))) {
907         /* sNaN round to single precision */
908         fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
909     }
910     f32 = float64_to_float32(farg.d, &env->fp_status);
911     farg.d = float32_to_float64(f32, &env->fp_status);
912
913     return farg.ll;
914 }
915
916 /* fsqrt - fsqrt. */
917 uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
918 {
919     CPU_DoubleU farg;
920
921     farg.ll = arg;
922
923     if (unlikely(float64_is_any_nan(farg.d))) {
924         if (unlikely(float64_is_signaling_nan(farg.d))) {
925             /* sNaN square root */
926             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
927             farg.ll = float64_snan_to_qnan(farg.ll);
928         }
929     } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
930         /* Square root of a negative nonzero number */
931         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
932     } else {
933         farg.d = float64_sqrt(farg.d, &env->fp_status);
934     }
935     return farg.ll;
936 }
937
938 /* fre - fre. */
939 uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
940 {
941     CPU_DoubleU farg;
942
943     farg.ll = arg;
944
945     if (unlikely(float64_is_signaling_nan(farg.d))) {
946         /* sNaN reciprocal */
947         fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
948     }
949     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
950     return farg.ll;
951 }
952
953 /* fres - fres. */
954 uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
955 {
956     CPU_DoubleU farg;
957     float32 f32;
958
959     farg.ll = arg;
960
961     if (unlikely(float64_is_signaling_nan(farg.d))) {
962         /* sNaN reciprocal */
963         fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
964     }
965     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
966     f32 = float64_to_float32(farg.d, &env->fp_status);
967     farg.d = float32_to_float64(f32, &env->fp_status);
968
969     return farg.ll;
970 }
971
972 /* frsqrte  - frsqrte. */
973 uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
974 {
975     CPU_DoubleU farg;
976
977     farg.ll = arg;
978
979     if (unlikely(float64_is_any_nan(farg.d))) {
980         if (unlikely(float64_is_signaling_nan(farg.d))) {
981             /* sNaN reciprocal square root */
982             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
983             farg.ll = float64_snan_to_qnan(farg.ll);
984         }
985     } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
986         /* Reciprocal square root of a negative nonzero number */
987         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
988     } else {
989         farg.d = float64_sqrt(farg.d, &env->fp_status);
990         farg.d = float64_div(float64_one, farg.d, &env->fp_status);
991     }
992
993     return farg.ll;
994 }
995
996 /* fsel - fsel. */
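/* fsel returns arg2 when arg1 is greater than or equal to zero (treating
 * -0.0 as zero) and arg3 otherwise; a NaN in arg1 also selects arg3. */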
997 uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
998                      uint64_t arg3)
999 {
1000     CPU_DoubleU farg1;
1001
1002     farg1.ll = arg1;
1003
1004     if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
1005         !float64_is_any_nan(farg1.d)) {
1006         return arg2;
1007     } else {
1008         return arg3;
1009     }
1010 }
1011
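/* ftdiv (and ftsqrt below) test whether a software divide / square-root
 * sequence is safe for the given operands, without performing the operation.
 * Roughly: fe_flag is set for NaN, infinity or zero operands and for
 * exponent combinations near the format limits (where the real operation
 * could overflow, underflow or lose precision); fg_flag is set when an
 * operand is infinite or when frb is zero or denormal.  Both are packed
 * into the 4-bit CR field value that is returned. */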
1012 uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
1013 {
1014     int fe_flag = 0;
1015     int fg_flag = 0;
1016
1017     if (unlikely(float64_is_infinity(fra) ||
1018                  float64_is_infinity(frb) ||
1019                  float64_is_zero(frb))) {
1020         fe_flag = 1;
1021         fg_flag = 1;
1022     } else {
1023         int e_a = ppc_float64_get_unbiased_exp(fra);
1024         int e_b = ppc_float64_get_unbiased_exp(frb);
1025
1026         if (unlikely(float64_is_any_nan(fra) ||
1027                      float64_is_any_nan(frb))) {
1028             fe_flag = 1;
1029         } else if ((e_b <= -1022) || (e_b >= 1021)) {
1030             fe_flag = 1;
1031         } else if (!float64_is_zero(fra) &&
1032                    (((e_a - e_b) >= 1023) ||
1033                     ((e_a - e_b) <= -1021) ||
1034                     (e_a <= -970))) {
1035             fe_flag = 1;
1036         }
1037
1038         if (unlikely(float64_is_zero_or_denormal(frb))) {
1039             /* XB is not zero because of the above check and */
1040             /* so must be denormalized.                      */
1041             fg_flag = 1;
1042         }
1043     }
1044
1045     return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1046 }
1047
1048 uint32_t helper_ftsqrt(uint64_t frb)
1049 {
1050     int fe_flag = 0;
1051     int fg_flag = 0;
1052
1053     if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
1054         fe_flag = 1;
1055         fg_flag = 1;
1056     } else {
1057         int e_b = ppc_float64_get_unbiased_exp(frb);
1058
1059         if (unlikely(float64_is_any_nan(frb))) {
1060             fe_flag = 1;
1061         } else if (unlikely(float64_is_zero(frb))) {
1062             fe_flag = 1;
1063         } else if (unlikely(float64_is_neg(frb))) {
1064             fe_flag = 1;
1065         } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
1066             fe_flag = 1;
1067         }
1068
1069         if (unlikely(float64_is_zero_or_denormal(frb))) {
1070             /* XB is not zero because of the above check and */
1071             /* therefore must be denormalized.               */
1072             fg_flag = 1;
1073         }
1074     }
1075
1076     return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1077 }
1078
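/* fcmpu/fcmpo produce the standard 4-bit compare result: 0x08 = less than,
 * 0x04 = greater than, 0x02 = equal, 0x01 = unordered (at least one NaN).
 * The result is copied both into CR[crfD] and into FPSCR[FPCC]. */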
1079 void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1080                   uint32_t crfD)
1081 {
1082     CPU_DoubleU farg1, farg2;
1083     uint32_t ret = 0;
1084
1085     farg1.ll = arg1;
1086     farg2.ll = arg2;
1087
1088     if (unlikely(float64_is_any_nan(farg1.d) ||
1089                  float64_is_any_nan(farg2.d))) {
1090         ret = 0x01UL;
1091     } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1092         ret = 0x08UL;
1093     } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1094         ret = 0x04UL;
1095     } else {
1096         ret = 0x02UL;
1097     }
1098
1099     env->fpscr &= ~(0x0F << FPSCR_FPRF);
1100     env->fpscr |= ret << FPSCR_FPRF;
1101     env->crf[crfD] = ret;
1102     if (unlikely(ret == 0x01UL
1103                  && (float64_is_signaling_nan(farg1.d) ||
1104                      float64_is_signaling_nan(farg2.d)))) {
1105         /* sNaN comparison */
1106         fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1107     }
1108 }
1109
1110 void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1111                   uint32_t crfD)
1112 {
1113     CPU_DoubleU farg1, farg2;
1114     uint32_t ret = 0;
1115
1116     farg1.ll = arg1;
1117     farg2.ll = arg2;
1118
1119     if (unlikely(float64_is_any_nan(farg1.d) ||
1120                  float64_is_any_nan(farg2.d))) {
1121         ret = 0x01UL;
1122     } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1123         ret = 0x08UL;
1124     } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1125         ret = 0x04UL;
1126     } else {
1127         ret = 0x02UL;
1128     }
1129
1130     env->fpscr &= ~(0x0F << FPSCR_FPRF);
1131     env->fpscr |= ret << FPSCR_FPRF;
1132     env->crf[crfD] = ret;
1133     if (unlikely(ret == 0x01UL)) {
1134         if (float64_is_signaling_nan(farg1.d) ||
1135             float64_is_signaling_nan(farg2.d)) {
1136             /* sNaN comparison */
1137             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
1138                                   POWERPC_EXCP_FP_VXVC, 1);
1139         } else {
1140             /* qNaN comparison */
1141             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
1142         }
1143     }
1144 }
1145
1146 /* Single-precision floating-point conversions */
1147 static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1148 {
1149     CPU_FloatU u;
1150
1151     u.f = int32_to_float32(val, &env->vec_status);
1152
1153     return u.l;
1154 }
1155
1156 static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1157 {
1158     CPU_FloatU u;
1159
1160     u.f = uint32_to_float32(val, &env->vec_status);
1161
1162     return u.l;
1163 }
1164
1165 static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1166 {
1167     CPU_FloatU u;
1168
1169     u.l = val;
1170     /* NaN inputs are not handled the way IEEE 754 specifies */
1171     if (unlikely(float32_is_quiet_nan(u.f))) {
1172         return 0;
1173     }
1174
1175     return float32_to_int32(u.f, &env->vec_status);
1176 }
1177
1178 static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1179 {
1180     CPU_FloatU u;
1181
1182     u.l = val;
1183     /* NaN inputs are not handled the way IEEE 754 specifies */
1184     if (unlikely(float32_is_quiet_nan(u.f))) {
1185         return 0;
1186     }
1187
1188     return float32_to_uint32(u.f, &env->vec_status);
1189 }
1190
1191 static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1192 {
1193     CPU_FloatU u;
1194
1195     u.l = val;
1196     /* NaN inputs are not handled the way IEEE 754 specifies */
1197     if (unlikely(float32_is_quiet_nan(u.f))) {
1198         return 0;
1199     }
1200
1201     return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1202 }
1203
1204 static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1205 {
1206     CPU_FloatU u;
1207
1208     u.l = val;
1209     /* NaN inputs are not handled the way IEEE 754 specifies */
1210     if (unlikely(float32_is_quiet_nan(u.f))) {
1211         return 0;
1212     }
1213
1214     return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1215 }
1216
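/* The *sf/*uf variants convert between floats and 32-bit fractional
 * fixed-point values; the code treats the fixed-point operand as
 * value / 2^32, so conversions simply scale by 2^32. */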
1217 static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1218 {
1219     CPU_FloatU u;
1220     float32 tmp;
1221
1222     u.f = int32_to_float32(val, &env->vec_status);
1223     tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1224     u.f = float32_div(u.f, tmp, &env->vec_status);
1225
1226     return u.l;
1227 }
1228
1229 static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1230 {
1231     CPU_FloatU u;
1232     float32 tmp;
1233
1234     u.f = uint32_to_float32(val, &env->vec_status);
1235     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1236     u.f = float32_div(u.f, tmp, &env->vec_status);
1237
1238     return u.l;
1239 }
1240
1241 static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1242 {
1243     CPU_FloatU u;
1244     float32 tmp;
1245
1246     u.l = val;
1247     /* NaN inputs are not handled the way IEEE 754 specifies */
1248     if (unlikely(float32_is_quiet_nan(u.f))) {
1249         return 0;
1250     }
1251     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1252     u.f = float32_mul(u.f, tmp, &env->vec_status);
1253
1254     return float32_to_int32(u.f, &env->vec_status);
1255 }
1256
1257 static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1258 {
1259     CPU_FloatU u;
1260     float32 tmp;
1261
1262     u.l = val;
1263     /* NaN inputs are not handled the way IEEE 754 specifies */
1264     if (unlikely(float32_is_quiet_nan(u.f))) {
1265         return 0;
1266     }
1267     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1268     u.f = float32_mul(u.f, tmp, &env->vec_status);
1269
1270     return float32_to_uint32(u.f, &env->vec_status);
1271 }
1272
1273 #define HELPER_SPE_SINGLE_CONV(name)                              \
1274     uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
1275     {                                                             \
1276         return e##name(env, val);                                 \
1277     }
1278 /* efscfsi */
1279 HELPER_SPE_SINGLE_CONV(fscfsi);
1280 /* efscfui */
1281 HELPER_SPE_SINGLE_CONV(fscfui);
1282 /* efscfuf */
1283 HELPER_SPE_SINGLE_CONV(fscfuf);
1284 /* efscfsf */
1285 HELPER_SPE_SINGLE_CONV(fscfsf);
1286 /* efsctsi */
1287 HELPER_SPE_SINGLE_CONV(fsctsi);
1288 /* efsctui */
1289 HELPER_SPE_SINGLE_CONV(fsctui);
1290 /* efsctsiz */
1291 HELPER_SPE_SINGLE_CONV(fsctsiz);
1292 /* efsctuiz */
1293 HELPER_SPE_SINGLE_CONV(fsctuiz);
1294 /* efsctsf */
1295 HELPER_SPE_SINGLE_CONV(fsctsf);
1296 /* efsctuf */
1297 HELPER_SPE_SINGLE_CONV(fsctuf);
1298
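/* The vector (ev*) forms apply the scalar conversion independently to the
 * upper and lower 32-bit halves of the 64-bit operand. */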
1299 #define HELPER_SPE_VECTOR_CONV(name)                            \
1300     uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
1301     {                                                           \
1302         return ((uint64_t)e##name(env, val >> 32) << 32) |      \
1303             (uint64_t)e##name(env, val);                        \
1304     }
1305 /* evfscfsi */
1306 HELPER_SPE_VECTOR_CONV(fscfsi);
1307 /* evfscfui */
1308 HELPER_SPE_VECTOR_CONV(fscfui);
1309 /* evfscfuf */
1310 HELPER_SPE_VECTOR_CONV(fscfuf);
1311 /* evfscfsf */
1312 HELPER_SPE_VECTOR_CONV(fscfsf);
1313 /* evfsctsi */
1314 HELPER_SPE_VECTOR_CONV(fsctsi);
1315 /* evfsctui */
1316 HELPER_SPE_VECTOR_CONV(fsctui);
1317 /* evfsctsiz */
1318 HELPER_SPE_VECTOR_CONV(fsctsiz);
1319 /* evfsctuiz */
1320 HELPER_SPE_VECTOR_CONV(fsctuiz);
1321 /* evfsctsf */
1322 HELPER_SPE_VECTOR_CONV(fsctsf);
1323 /* evfsctuf */
1324 HELPER_SPE_VECTOR_CONV(fsctuf);
1325
1326 /* Single-precision floating-point arithmetic */
1327 static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1328 {
1329     CPU_FloatU u1, u2;
1330
1331     u1.l = op1;
1332     u2.l = op2;
1333     u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1334     return u1.l;
1335 }
1336
1337 static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1338 {
1339     CPU_FloatU u1, u2;
1340
1341     u1.l = op1;
1342     u2.l = op2;
1343     u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1344     return u1.l;
1345 }
1346
1347 static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1348 {
1349     CPU_FloatU u1, u2;
1350
1351     u1.l = op1;
1352     u2.l = op2;
1353     u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1354     return u1.l;
1355 }
1356
1357 static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1358 {
1359     CPU_FloatU u1, u2;
1360
1361     u1.l = op1;
1362     u2.l = op2;
1363     u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1364     return u1.l;
1365 }
1366
1367 #define HELPER_SPE_SINGLE_ARITH(name)                                   \
1368     uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1369     {                                                                   \
1370         return e##name(env, op1, op2);                                  \
1371     }
1372 /* efsadd */
1373 HELPER_SPE_SINGLE_ARITH(fsadd);
1374 /* efssub */
1375 HELPER_SPE_SINGLE_ARITH(fssub);
1376 /* efsmul */
1377 HELPER_SPE_SINGLE_ARITH(fsmul);
1378 /* efsdiv */
1379 HELPER_SPE_SINGLE_ARITH(fsdiv);
1380
1381 #define HELPER_SPE_VECTOR_ARITH(name)                                   \
1382     uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1383     {                                                                   \
1384         return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
1385             (uint64_t)e##name(env, op1, op2);                           \
1386     }
1387 /* evfsadd */
1388 HELPER_SPE_VECTOR_ARITH(fsadd);
1389 /* evfssub */
1390 HELPER_SPE_VECTOR_ARITH(fssub);
1391 /* evfsmul */
1392 HELPER_SPE_VECTOR_ARITH(fsmul);
1393 /* evfsdiv */
1394 HELPER_SPE_VECTOR_ARITH(fsdiv);
1395
1396 /* Single-precision floating-point comparisons */
1397 static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1398 {
1399     CPU_FloatU u1, u2;
1400
1401     u1.l = op1;
1402     u2.l = op2;
1403     return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1404 }
1405
1406 static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1407 {
1408     CPU_FloatU u1, u2;
1409
1410     u1.l = op1;
1411     u2.l = op2;
1412     return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1413 }
1414
1415 static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1416 {
1417     CPU_FloatU u1, u2;
1418
1419     u1.l = op1;
1420     u2.l = op2;
1421     return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1422 }
1423
1424 static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1425 {
1426     /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1427     return efscmplt(env, op1, op2);
1428 }
1429
1430 static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1431 {
1432     /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1433     return efscmpgt(env, op1, op2);
1434 }
1435
1436 static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1437 {
1438     /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1439     return efscmpeq(env, op1, op2);
1440 }
1441
1442 #define HELPER_SINGLE_SPE_CMP(name)                                     \
1443     uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1444     {                                                                   \
1445         return e##name(env, op1, op2) << 2;                             \
1446     }
1447 /* efststlt */
1448 HELPER_SINGLE_SPE_CMP(fststlt);
1449 /* efststgt */
1450 HELPER_SINGLE_SPE_CMP(fststgt);
1451 /* efststeq */
1452 HELPER_SINGLE_SPE_CMP(fststeq);
1453 /* efscmplt */
1454 HELPER_SINGLE_SPE_CMP(fscmplt);
1455 /* efscmpgt */
1456 HELPER_SINGLE_SPE_CMP(fscmpgt);
1457 /* efscmpeq */
1458 HELPER_SINGLE_SPE_CMP(fscmpeq);
1459
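/* Merge the per-element comparison results (t0 = high word, t1 = low word)
 * into the 4-bit CR field layout used by the vector compares:
 * bit3 = high result, bit2 = low result, bit1 = either, bit0 = both. */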
1460 static inline uint32_t evcmp_merge(int t0, int t1)
1461 {
1462     return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1463 }
1464
1465 #define HELPER_VECTOR_SPE_CMP(name)                                     \
1466     uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1467     {                                                                   \
1468         return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
1469                            e##name(env, op1, op2));                     \
1470     }
1471 /* evfststlt */
1472 HELPER_VECTOR_SPE_CMP(fststlt);
1473 /* evfststgt */
1474 HELPER_VECTOR_SPE_CMP(fststgt);
1475 /* evfststeq */
1476 HELPER_VECTOR_SPE_CMP(fststeq);
1477 /* evfscmplt */
1478 HELPER_VECTOR_SPE_CMP(fscmplt);
1479 /* evfscmpgt */
1480 HELPER_VECTOR_SPE_CMP(fscmpgt);
1481 /* evfscmpeq */
1482 HELPER_VECTOR_SPE_CMP(fscmpeq);
1483
1484 /* Double-precision floating-point conversions */
1485 uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1486 {
1487     CPU_DoubleU u;
1488
1489     u.d = int32_to_float64(val, &env->vec_status);
1490
1491     return u.ll;
1492 }
1493
1494 uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1495 {
1496     CPU_DoubleU u;
1497
1498     u.d = int64_to_float64(val, &env->vec_status);
1499
1500     return u.ll;
1501 }
1502
1503 uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1504 {
1505     CPU_DoubleU u;
1506
1507     u.d = uint32_to_float64(val, &env->vec_status);
1508
1509     return u.ll;
1510 }
1511
1512 uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1513 {
1514     CPU_DoubleU u;
1515
1516     u.d = uint64_to_float64(val, &env->vec_status);
1517
1518     return u.ll;
1519 }
1520
1521 uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1522 {
1523     CPU_DoubleU u;
1524
1525     u.ll = val;
1526     /* NaN inputs are not handled the way IEEE 754 specifies */
1527     if (unlikely(float64_is_any_nan(u.d))) {
1528         return 0;
1529     }
1530
1531     return float64_to_int32(u.d, &env->vec_status);
1532 }
1533
1534 uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1535 {
1536     CPU_DoubleU u;
1537
1538     u.ll = val;
1539     /* NaN inputs are not handled the way IEEE 754 specifies */
1540     if (unlikely(float64_is_any_nan(u.d))) {
1541         return 0;
1542     }
1543
1544     return float64_to_uint32(u.d, &env->vec_status);
1545 }
1546
1547 uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1548 {
1549     CPU_DoubleU u;
1550
1551     u.ll = val;
1552     /* NaN inputs are not handled the way IEEE 754 specifies */
1553     if (unlikely(float64_is_any_nan(u.d))) {
1554         return 0;
1555     }
1556
1557     return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1558 }
1559
1560 uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1561 {
1562     CPU_DoubleU u;
1563
1564     u.ll = val;
1565     /* NaN inputs are not handled the way IEEE 754 specifies */
1566     if (unlikely(float64_is_any_nan(u.d))) {
1567         return 0;
1568     }
1569
1570     return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1571 }
1572
1573 uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1574 {
1575     CPU_DoubleU u;
1576
1577     u.ll = val;
1578     /* NaN inputs are not handled the way IEEE 754 specifies */
1579     if (unlikely(float64_is_any_nan(u.d))) {
1580         return 0;
1581     }
1582
1583     return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1584 }
1585
1586 uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1587 {
1588     CPU_DoubleU u;
1589
1590     u.ll = val;
1591     /* NaN inputs are not handled the way IEEE 754 specifies */
1592     if (unlikely(float64_is_any_nan(u.d))) {
1593         return 0;
1594     }
1595
1596     return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1597 }
1598
1599 uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1600 {
1601     CPU_DoubleU u;
1602     float64 tmp;
1603
1604     u.d = int32_to_float64(val, &env->vec_status);
1605     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1606     u.d = float64_div(u.d, tmp, &env->vec_status);
1607
1608     return u.ll;
1609 }
1610
1611 uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1612 {
1613     CPU_DoubleU u;
1614     float64 tmp;
1615
1616     u.d = uint32_to_float64(val, &env->vec_status);
1617     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1618     u.d = float64_div(u.d, tmp, &env->vec_status);
1619
1620     return u.ll;
1621 }
1622
1623 uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1624 {
1625     CPU_DoubleU u;
1626     float64 tmp;
1627
1628     u.ll = val;
1629     /* NaN inputs are not handled the way IEEE 754 specifies */
1630     if (unlikely(float64_is_any_nan(u.d))) {
1631         return 0;
1632     }
1633     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1634     u.d = float64_mul(u.d, tmp, &env->vec_status);
1635
1636     return float64_to_int32(u.d, &env->vec_status);
1637 }
1638
1639 uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1640 {
1641     CPU_DoubleU u;
1642     float64 tmp;
1643
1644     u.ll = val;
1645     /* NaN inputs are not handled the way IEEE 754 specifies */
1646     if (unlikely(float64_is_any_nan(u.d))) {
1647         return 0;
1648     }
1649     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1650     u.d = float64_mul(u.d, tmp, &env->vec_status);
1651
1652     return float64_to_uint32(u.d, &env->vec_status);
1653 }
1654
1655 uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1656 {
1657     CPU_DoubleU u1;
1658     CPU_FloatU u2;
1659
1660     u1.ll = val;
1661     u2.f = float64_to_float32(u1.d, &env->vec_status);
1662
1663     return u2.l;
1664 }
1665
1666 uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1667 {
1668     CPU_DoubleU u2;
1669     CPU_FloatU u1;
1670
1671     u1.l = val;
1672     u2.d = float32_to_float64(u1.f, &env->vec_status);
1673
1674     return u2.ll;
1675 }
1676
1677 /* Double precision floating point arithmetic */
1678 uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1679 {
1680     CPU_DoubleU u1, u2;
1681
1682     u1.ll = op1;
1683     u2.ll = op2;
1684     u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1685     return u1.ll;
1686 }
1687
1688 uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1689 {
1690     CPU_DoubleU u1, u2;
1691
1692     u1.ll = op1;
1693     u2.ll = op2;
1694     u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1695     return u1.ll;
1696 }
1697
1698 uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1699 {
1700     CPU_DoubleU u1, u2;
1701
1702     u1.ll = op1;
1703     u2.ll = op2;
1704     u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1705     return u1.ll;
1706 }
1707
1708 uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1709 {
1710     CPU_DoubleU u1, u2;
1711
1712     u1.ll = op1;
1713     u2.ll = op2;
1714     u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1715     return u1.ll;
1716 }
1717
1718 /* Double precision floating point helpers */
1719 uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1720 {
1721     CPU_DoubleU u1, u2;
1722
1723     u1.ll = op1;
1724     u2.ll = op2;
1725     return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1726 }
1727
1728 uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1729 {
1730     CPU_DoubleU u1, u2;
1731
1732     u1.ll = op1;
1733     u2.ll = op2;
1734     return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1735 }
1736
1737 uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1738 {
1739     CPU_DoubleU u1, u2;
1740
1741     u1.ll = op1;
1742     u2.ll = op2;
1743     return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1744 }
1745
1746 uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1747 {
1748     /* XXX: TODO: test special values (NaN, infinities, ...) */
1749     return helper_efdtstlt(env, op1, op2);
1750 }
1751
1752 uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1753 {
1754     /* XXX: TODO: test special values (NaN, infinities, ...) */
1755     return helper_efdtstgt(env, op1, op2);
1756 }
1757
1758 uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1759 {
1760     /* XXX: TODO: test special values (NaN, infinities, ...) */
1761     return helper_efdtsteq(env, op1, op2);
1762 }
1763
1764 #define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2) \
1765     (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) |    \
1766      (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))
1767
1768 #define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)
1769 #define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5)
1770 #define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5)
1771 #define xC(opcode) DECODE_SPLIT(opcode, 3, 1,  6, 5)
1772 #define BF(opcode) (((opcode) >> (31-8)) & 7)
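
/*
 * VSX opcodes split each 6-bit register number into a 5-bit field plus a
 * one-bit extension elsewhere in the instruction word; DECODE_SPLIT glues the
 * two pieces back together.  Illustrative example: xT() takes the TX bit from
 * opcode bit 0 and the 5-bit T field from opcode bits 25..21 (counting from
 * the least significant bit), so with TX = 1 and T = 3
 *
 *     xT(opcode) == (1 << 5) | 3 == 35
 *
 * i.e. VSR 35 is selected.  BF() extracts the 3-bit CR field number used by
 * the compare and test helpers.
 */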
1773
1774 typedef union _ppc_vsr_t {
1775     uint64_t u64[2];
1776     uint32_t u32[4];
1777     float32 f32[4];
1778     float64 f64[2];
1779 } ppc_vsr_t;
1780
1781 #if defined(HOST_WORDS_BIGENDIAN)
1782 #define VsrW(i) u32[i]
1783 #define VsrD(i) u64[i]
1784 #else
1785 #define VsrW(i) u32[3-(i)]
1786 #define VsrD(i) u64[1-(i)]
1787 #endif
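
/*
 * VsrD()/VsrW() index the union in architectural element order regardless of
 * host byte order: VsrD(0) always names doubleword 0 of the 128-bit register
 * (the doubleword that carries the scalar part).  For example, on a
 * little-endian host VsrW(0) expands to u32[3], whereas on a big-endian host
 * it expands to u32[0].
 */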
1788
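/*
 * The 64 VSX registers are not stored contiguously in CPUPPCState: VSRs 0-31
 * are assembled from fpr[n] (doubleword 0) and vsr[n] (doubleword 1), while
 * VSRs 32-63 alias the AltiVec registers avr[0..31].  getVSR()/putVSR() hide
 * that split from the helpers below.
 */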
1789 static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
1790 {
1791     if (n < 32) {
1792         vsr->VsrD(0) = env->fpr[n];
1793         vsr->VsrD(1) = env->vsr[n];
1794     } else {
1795         vsr->u64[0] = env->avr[n-32].u64[0];
1796         vsr->u64[1] = env->avr[n-32].u64[1];
1797     }
1798 }
1799
1800 static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
1801 {
1802     if (n < 32) {
1803         env->fpr[n] = vsr->VsrD(0);
1804         env->vsr[n] = vsr->VsrD(1);
1805     } else {
1806         env->avr[n-32].u64[0] = vsr->u64[0];
1807         env->avr[n-32].u64[1] = vsr->u64[1];
1808     }
1809 }
1810
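/* Identity "conversion" so that the token-pasted ttp##_to_float64 and
 * float64_to_##tp constructs in the macros below become no-ops for float64. */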
1811 #define float64_to_float64(x, env) x
1812
1813
1814 /* VSX_ADD_SUB - VSX floating point add/subtract
1815  *   name  - instruction mnemonic
1816  *   op    - operation (add or sub)
1817  *   nels  - number of elements (1, 2 or 4)
1818  *   tp    - type (float32 or float64)
1819  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1820  *   sfprf - set FPRF
1821  */
1822 #define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1823 void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
1824 {                                                                            \
1825     ppc_vsr_t xt, xa, xb;                                                    \
1826     int i;                                                                   \
1827                                                                              \
1828     getVSR(xA(opcode), &xa, env);                                            \
1829     getVSR(xB(opcode), &xb, env);                                            \
1830     getVSR(xT(opcode), &xt, env);                                            \
1831     helper_reset_fpstatus(env);                                              \
1832                                                                              \
1833     for (i = 0; i < nels; i++) {                                             \
1834         float_status tstat = env->fp_status;                                 \
1835         set_float_exception_flags(0, &tstat);                                \
1836         xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
1837         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1838                                                                              \
1839         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1840             if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
1841                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
1842             } else if (tp##_is_signaling_nan(xa.fld) ||                      \
1843                        tp##_is_signaling_nan(xb.fld)) {                      \
1844                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1845             }                                                                \
1846         }                                                                    \
1847                                                                              \
1848         if (r2sp) {                                                          \
1849             xt.fld = helper_frsp(env, xt.fld);                               \
1850         }                                                                    \
1851                                                                              \
1852         if (sfprf) {                                                         \
1853             helper_compute_fprf(env, xt.fld);                                \
1854         }                                                                    \
1855     }                                                                        \
1856     putVSR(xT(opcode), &xt, env);                                            \
1857     helper_float_check_status(env);                                          \
1858 }
1859
1860 VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1861 VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1862 VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1863 VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1864 VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1865 VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1866 VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1867 VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
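
/*
 * Illustrative reading of the instantiations above: xsadddp computes
 * xt.VsrD(0) = xa.VsrD(0) + xb.VsrD(0) in double precision and sets FPRF
 * (sfprf = 1), while xsaddsp additionally rounds the result back to single
 * precision through helper_frsp() (r2sp = 1).  The xv* vector forms repeat
 * the operation for every element and leave FPRF untouched.
 */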
1868
1869 /* VSX_MUL - VSX floating point multiply
1870  *   op    - instruction mnemonic
1871  *   nels  - number of elements (1, 2 or 4)
1872  *   tp    - type (float32 or float64)
1873  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1874  *   sfprf - set FPRF
1875  */
1876 #define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
1877 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1878 {                                                                            \
1879     ppc_vsr_t xt, xa, xb;                                                    \
1880     int i;                                                                   \
1881                                                                              \
1882     getVSR(xA(opcode), &xa, env);                                            \
1883     getVSR(xB(opcode), &xb, env);                                            \
1884     getVSR(xT(opcode), &xt, env);                                            \
1885     helper_reset_fpstatus(env);                                              \
1886                                                                              \
1887     for (i = 0; i < nels; i++) {                                             \
1888         float_status tstat = env->fp_status;                                 \
1889         set_float_exception_flags(0, &tstat);                                \
1890         xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
1891         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1892                                                                              \
1893         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1894             if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
1895                 (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
1896                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
1897             } else if (tp##_is_signaling_nan(xa.fld) ||                      \
1898                        tp##_is_signaling_nan(xb.fld)) {                      \
1899                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1900             }                                                                \
1901         }                                                                    \
1902                                                                              \
1903         if (r2sp) {                                                          \
1904             xt.fld = helper_frsp(env, xt.fld);                               \
1905         }                                                                    \
1906                                                                              \
1907         if (sfprf) {                                                         \
1908             helper_compute_fprf(env, xt.fld);                                \
1909         }                                                                    \
1910     }                                                                        \
1911                                                                              \
1912     putVSR(xT(opcode), &xt, env);                                            \
1913     helper_float_check_status(env);                                          \
1914 }
1915
1916 VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1917 VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1918 VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1919 VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1920
1921 /* VSX_DIV - VSX floating point divide
1922  *   op    - instruction mnemonic
1923  *   nels  - number of elements (1, 2 or 4)
1924  *   tp    - type (float32 or float64)
1925  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1926  *   sfprf - set FPRF
1927  */
1928 #define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
1929 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1930 {                                                                             \
1931     ppc_vsr_t xt, xa, xb;                                                     \
1932     int i;                                                                    \
1933                                                                               \
1934     getVSR(xA(opcode), &xa, env);                                             \
1935     getVSR(xB(opcode), &xb, env);                                             \
1936     getVSR(xT(opcode), &xt, env);                                             \
1937     helper_reset_fpstatus(env);                                               \
1938                                                                               \
1939     for (i = 0; i < nels; i++) {                                              \
1940         float_status tstat = env->fp_status;                                  \
1941         set_float_exception_flags(0, &tstat);                                 \
1942         xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
1943         env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
1944                                                                               \
1945         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
1946             if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
1947                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
1948             } else if (tp##_is_zero(xa.fld) &&                                \
1949                 tp##_is_zero(xb.fld)) {                                       \
1950                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
1951             } else if (tp##_is_signaling_nan(xa.fld) ||                       \
1952                 tp##_is_signaling_nan(xb.fld)) {                              \
1953                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1954             }                                                                 \
1955         }                                                                     \
1956                                                                               \
1957         if (r2sp) {                                                           \
1958             xt.fld = helper_frsp(env, xt.fld);                                \
1959         }                                                                     \
1960                                                                               \
1961         if (sfprf) {                                                          \
1962             helper_compute_fprf(env, xt.fld);                                 \
1963         }                                                                     \
1964     }                                                                         \
1965                                                                               \
1966     putVSR(xT(opcode), &xt, env);                                             \
1967     helper_float_check_status(env);                                           \
1968 }
1969
1970 VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
1971 VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
1972 VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
1973 VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1974
1975 /* VSX_RE  - VSX floating point reciprocal estimate
1976  *   op    - instruction mnemonic
1977  *   nels  - number of elements (1, 2 or 4)
1978  *   tp    - type (float32 or float64)
1979  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1980  *   sfprf - set FPRF
1981  */
1982 #define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
1983 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1984 {                                                                             \
1985     ppc_vsr_t xt, xb;                                                         \
1986     int i;                                                                    \
1987                                                                               \
1988     getVSR(xB(opcode), &xb, env);                                             \
1989     getVSR(xT(opcode), &xt, env);                                             \
1990     helper_reset_fpstatus(env);                                               \
1991                                                                               \
1992     for (i = 0; i < nels; i++) {                                              \
1993         if (unlikely(tp##_is_signaling_nan(xb.fld))) {                        \
1994                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1995         }                                                                     \
1996         xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
1997                                                                               \
1998         if (r2sp) {                                                           \
1999             xt.fld = helper_frsp(env, xt.fld);                                \
2000         }                                                                     \
2001                                                                               \
2002         if (sfprf) {                                                          \
2003             helper_compute_fprf(env, xt.fld);                                 \
2004         }                                                                     \
2005     }                                                                         \
2006                                                                               \
2007     putVSR(xT(opcode), &xt, env);                                             \
2008     helper_float_check_status(env);                                           \
2009 }
2010
2011 VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
2012 VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
2013 VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
2014 VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
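
/*
 * Note that these "reciprocal estimate" helpers compute an exact 1.0 / xb
 * rather than the low-precision estimate real hardware may return, which
 * comfortably satisfies the accuracy the estimate instructions require.  The
 * same applies to the reciprocal square root estimate helpers further below.
 */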
2015
2016 /* VSX_SQRT - VSX floating point square root
2017  *   op    - instruction mnemonic
2018  *   nels  - number of elements (1, 2 or 4)
2019  *   tp    - type (float32 or float64)
2020  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2021  *   sfprf - set FPRF
2022  */
2023 #define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
2024 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2025 {                                                                            \
2026     ppc_vsr_t xt, xb;                                                        \
2027     int i;                                                                   \
2028                                                                              \
2029     getVSR(xB(opcode), &xb, env);                                            \
2030     getVSR(xT(opcode), &xt, env);                                            \
2031     helper_reset_fpstatus(env);                                              \
2032                                                                              \
2033     for (i = 0; i < nels; i++) {                                             \
2034         float_status tstat = env->fp_status;                                 \
2035         set_float_exception_flags(0, &tstat);                                \
2036         xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2037         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2038                                                                              \
2039         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2040             if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2041                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2042             } else if (tp##_is_signaling_nan(xb.fld)) {                      \
2043                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2044             }                                                                \
2045         }                                                                    \
2046                                                                              \
2047         if (r2sp) {                                                          \
2048             xt.fld = helper_frsp(env, xt.fld);                               \
2049         }                                                                    \
2050                                                                              \
2051         if (sfprf) {                                                         \
2052             helper_compute_fprf(env, xt.fld);                                \
2053         }                                                                    \
2054     }                                                                        \
2055                                                                              \
2056     putVSR(xT(opcode), &xt, env);                                            \
2057     helper_float_check_status(env);                                          \
2058 }
2059
2060 VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
2061 VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
2062 VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
2063 VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2064
2065 /* VSX_RSQRTE - VSX floating point reciprocal square root estimate
2066  *   op    - instruction mnemonic
2067  *   nels  - number of elements (1, 2 or 4)
2068  *   tp    - type (float32 or float64)
2069  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2070  *   sfprf - set FPRF
2071  */
2072 #define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
2073 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2074 {                                                                            \
2075     ppc_vsr_t xt, xb;                                                        \
2076     int i;                                                                   \
2077                                                                              \
2078     getVSR(xB(opcode), &xb, env);                                            \
2079     getVSR(xT(opcode), &xt, env);                                            \
2080     helper_reset_fpstatus(env);                                              \
2081                                                                              \
2082     for (i = 0; i < nels; i++) {                                             \
2083         float_status tstat = env->fp_status;                                 \
2084         set_float_exception_flags(0, &tstat);                                \
2085         xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2086         xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
2087         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2088                                                                              \
2089         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2090             if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2091                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2092             } else if (tp##_is_signaling_nan(xb.fld)) {                      \
2093                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2094             }                                                                \
2095         }                                                                    \
2096                                                                              \
2097         if (r2sp) {                                                          \
2098             xt.fld = helper_frsp(env, xt.fld);                               \
2099         }                                                                    \
2100                                                                              \
2101         if (sfprf) {                                                         \
2102             helper_compute_fprf(env, xt.fld);                                \
2103         }                                                                    \
2104     }                                                                        \
2105                                                                              \
2106     putVSR(xT(opcode), &xt, env);                                            \
2107     helper_float_check_status(env);                                          \
2108 }
2109
2110 VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
2111 VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
2112 VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
2113 VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2114
2115 /* VSX_TDIV - VSX floating point test for divide
2116  *   op    - instruction mnemonic
2117  *   nels  - number of elements (1, 2 or 4)
2118  *   tp    - type (float32 or float64)
2119  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2120  *   emin  - minimum unbiased exponent
2121  *   emax  - maximum unbiased exponent
2122  *   nbits - number of fraction bits
2123  */
2124 #define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
2125 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2126 {                                                                       \
2127     ppc_vsr_t xa, xb;                                                   \
2128     int i;                                                              \
2129     int fe_flag = 0;                                                    \
2130     int fg_flag = 0;                                                    \
2131                                                                         \
2132     getVSR(xA(opcode), &xa, env);                                       \
2133     getVSR(xB(opcode), &xb, env);                                       \
2134                                                                         \
2135     for (i = 0; i < nels; i++) {                                        \
2136         if (unlikely(tp##_is_infinity(xa.fld) ||                        \
2137                      tp##_is_infinity(xb.fld) ||                        \
2138                      tp##_is_zero(xb.fld))) {                           \
2139             fe_flag = 1;                                                \
2140             fg_flag = 1;                                                \
2141         } else {                                                        \
2142             int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
2143             int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2144                                                                         \
2145             if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
2146                          tp##_is_any_nan(xb.fld))) {                    \
2147                 fe_flag = 1;                                            \
2148             } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
2149                 fe_flag = 1;                                            \
2150             } else if (!tp##_is_zero(xa.fld) &&                         \
2151                        (((e_a - e_b) >= emax) ||                        \
2152                         ((e_a - e_b) <= (emin+1)) ||                    \
2153                          (e_a <= (emin+nbits)))) {                      \
2154                 fe_flag = 1;                                            \
2155             }                                                           \
2156                                                                         \
2157             if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2158                 /* XB is not zero because of the above check and */     \
2159                 /* so must be denormalized.                      */     \
2160                 fg_flag = 1;                                            \
2161             }                                                           \
2162         }                                                               \
2163     }                                                                   \
2164                                                                         \
2165     env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2166 }
2167
2168 VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
2169 VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2170 VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2171
2172 /* VSX_TSQRT - VSX floating point test for square root
2173  *   op    - instruction mnemonic
2174  *   nels  - number of elements (1, 2 or 4)
2175  *   tp    - type (float32 or float64)
2176  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2177  *   emin  - minimum unbiased exponent
2179  *   nbits - number of fraction bits
2180  */
2181 #define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2182 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2183 {                                                                       \
2184     ppc_vsr_t xa, xb;                                                   \
2185     int i;                                                              \
2186     int fe_flag = 0;                                                    \
2187     int fg_flag = 0;                                                    \
2188                                                                         \
2189     getVSR(xA(opcode), &xa, env);                                       \
2190     getVSR(xB(opcode), &xb, env);                                       \
2191                                                                         \
2192     for (i = 0; i < nels; i++) {                                        \
2193         if (unlikely(tp##_is_infinity(xb.fld) ||                        \
2194                      tp##_is_zero(xb.fld))) {                           \
2195             fe_flag = 1;                                                \
2196             fg_flag = 1;                                                \
2197         } else {                                                        \
2198             int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2199                                                                         \
2200             if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
2201                 fe_flag = 1;                                            \
2202             } else if (unlikely(tp##_is_zero(xb.fld))) {                \
2203                 fe_flag = 1;                                            \
2204             } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
2205                 fe_flag = 1;                                            \
2206             } else if (!tp##_is_zero(xb.fld) &&                         \
2207                       (e_b <= (emin+nbits))) {                          \
2208                 fe_flag = 1;                                            \
2209             }                                                           \
2210                                                                         \
2211             if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2212                 /* XB is not zero because of the above check and */     \
2213                 /* therefore must be denormalized.               */     \
2214                 fg_flag = 1;                                            \
2215             }                                                           \
2216         }                                                               \
2217     }                                                                   \
2218                                                                         \
2219     env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2220 }
2221
2222 VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2223 VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2224 VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2225
2226 /* VSX_MADD - VSX floating point multiply/add variations
2227  *   op    - instruction mnemonic
2228  *   nels  - number of elements (1, 2 or 4)
2229  *   tp    - type (float32 or float64)
2230  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2231  *   maddflgs - flags for the float*muladd routine that control the
2232  *           various forms (madd, msub, nmadd, nmsub)
2233  *   afrm  - A form (1=A, 0=M)
2234  *   sfprf - set FPRF
2235  */
2236 #define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
2237 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2238 {                                                                             \
2239     ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
2240     ppc_vsr_t *b, *c;                                                         \
2241     int i;                                                                    \
2242                                                                               \
2243     if (afrm) { /* AxB + T */                                                 \
2244         b = &xb;                                                              \
2245         c = &xt_in;                                                           \
2246     } else { /* AxT + B */                                                    \
2247         b = &xt_in;                                                           \
2248         c = &xb;                                                              \
2249     }                                                                         \
2250                                                                               \
2251     getVSR(xA(opcode), &xa, env);                                             \
2252     getVSR(xB(opcode), &xb, env);                                             \
2253     getVSR(xT(opcode), &xt_in, env);                                          \
2254                                                                               \
2255     xt_out = xt_in;                                                           \
2256                                                                               \
2257     helper_reset_fpstatus(env);                                               \
2258                                                                               \
2259     for (i = 0; i < nels; i++) {                                              \
2260         float_status tstat = env->fp_status;                                  \
2261         set_float_exception_flags(0, &tstat);                                 \
2262         if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2263             /* Avoid double rounding errors by rounding the intermediate */   \
2264             /* result to odd.                                            */   \
2265             set_float_rounding_mode(float_round_to_zero, &tstat);             \
2266             xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2267                                        maddflgs, &tstat);                     \
2268             xt_out.fld |= (get_float_exception_flags(&tstat) &                \
2269                               float_flag_inexact) != 0;                       \
2270         } else {                                                              \
2271             xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2272                                         maddflgs, &tstat);                    \
2273         }                                                                     \
2274         env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2275                                                                               \
2276         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2277             if (tp##_is_signaling_nan(xa.fld) ||                              \
2278                 tp##_is_signaling_nan(b->fld) ||                              \
2279                 tp##_is_signaling_nan(c->fld)) {                              \
2280                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
2281                 tstat.float_exception_flags &= ~float_flag_invalid;           \
2282             }                                                                 \
2283             if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) ||         \
2284                 (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) {         \
2285                 xt_out.fld = float64_to_##tp(fload_invalid_op_excp(env,       \
2286                     POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);          \
2287                 tstat.float_exception_flags &= ~float_flag_invalid;           \
2288             }                                                                 \
2289             if ((tstat.float_exception_flags & float_flag_invalid) &&         \
2290                 ((tp##_is_infinity(xa.fld) ||                                 \
2291                   tp##_is_infinity(b->fld)) &&                                \
2292                   tp##_is_infinity(c->fld))) {                                \
2293                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);     \
2294             }                                                                 \
2295         }                                                                     \
2296                                                                               \
2297         if (r2sp) {                                                           \
2298             xt_out.fld = helper_frsp(env, xt_out.fld);                        \
2299         }                                                                     \
2300                                                                               \
2301         if (sfprf) {                                                          \
2302             helper_compute_fprf(env, xt_out.fld);                             \
2303         }                                                                     \
2304     }                                                                         \
2305     putVSR(xT(opcode), &xt_out, env);                                         \
2306     helper_float_check_status(env);                                           \
2307 }
2308
2309 #define MADD_FLGS 0
2310 #define MSUB_FLGS float_muladd_negate_c
2311 #define NMADD_FLGS float_muladd_negate_result
2312 #define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
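
/*
 * The flag combinations above select the four fused multiply-add variants
 * implemented by softfloat's float*_muladd():
 *
 *     MADD_FLGS   ->   a * b + c
 *     MSUB_FLGS   ->   a * b - c
 *     NMADD_FLGS  -> -(a * b + c)
 *     NMSUB_FLGS  -> -(a * b - c)
 */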
2313
2314 VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
2315 VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
2316 VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
2317 VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
2318 VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
2319 VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
2320 VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
2321 VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
2322
2323 VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
2324 VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
2325 VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
2326 VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
2327 VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
2328 VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
2329 VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
2330 VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
2331
2332 VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
2333 VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
2334 VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
2335 VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
2336 VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
2337 VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
2338 VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
2339 VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
2340
2341 VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
2342 VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
2343 VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
2344 VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
2345 VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
2346 VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
2347 VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
2348 VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
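
/*
 * Illustrative example of the A-form/M-form distinction selected by afrm:
 * xsmaddadp (A-form) computes XT = XA * XB + XT, whereas xsmaddmdp (M-form)
 * computes XT = XA * XT + XB.  Only the roles of XB and the old XT value are
 * swapped; everything else in VSX_MADD is identical.
 */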
2349
2350 #define VSX_SCALAR_CMP(op, ordered)                                      \
2351 void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
2352 {                                                                        \
2353     ppc_vsr_t xa, xb;                                                    \
2354     uint32_t cc = 0;                                                     \
2355                                                                          \
2356     getVSR(xA(opcode), &xa, env);                                        \
2357     getVSR(xB(opcode), &xb, env);                                        \
2358                                                                          \
2359     if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                       \
2360                  float64_is_any_nan(xb.VsrD(0)))) {                      \
2361         if (float64_is_signaling_nan(xa.VsrD(0)) ||                      \
2362             float64_is_signaling_nan(xb.VsrD(0))) {                      \
2363             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
2364         }                                                                \
2365         if (ordered) {                                                   \
2366             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);         \
2367         }                                                                \
2368         cc = 1;                                                          \
2369     } else {                                                             \
2370         if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {       \
2371             cc = 8;                                                      \
2372         } else if (!float64_le(xa.VsrD(0), xb.VsrD(0),                   \
2373                                &env->fp_status)) { \
2374             cc = 4;                                                      \
2375         } else {                                                         \
2376             cc = 2;                                                      \
2377         }                                                                \
2378     }                                                                    \
2379                                                                          \
2380     env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2381     env->fpscr |= cc << FPSCR_FPRF;                                      \
2382     env->crf[BF(opcode)] = cc;                                           \
2383                                                                          \
2384     helper_float_check_status(env);                                      \
2385 }
2386
2387 VSX_SCALAR_CMP(xscmpodp, 1)
2388 VSX_SCALAR_CMP(xscmpudp, 0)
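
/*
 * The 4-bit condition code written to FPCC and to CR[BF] above encodes
 * 0b1000 = less than, 0b0100 = greater than, 0b0010 = equal and
 * 0b0001 = unordered (at least one operand is a NaN).  Both forms raise
 * VXSNAN for signalling NaNs; the ordered compare (xscmpodp) additionally
 * raises VXVC whenever either operand is a NaN.
 */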
2389
2390 /* VSX_MAX_MIN - VSX floating point maximum/minimum
2391  *   name  - instruction mnemonic
2392  *   op    - operation (max or min)
2393  *   nels  - number of elements (1, 2 or 4)
2394  *   tp    - type (float32 or float64)
2395  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2396  */
2397 #define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
2398 void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2399 {                                                                             \
2400     ppc_vsr_t xt, xa, xb;                                                     \
2401     int i;                                                                    \
2402                                                                               \
2403     getVSR(xA(opcode), &xa, env);                                             \
2404     getVSR(xB(opcode), &xb, env);                                             \
2405     getVSR(xT(opcode), &xt, env);                                             \
2406                                                                               \
2407     for (i = 0; i < nels; i++) {                                              \
2408         xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
2409         if (unlikely(tp##_is_signaling_nan(xa.fld) ||                         \
2410                      tp##_is_signaling_nan(xb.fld))) {                        \
2411             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
2412         }                                                                     \
2413     }                                                                         \
2414                                                                               \
2415     putVSR(xT(opcode), &xt, env);                                             \
2416     helper_float_check_status(env);                                           \
2417 }
2418
2419 VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2420 VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2421 VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2422 VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2423 VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2424 VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2425
2426 /* VSX_CMP - VSX floating point compare
2427  *   op    - instruction mnemonic
2428  *   nels  - number of elements (1, 2 or 4)
2429  *   tp    - type (float32 or float64)
2430  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2431  *   cmp   - comparison operation
2432  *   svxvc - set VXVC bit
2433  */
2434 #define VSX_CMP(op, nels, tp, fld, cmp, svxvc)                            \
2435 void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2436 {                                                                         \
2437     ppc_vsr_t xt, xa, xb;                                                 \
2438     int i;                                                                \
2439     int all_true = 1;                                                     \
2440     int all_false = 1;                                                    \
2441                                                                           \
2442     getVSR(xA(opcode), &xa, env);                                         \
2443     getVSR(xB(opcode), &xb, env);                                         \
2444     getVSR(xT(opcode), &xt, env);                                         \
2445                                                                           \
2446     for (i = 0; i < nels; i++) {                                          \
2447         if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
2448                      tp##_is_any_nan(xb.fld))) {                          \
2449             if (tp##_is_signaling_nan(xa.fld) ||                          \
2450                 tp##_is_signaling_nan(xb.fld)) {                          \
2451                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
2452             }                                                             \
2453             if (svxvc) {                                                  \
2454                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
2455             }                                                             \
2456             xt.fld = 0;                                                   \
2457             all_true = 0;                                                 \
2458         } else {                                                          \
2459             if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == 1) {       \
2460                 xt.fld = -1;                                              \
2461                 all_false = 0;                                            \
2462             } else {                                                      \
2463                 xt.fld = 0;                                               \
2464                 all_true = 0;                                             \
2465             }                                                             \
2466         }                                                                 \
2467     }                                                                     \
2468                                                                           \
2469     putVSR(xT(opcode), &xt, env);                                         \
2470     if ((opcode >> (31-21)) & 1) {                                        \
2471         env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
2472     }                                                                     \
2473     helper_float_check_status(env);                                       \
2474  }
2475
2476 VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0)
2477 VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1)
2478 VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1)
2479 VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
2480 VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
2481 VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
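
/*
 * The predicate comparisons above write an all-ones or all-zeros mask per
 * element.  The ge and gt variants hand the operands to softfloat in reverse
 * order (le/lt with xb first), which yields xa >= xb and xa > xb.  When the
 * record bit of the opcode is set, CR6 receives a summary: 0b1000 if every
 * element compared true, 0b0010 if every element compared false (NaN
 * elements count as false).
 */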
2482
2483 /* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2484  *   op    - instruction mnemonic
2485  *   nels  - number of elements (1, 2 or 4)
2486  *   stp   - source type (float32 or float64)
2487  *   ttp   - target type (float32 or float64)
2488  *   sfld  - source vsr_t field
2489  *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
2490  *   sfprf - set FPRF
2491  */
2492 #define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2493 void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2494 {                                                                  \
2495     ppc_vsr_t xt, xb;                                              \
2496     int i;                                                         \
2497                                                                    \
2498     getVSR(xB(opcode), &xb, env);                                  \
2499     getVSR(xT(opcode), &xt, env);                                  \
2500                                                                    \
2501     for (i = 0; i < nels; i++) {                                   \
2502         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
2503         if (unlikely(stp##_is_signaling_nan(xb.sfld))) {           \
2504             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2505             xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2506         }                                                          \
2507         if (sfprf) {                                               \
2508             helper_compute_fprf(env, ttp##_to_float64(xt.tfld,     \
2509                                 &env->fp_status));                 \
2510         }                                                          \
2511     }                                                              \
2512                                                                    \
2513     putVSR(xT(opcode), &xt, env);                                  \
2514     helper_float_check_status(env);                                \
2515 }
2516
2517 VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
2518 VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2519 VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
2520 VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
2521
2522 uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2523 {
2524     float_status tstat = env->fp_status;
2525     set_float_exception_flags(0, &tstat);
2526
2527     return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2528 }
2529
2530 uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2531 {
2532     float_status tstat = env->fp_status;
2533     set_float_exception_flags(0, &tstat);
2534
2535     return float32_to_float64(xb >> 32, &tstat);
2536 }
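
/*
 * xscvdpspn/xscvspdpn are the non-signalling conversion forms: they operate
 * on a local copy of the FP status, so any exception flags the conversion
 * raises are discarded rather than accumulated into FPSCR.
 */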
2537
2538 /* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2539  *   op    - instruction mnemonic
2540  *   nels  - number of elements (1, 2 or 4)
2541  *   stp   - source type (float32 or float64)
2542  *   ttp   - target type (int32, uint32, int64 or uint64)
2543  *   sfld  - source vsr_t field
2544  *   tfld  - target vsr_t field
2545  *   rnan  - resulting NaN
2546  */
2547 #define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
2548 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2549 {                                                                            \
2550     ppc_vsr_t xt, xb;                                                        \
2551     int i;                                                                   \
2552                                                                              \
2553     getVSR(xB(opcode), &xb, env);                                            \
2554     getVSR(xT(opcode), &xt, env);                                            \
2555                                                                              \
2556     for (i = 0; i < nels; i++) {                                             \
2557         if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
2558             if (stp##_is_signaling_nan(xb.sfld)) {                           \
2559                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
2560             }                                                                \
2561             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2562             xt.tfld = rnan;                                                  \
2563         } else {                                                             \
2564             xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
2565                           &env->fp_status);                                  \
2566             if (env->fp_status.float_exception_flags & float_flag_invalid) { \
2567                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
2568             }                                                                \
2569         }                                                                    \
2570     }                                                                        \
2571                                                                              \
2572     putVSR(xT(opcode), &xt, env);                                            \
2573     helper_float_check_status(env);                                          \
2574 }
2575
2576 VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
2577                   0x8000000000000000ULL)
2578 VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
2579                   0x80000000U)
2580 VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
2581 VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
2582 VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
2583                   0x8000000000000000ULL)
2584 VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
2585                   0x80000000U)
2586 VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
2587 VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
2588 VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
2589                   0x8000000000000000ULL)
2590 VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
2591 VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
2592 VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
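
/* As an illustrative sketch (not part of the build), the single-element
 * xscvdpsxds instantiation above expands to roughly the following, with
 * the one-iteration loop collapsed:
 *
 *   void helper_xscvdpsxds(CPUPPCState *env, uint32_t opcode)
 *   {
 *       ppc_vsr_t xt, xb;
 *
 *       getVSR(xB(opcode), &xb, env);
 *       getVSR(xT(opcode), &xt, env);
 *       if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {
 *           if (float64_is_signaling_nan(xb.VsrD(0))) {
 *               fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
 *           }
 *           fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);
 *           xt.VsrD(0) = 0x8000000000000000ULL;
 *       } else {
 *           xt.VsrD(0) = float64_to_int64_round_to_zero(xb.VsrD(0),
 *                                                       &env->fp_status);
 *           if (env->fp_status.float_exception_flags & float_flag_invalid) {
 *               fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);
 *           }
 *       }
 *       putVSR(xT(opcode), &xt, env);
 *       helper_float_check_status(env);
 *   }
 */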
2593
2594 /* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
2595  *   op    - instruction mnemonic
2596  *   nels  - number of elements (1, 2 or 4)
2597  *   stp   - source type (int32, uint32, int64 or uint64)
2598  *   ttp   - target type (float32 or float64)
2599  *   sfld  - source vsr_t field
2600  *   tfld  - target vsr_t field
2601  *   sfprf - set FPRF
2602  *   r2sp  - round the intermediate result to single precision
2603  */
2604 #define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
2605 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2606 {                                                                       \
2607     ppc_vsr_t xt, xb;                                                   \
2608     int i;                                                              \
2609                                                                         \
2610     getVSR(xB(opcode), &xb, env);                                       \
2611     getVSR(xT(opcode), &xt, env);                                       \
2612                                                                         \
2613     for (i = 0; i < nels; i++) {                                        \
2614         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2615         if (r2sp) {                                                     \
2616             xt.tfld = helper_frsp(env, xt.tfld);                        \
2617         }                                                               \
2618         if (sfprf) {                                                    \
2619             helper_compute_fprf(env, xt.tfld);                          \
2620         }                                                               \
2621     }                                                                   \
2622                                                                         \
2623     putVSR(xT(opcode), &xt, env);                                       \
2624     helper_float_check_status(env);                                     \
2625 }
2626
2627 VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
2628 VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
2629 VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
2630 VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
2631 VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
2632 VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
2633 VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
2634 VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
2635 VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
2636 VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
2637 VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
2638 VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
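
/* For example (an informal sketch, not compiled), the xscvsxdsp
 * instantiation above (sfprf = 1, r2sp = 1) performs, per element:
 *
 *   xt.VsrD(0) = int64_to_float64(xb.VsrD(0), &env->fp_status);
 *   xt.VsrD(0) = helper_frsp(env, xt.VsrD(0));  // round result to single precision
 *   helper_compute_fprf(env, xt.VsrD(0));       // update FPSCR[FPRF]
 */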
2639
2640 /* For "use current rounding mode", define a value that will not be one of
2641  * the existing rounding mode enums.
2642  */
2643 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
2644   float_round_up + float_round_to_zero)
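
/* Sanity check for the trick above, assuming the usual softfloat values
 * (float_round_nearest_even = 0, float_round_down = 1, float_round_up = 2,
 * float_round_to_zero = 3): the sum is 6, which is not a defined rounding
 * mode, so FLOAT_ROUND_CURRENT can serve as an out-of-band marker in the
 * VSX_ROUND macro below.
 */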
2645
2646 /* VSX_ROUND - VSX floating point round
2647  *   op    - instruction mnemonic
2648  *   nels  - number of elements (1, 2 or 4)
2649  *   tp    - type (float32 or float64)
2650  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2651  *   rmode - rounding mode
2652  *   sfprf - set FPRF
2653  */
2654 #define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
2655 void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
2656 {                                                                      \
2657     ppc_vsr_t xt, xb;                                                  \
2658     int i;                                                             \
2659     getVSR(xB(opcode), &xb, env);                                      \
2660     getVSR(xT(opcode), &xt, env);                                      \
2661                                                                        \
2662     if (rmode != FLOAT_ROUND_CURRENT) {                                \
2663         set_float_rounding_mode(rmode, &env->fp_status);               \
2664     }                                                                  \
2665                                                                        \
2666     for (i = 0; i < nels; i++) {                                       \
2667         if (unlikely(tp##_is_signaling_nan(xb.fld))) {                 \
2668             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
2669             xt.fld = tp##_snan_to_qnan(xb.fld);                        \
2670         } else {                                                       \
2671             xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
2672         }                                                              \
2673         if (sfprf) {                                                   \
2674             helper_compute_fprf(env, xt.fld);                          \
2675         }                                                              \
2676     }                                                                  \
2677                                                                        \
2678     /* If this is not a "use current rounding mode" instruction,       \
2679      * then inhibit setting of the XX bit and restore rounding         \
2680      * mode from FPSCR */                                              \
2681     if (rmode != FLOAT_ROUND_CURRENT) {                                \
2682         fpscr_set_rounding_mode(env);                                  \
2683         env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
2684     }                                                                  \
2685                                                                        \
2686     putVSR(xT(opcode), &xt, env);                                      \
2687     helper_float_check_status(env);                                    \
2688 }
2689
2690 VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_nearest_even, 1)
2691 VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
2692 VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
2693 VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
2694 VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
2695
2696 VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_nearest_even, 0)
2697 VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
2698 VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
2699 VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
2700 VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
2701
2702 VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_nearest_even, 0)
2703 VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
2704 VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
2705 VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
2706 VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
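
/* An informal sketch (SNaN handling omitted) of a fixed-mode form such as
 * xsrdpiz (rmode = float_round_to_zero, nels = 1, sfprf = 1):
 *
 *   set_float_rounding_mode(float_round_to_zero, &env->fp_status);
 *   xt.VsrD(0) = float64_round_to_int(xb.VsrD(0), &env->fp_status);
 *   helper_compute_fprf(env, xt.VsrD(0));
 *   fpscr_set_rounding_mode(env);              // restore FPSCR rounding mode
 *   env->fp_status.float_exception_flags &= ~float_flag_inexact;  // no XX
 *
 * The "c" forms (xsrdpic, xvrdpic, xvrspic) pass FLOAT_ROUND_CURRENT
 * instead, so they round with the current FPSCR mode and may set XX.
 */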
2707
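/* xsrsp: round a scalar double-precision value to single precision,
 * then update FPSCR[FPRF] and the floating-point exception status.
 */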
2708 uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
2709 {
2710     helper_reset_fpstatus(env);
2711
2712     uint64_t xt = helper_frsp(env, xb);
2713
2714     helper_compute_fprf(env, xt);
2715     helper_float_check_status(env);
2716     return xt;
2717 }