Fix exception propagation for VFP single precision
Author: Jannik Vogel
@@ -271,8 +271,9 @@ inline int vfp_single_type(const vfp_single* s)
 // Unpack a single-precision float.  Note that this returns the magnitude
 // of the single-precision float mantissa with the 1. if necessary,
 // aligned to bit 30.
-inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
+inline u32 vfp_single_unpack(vfp_single* s, s32 val, u32 fpscr)
 {
+    u32 exceptions = 0;
     s->sign = vfp_single_packed_sign(val) >> 16,
     s->exponent = vfp_single_packed_exponent(val);
 
@@ -283,12 +284,13 @@ inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
 
     // If flush-to-zero mode is enabled, turn the denormal into zero.
     // On a VFPv2 architecture, the sign of the zero is always positive.
-    if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
+    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
         s->sign = 0;
         s->exponent = 0;
         s->significand = 0;
-        *fpscr |= FPSCR_IDC;
+        exceptions |= FPSCR_IDC;
     }
+    return exceptions;
 }
 
 // Re-pack a single-precision float. This assumes that the float is

@@ -334,8 +334,9 @@ static u32 vfp_single_fsqrt(ARMul_State* state, int sd, int unused, s32 m, u32 f
 {
     struct vfp_single vsm, vsd, *vsp;
     int ret, tm;
+    u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     tm = vfp_single_type(&vsm);
     if (tm & (VFP_NAN|VFP_INFINITY)) {
         vsp = &vsd;
@@ -408,7 +409,9 @@ sqrt_invalid:
     }
     vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
+
+    return exceptions;
 }
 
 /*
@@ -503,7 +506,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
     int tm;
     u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
 
     tm = vfp_single_type(&vsm);
 
@@ -511,7 +514,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
      * If we have a signalling NaN, signal invalid operation.
      */
     if (tm == VFP_SNAN)
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
 
     if (tm & VFP_DENORMAL)
         vfp_single_normalise_denormal(&vsm);
@@ -568,7 +571,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -583,7 +586,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
 
     if (vsm.exponent >= 127 + 32) {
         d = vsm.sign ? 0 : 0xffffffff;
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
     } else if (vsm.exponent >= 127) {
         int shift = 127 + 31 - vsm.exponent;
         u32 rem, incr = 0;
@@ -648,7 +651,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -774,7 +777,7 @@ vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
             /*
              * different signs -> invalid
              */
-            exceptions = FPSCR_IOC;
+            exceptions |= FPSCR_IOC;
             vsp = &vfp_single_default_qnan;
         } else {
             /*
@@ -921,27 +924,27 @@ static u32
 vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr, u32 negate, const char *func)
 {
     vfp_single vsd, vsp, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 v;
 
     v = vfp_get_float(state, sn);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, v);
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
 
     if (negate & NEG_MULTIPLY)
         vsp.sign = vfp_sign_negate(vsp.sign);
 
     v = vfp_get_float(state, sd);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sd, v);
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand != 0)
         vfp_single_normalise_denormal(&vsn);
 
@@ -1000,20 +1003,20 @@ static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
     return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fmul");
 }
 
@@ -1023,20 +1026,20 @@ static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
     vsd.sign = vfp_sign_negate(vsd.sign);
     return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fnmul");
 }
@@ -1047,7 +1050,7 @@ static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
@@ -1055,15 +1058,15 @@ static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     /*
      * Unpack and normalise denormals.
     */
     vfp_single_unpack(&vsn, n, &fpscr);
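For context on the pattern: before this change, vfp_single_unpack took the FPSCR by pointer and set sticky flags such as FPSCR_IDC directly in it; after the change it takes the FPSCR by value and returns an exception bitmask, which each caller ORs into a local exceptions accumulator (hence the |= instead of = throughout). Below is a minimal standalone C++ sketch of that accumulation pattern; the type, bit values, and simplified unpack body are illustrative stand-ins, not the emulator's actual definitions.

#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;
using s32 = std::int32_t;

// Stand-in FPSCR bits (IDC = input denormal, FZ = flush-to-zero); values follow the ARM FPSCR layout.
constexpr u32 FPSCR_IDC = 1u << 7;
constexpr u32 FPSCR_FLUSH_TO_ZERO = 1u << 24;

struct vfp_single {
    u32 sign, exponent, significand;
};

// New-style unpack: FPSCR is passed by value and the exceptions raised are
// returned, rather than written back through a pointer as in the old code.
u32 vfp_single_unpack(vfp_single* s, s32 val, u32 fpscr) {
    u32 exceptions = 0;
    const u32 bits = static_cast<u32>(val);
    s->sign = bits >> 31;
    s->exponent = (bits >> 23) & 0xff;
    s->significand = bits & 0x007fffff;

    // Flush denormal inputs to +0 in FZ mode and record the input-denormal
    // exception in the returned mask instead of mutating the caller's FPSCR.
    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && s->exponent == 0 && s->significand != 0) {
        s->sign = 0;
        s->exponent = 0;
        s->significand = 0;
        exceptions |= FPSCR_IDC;
    }
    return exceptions;
}

int main() {
    vfp_single vsn{}, vsm{};
    u32 fpscr = FPSCR_FLUSH_TO_ZERO;
    u32 exceptions = 0;

    // Callers accumulate with |=, mirroring the edits in the diff above.
    exceptions |= vfp_single_unpack(&vsn, 0x00000001, fpscr); // denormal input -> IDC
    exceptions |= vfp_single_unpack(&vsm, 0x3f800000, fpscr); // 1.0f -> no exception

    std::printf("accumulated exceptions: 0x%08x\n", static_cast<unsigned>(exceptions));
    return 0;
}

The accumulated mask is what the operation ultimately reports, so exceptions raised while unpacking operands are no longer lost or double-committed when a later step overwrites the variable.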