26
26
27
27
#include "assert.h"
28
28
29
+ // computes the exponent of a number
29
30
int_fast16_t flexfloat_exp (const flexfloat_t * a )
30
31
{
31
- int_fast16_t a_exp = EXPONENT (CAST_TO_INT (a -> value ));
32
+ int_fast16_t a_exp = EXPONENT (CAST_TO_INT (a -> value )); //exponent value of a
32
33
33
- int_fast16_t bias = flexfloat_bias (a -> desc );
34
+ int_fast16_t bias = flexfloat_bias (a -> desc ); //in header file. no fucking clue what it does
34
35
35
36
if (a_exp == 0 || a_exp == INF_EXP )
36
37
return a_exp ;
37
38
else
38
- return (a_exp - BIAS ) + bias ;
39
+ return (a_exp - BIAS ) + bias ; //why?
39
40
}
40
41
41
42
uint_t flexfloat_frac (const flexfloat_t * a )
42
43
{
44
+ // CAST_TO_INT(a->value) & MASK_FRAC = significand bits
45
+ // NUM_BITS_FRAC - a->desc.frac_bits = ? (I don't know what desc.frac_bits is)
46
+ // Returns bit shifted version of significan bits
43
47
return (CAST_TO_INT (a -> value ) & MASK_FRAC ) >> (NUM_BITS_FRAC - a -> desc .frac_bits );
44
48
}
45
49
50
+ // Extracts the fraction bits as flexfloat_frac does, but for denormal
+ // (subnormal) values, taking the effective exponent as a parameter.
46
52
uint_t flexfloat_denorm_frac (const flexfloat_t * a , int_fast16_t exp )
47
53
{
48
54
if (EXPONENT (CAST_TO_INT (a -> value )) == 0 ) // Denormalized backend value
@@ -58,6 +64,7 @@ uint_t flexfloat_denorm_frac(const flexfloat_t *a, int_fast16_t exp)
58
64
}
59
65
60
66
// Pack normalized desc-fraction with desc-relative exponent to backend float
67
+ // Gives the binary representation of value given binary representation of sign, exp, and significand
61
68
uint_t flexfloat_pack (flexfloat_desc_t desc , bool sign , int_fast16_t exp , uint_t frac )
62
69
{
63
70
int_fast16_t bias = flexfloat_bias (desc );
@@ -74,6 +81,7 @@ uint_t flexfloat_pack(flexfloat_desc_t desc, bool sign, int_fast16_t exp, uint_t
74
81
return PACK (sign , exp , frac << (NUM_BITS_FRAC - desc .frac_bits ));
75
82
}
76
83
84
+ // Like flexfloat_pack, but for denormal (subnormal) values, whose
+ // exponent field is zero.
77
85
uint_t flexfloat_denorm_pack (flexfloat_desc_t desc , bool sign , uint_t frac )
78
86
{
79
87
int_fast16_t bias = flexfloat_bias (desc );
@@ -82,7 +90,14 @@ uint_t flexfloat_denorm_pack(flexfloat_desc_t desc, bool sign, uint_t frac)
82
90
83
91
uint_t flexfloat_pack_bits (flexfloat_desc_t desc , uint_t bits )
84
92
{
93
+ // bits >> (desc.exp_bits + desc.frac_bits) moves the sign bit into the
+ // lowest position; the & 0x1 masks off any higher-order bits that may
+ // still be set after the shift, since the container type can be wider
+ // than the packed value.
85
96
bool sign = (bits >> (desc .exp_bits + desc .frac_bits )) & 0x1 ;
97
+
98
+ // bits >> desc.frac_bits discards the fraction field, and the mask
+ // ((0x1 << desc.exp_bits) - 1) isolates the exp_bits-wide biased
+ // exponent field, stripping the sign bit above it.
86
101
int_fast16_t exp = (bits >> desc .frac_bits ) & ((0x1 <<desc .exp_bits ) - 1 );
87
102
uint_t frac = bits & ((UINT_C (1 )<<desc .frac_bits ) - 1 );
88
103
@@ -189,9 +204,10 @@ bool flexfloat_inf_rounding(const flexfloat_t *a, int_fast16_t exp, bool sign, b
189
204
}
190
205
191
206
// return a value to sum in order to apply rounding
207
+ // EXPONENT(CAST_TO_INT(a->value)) reads the raw backend exponent field,
+ // whereas flexfloat_exp(a) returns that field re-biased for a->desc.
192
208
int_t flexfloat_rounding_value (const flexfloat_t * a , int_fast16_t exp , bool sign )
193
209
{
194
- if (EXPONENT (CAST_TO_INT (a -> value )) == 0 ) // Denorm backend format
210
+ if (EXPONENT (CAST_TO_INT (a -> value )) == 0 ) // Denorm backend format: the backend value is subnormal (raw exponent field is zero)
195
211
{
196
212
return flexfloat_denorm_pack (a -> desc , sign , 0x1 );
197
213
}
@@ -239,6 +255,7 @@ void flexfloat_sanitize(flexfloat_t *a)
239
255
fegetexceptflag (& flags , FE_ALL_EXCEPT );
240
256
#endif
241
257
// Rounding mode
258
+ // Dispatch on the current floating-point rounding mode (fegetround) to
+ // pick the matching rounding predicate.
242
259
int mode = fegetround ();
243
260
if (mode == FE_TONEAREST && flexfloat_nearest_rounding (a , exp ))
244
261
{
0 commit comments