in csharp/EPAM.Deltix.DFP.Benchmark/BidInternal.cs [865:1111]
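// assemble a finite BID64 from sign, biased exponent and coefficient for the case where
// the coefficient fits the small-coefficient field (below 2^EXPONENT_SHIFT_SMALL64):
// a negative exponent takes the underflow path (the low decimal digits are rounded away),
// an exponent above DECIMAL_MAX_EXPON_64 takes the overflow path, and if up-scaling pushes
// the coefficient out of the small field the special (large-coefficient) encoding is used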
public static unsafe BID_UINT64 get_BID64_small_mantissa(BID_UINT64 sgn, int expon, BID_UINT64 coeff, int rmode, ref _IDEC_flags fpsc)
{
BID_UINT128 C128, Q_low;
#if BID_SET_STATUS_FLAGS
BID_UINT128 Stemp;
BID_UINT64 carry, CY;
#endif
BID_UINT64 r, mask, _C64, remainder_h, QH;
int extra_digits, amount, amount2;
#if BID_SET_STATUS_FLAGS
uint status;
#endif
// check for possible underflow/overflow: the unsigned compare makes a negative
// exponent look huge, so one test catches both out-of-range directions
if (((uint)expon) >= 3 * 256)
{
if (expon < 0)
{
// underflow
if (expon + MAX_FORMAT_DIGITS < 0)
{
#if BID_SET_STATUS_FLAGS
__set_status_flags(ref fpsc, BID_UNDERFLOW_EXCEPTION | BID_INEXACT_EXCEPTION);
#endif
#if !IEEE_ROUND_NEAREST_TIES_AWAY
#if !IEEE_ROUND_NEAREST
if (rmode == BID_ROUNDING_DOWN && sgn != 0)
return 0x8000000000000001UL;
if (rmode == BID_ROUNDING_UP && sgn == 0)
return 1UL;
#endif
#endif
// result is 0
return sgn;
}
#if !IEEE_ROUND_NEAREST_TIES_AWAY
#if !IEEE_ROUND_NEAREST
// negative operand with a directed rounding mode: swap rounding down and rounding up
if (sgn != 0 && (uint)(rmode - 1) < 2)
rmode = 3 - rmode;
#endif
#endif
// get digits to be shifted out
extra_digits = -expon;
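// rounding is done by pre-adding a mode-dependent constant (for round-to-nearest this is
// presumably half of 10^extra_digits) and then truncating the division below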
C128.w0 = coeff + bid_round_const_table[rmode, extra_digits];
// get coeff*(2^M[extra_digits])/10^extra_digits
//__mul_64x128_full(out QH, out Q_low, C128.w0, bid_reciprocals10_128[extra_digits]);
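// inlined 64x128 -> 192-bit multiply: QH receives the top 64 bits of the product,
// Q_low the low 128 bits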
{
BID_UINT128 ALBL, ALBH, QM2;
BID_UINT128 B = bid_reciprocals10_128[extra_digits];
//__mul_64x64_to_128(out ALBH, C128.w0, B.w1);
{
BID_UINT64 CXH, CXL, CYH, CYL, PL, PH, PM, PM2;
CXH = C128.w0 >> 32;
CXL = (BID_UINT32)C128.w0;
CYH = B.w1 >> 32;
CYL = (BID_UINT32)B.w1;
PM = CXH * CYL;
PH = CXH * CYH;
PL = CXL * CYL;
PM2 = CXL * CYH;
PH += (PM >> 32);
PM = (BID_UINT64)((BID_UINT32)PM) + PM2 + (PL >> 32);
ALBH.w1 = PH + (PM >> 32);
ALBH.w0 = (PM << 32) + (BID_UINT32)PL;
}
//__mul_64x64_to_128(out ALBL, C128.w0, B.w0);
{
BID_UINT64 CXH, CXL, CYH, CYL, PL, PH, PM, PM2;
CXH = C128.w0 >> 32;
CXL = (BID_UINT32)C128.w0;
CYH = B.w0 >> 32;
CYL = (BID_UINT32)B.w0;
PM = CXH * CYL;
PH = CXH * CYH;
PL = CXL * CYL;
PM2 = CXL * CYH;
PH += (PM >> 32);
PM = (BID_UINT64)((BID_UINT32)PM) + PM2 + (PL >> 32);
ALBL.w1 = PH + (PM >> 32);
ALBL.w0 = (PM << 32) + (BID_UINT32)PL;
}
Q_low.w0 = ALBL.w0;
//__add_128_64(out QM2, ALBH, ALBL.w1);
{
BID_UINT64 R64H = ALBH.w1;
QM2.w0 = ALBL.w1 + ALBH.w0;
if (QM2.w0 < ALBL.w1)
R64H++;
QM2.w1 = R64H;
}
Q_low.w1 = QM2.w0;
QH = QM2.w1;
}
// now get P/10^extra_digits: shift Q_high right by M[extra_digits]-128
amount = bid_recip_scale[extra_digits];
_C64 = QH >> amount;
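// _C64 is now the quotient coeff/10^extra_digits; the constant added above already
// biased it toward the selected rounding mode (ties are fixed up to even below)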
#if !IEEE_ROUND_NEAREST_TIES_AWAY
#if !IEEE_ROUND_NEAREST
if (rmode == BID_ROUNDING_TO_NEAREST)
#endif
if ((_C64 & 1) != 0)
{
// check whether fractional part of initial_P/10^extra_digits is exactly .5
// get remainder
amount2 = 64 - amount;
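// build a mask of the low 'amount' bits of QH, i.e. the fraction bits dropped by the shift above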
remainder_h = 0;
remainder_h--;
remainder_h >>= amount2;
remainder_h = remainder_h & QH;
if (remainder_h == 0
&& (Q_low.w1 < bid_reciprocals10_128[extra_digits].w1
|| (Q_low.w1 == bid_reciprocals10_128[extra_digits].w1
&& Q_low.w0 < bid_reciprocals10_128[extra_digits].w0)))
{
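// exact tie after the injected .5: round to even by clearing the low bit of the odd quotient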
_C64--;
}
}
#endif
#if BID_SET_STATUS_FLAGS
if (is_inexact(fpsc))
__set_status_flags(ref fpsc, BID_UNDERFLOW_EXCEPTION);
else
{
status = BID_INEXACT_EXCEPTION;
// get remainder
remainder_h = QH << (64 - amount);
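// left-align the dropped fraction bits of the quotient so the exactness tests below can use them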
switch (rmode)
{
case BID_ROUNDING_TO_NEAREST:
case BID_ROUNDING_TIES_AWAY:
// test whether fractional part is 0
if (remainder_h == 0x8000000000000000UL
&& (Q_low.w1 < bid_reciprocals10_128[extra_digits].w1
|| (Q_low.w1 == bid_reciprocals10_128[extra_digits].w1
&& Q_low.w0 < bid_reciprocals10_128[extra_digits].w0)))
status = BID_EXACT_STATUS;
break;
case BID_ROUNDING_DOWN:
case BID_ROUNDING_TO_ZERO:
if (remainder_h == 0
&& (Q_low.w1 < bid_reciprocals10_128[extra_digits].w1
|| (Q_low.w1 == bid_reciprocals10_128[extra_digits].w1
&& Q_low.w0 < bid_reciprocals10_128[extra_digits].w0)))
status = BID_EXACT_STATUS;
break;
default:
// round up
//__add_carry_out(out Stemp.w0, out CY, Q_low.w0, bid_reciprocals10_128[extra_digits].w0);
{
Stemp.w0 = Q_low.w0 + bid_reciprocals10_128[extra_digits].w0;
CY = (Stemp.w0 < Q_low.w0) ? 1UL : 0;
}
//__add_carry_in_out(out Stemp.w1, out carry, Q_low.w1, bid_reciprocals10_128[extra_digits].w1, CY);
{
BID_UINT64 X1 = Q_low.w1 + CY;
Stemp.w1 = X1 + bid_reciprocals10_128[extra_digits].w1;
carry = ((Stemp.w1 < X1) || (X1 < CY)) ? 1UL : 0;
}
if ((remainder_h >> (64 - amount)) + carry >=
(((BID_UINT64)1) << amount))
status = BID_EXACT_STATUS;
break;
}
if (status != BID_EXACT_STATUS)
__set_status_flags(ref fpsc, BID_UNDERFLOW_EXCEPTION | status);
}
#endif
return sgn | _C64;
}
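// exponent above the maximum: multiply the coefficient by 10 ((x << 3) + (x << 1) == 10*x)
// and decrement the exponent while the coefficient still has a decimal digit of headroom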
while (coeff < 1000000000000000UL && expon >= 3 * 256)
{
expon--;
coeff = (coeff << 3) + (coeff << 1);
}
if (expon > DECIMAL_MAX_EXPON_64)
{
#if BID_SET_STATUS_FLAGS
__set_status_flags(ref fpsc, BID_OVERFLOW_EXCEPTION | BID_INEXACT_EXCEPTION);
#endif
// overflow
r = sgn | INFINITY_MASK64;
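// for directed rounding, overflow against the rounding direction yields the
// largest-magnitude finite value instead of infinity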
switch (rmode)
{
case BID_ROUNDING_DOWN:
if (sgn == 0)
r = LARGEST_BID64;
break;
case BID_ROUNDING_TO_ZERO:
r = sgn | LARGEST_BID64;
break;
case BID_ROUNDING_UP:
// round up
if (sgn != 0)
r = SMALLEST_BID64;
break;
}
return r;
}
else
{
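// up-scaling may have pushed the coefficient out of the small field; if so, use the
// special encoding: the exponent sits at EXPONENT_SHIFT_LARGE64 and the top coefficient
// bits are implied by SPECIAL_ENCODING_MASK64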
mask = 1;
mask <<= EXPONENT_SHIFT_SMALL64;
if (coeff >= mask)
{
r = (BID_UINT64)expon;
r <<= EXPONENT_SHIFT_LARGE64;
r |= (sgn | SPECIAL_ENCODING_MASK64);
// add coeff, without leading bits
mask = (mask >> 2) - 1;
coeff &= mask;
r |= coeff;
return r;
}
}
}
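// in-range exponent, small coefficient: pack the three fields directly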
r = (BID_UINT64)expon;
r <<= EXPONENT_SHIFT_SMALL64;
r |= (coeff | sgn);
return r;
}