Improve erf/expm1/expint coverage. #1111

Status: Open

jzmaddock wants to merge 54 commits into develop from improve_coverage_3.

Commits (all by jzmaddock):
9ca40b9  Improve erf/expm1/expint coverage. (Mar 6, 2024)
fbf553d  Add missing #include. (Mar 6, 2024)
c446d79  expm1 coverage. (Mar 8, 2024)
47414e9  Tidy up expint coverage. (Mar 8, 2024)
e945458  Factorial coverage. (Mar 8, 2024)
bb9b10e  Correct expint test case. (Mar 8, 2024)
718004e  fpclassify coverage. (Mar 10, 2024)
0da61de  Gamma coverage. (Mar 10, 2024)
c206a8d  Allow for no-exceptions. (Mar 11, 2024)
98bf9ad  Merge branch 'develop' into improve_coverage_3 (Jun 28, 2024)
b90cded  Remove unneeded initializers. (Jun 28, 2024)
0c6522f  Mark up last missing erf.hpp line: it's covered. (Jun 29, 2024)
e844686  tgamma coverage #1. (Jun 29, 2024)
f60b35f  More tgamma coverage. (Jun 29, 2024)
87f242f  gamma.hpp coverage. (Jun 30, 2024)
d441001  Correct test. (Jun 30, 2024)
d8a4900  Mark up zeta constants for coverage. (Jul 1, 2024)
24ede6f  gamma.hpp coverage. (Jul 3, 2024)
e322c31  Hopefully complete gamma coverage. (Jul 4, 2024)
8a266b8  Merge branch 'develop' into improve_coverage_3 (Jul 4, 2024)
67dc6ec  Correct standalone failure. (Jul 4, 2024)
1c6c602  Corrections for gamma coverage. (Jul 4, 2024)
0848e3b  Exclude owen's T tables from coverage. (Jul 5, 2024)
fc0b406  Hankel error handling tests. (Jul 5, 2024)
6440f73  Heumann Lambda coverage. (Jul 5, 2024)
ba6838c  Improve 0F1, 1F0 and 1F1 coverage. (Jul 13, 2024)
2b4512a  1F1 and 2F0 coverage. (Jul 14, 2024)
36d4644  hypergeometric_0F1_bessel.hpp coverage. (Jul 14, 2024)
977feaf  1F1 coverage. (Aug 6, 2024)
8107e55  Merge branch 'develop' into improve_coverage_3 (Apr 18, 2025)
0087009  Correct macro name for standalone usage. (Apr 18, 2025)
b0200e4  Correct macro name for standalone. (Apr 18, 2025)
54b057b  Modernise log1p/expm1 to mostly use their std:: equivalents. (Apr 19, 2025)
fff9f07  Adjust error rate on bessel_i_prime as a result of expm1 changes on m… (Apr 20, 2025)
7914a75  One more error rate adjustment (ibeta_derivative for Mingw). (Apr 20, 2025)
31a913c  Error rates again. (Apr 20, 2025)
63d559c  Digamma coverage tidy up. (Apr 21, 2025)
6113e18  Correct ellint_d testing logic. (Apr 21, 2025)
56f5611  Remove duplicated error handling from ellint_rj. (Apr 21, 2025)
b0a9ffb  Correct erf reflection logic (fixes CUDA change). (Apr 21, 2025)
2bd29ae  erf: exclude __float128 case from coverage. (Apr 21, 2025)
ce9c72d  expint: coverage cosmetic change. (Apr 21, 2025)
c0af363  Add true multiprecision test case to log1p/expm1. (Apr 21, 2025)
760fca3  Remove MP test case: our test data isn't precise enough. (Apr 21, 2025)
0e7b0f3  Correct ellint_d test configuration. (Apr 21, 2025)
2600de8  Coverage: Digamma exclude another MP-only function. (Apr 21, 2025)
255dbc9  Coverage: erf, exclude block known to be covered by __float128 tests. (Apr 21, 2025)
ca6e976  Warning fix in ellint_rj.hpp (Apr 22, 2025)
4a3d3bc  Coverage: Improve gamma.hpp. (Apr 22, 2025)
9fc0965  Disable slow to compile test. (Apr 22, 2025)
84f83b3  Coverage: Remove and assert currently unused p_derivative calculations, (Apr 22, 2025)
7a24948  BOOST_ASSERT->BOOST_MATH_ASSERT (Apr 22, 2025)
5e5cdee  Coverage: Correct assertions in gamma.hpp. (Apr 22, 2025)
07c5282  Coverage: correct exception throwing tests for sycl run. (Apr 23, 2025)
include/boost/math/special_functions/detail/bessel_ik.hpp (1 addition, 1 deletion)

@@ -416,7 +416,7 @@ BOOST_MATH_GPU_ENABLED int bessel_ik(T v, T x, T* result_I, T* result_K, int kin
else
Iv = boost::math::numeric_limits<T>::quiet_NaN(); // any value will do
}
if (reflect)
if (reflect && (kind & need_i))
{
BOOST_MATH_ASSERT(fabs(v - n - u) < tools::forth_root_epsilon<T>());
T z = (u + n % 2);
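For context on the one-line change above: under order reflection (v -> -v) only the I Bessel function picks up a correction term, while K is even in its order (DLMF 10.27.2 and 10.27.3):

    I_{-v}(x) = I_v(x) + \frac{2}{\pi}\,\sin(v\pi)\,K_v(x), \qquad K_{-v}(x) = K_v(x)

So when the caller requested K alone (kind lacking need_i) there is nothing to reflect, and the guard lets the quiet-NaN placeholder Iv pass through untouched.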
include/boost/math/special_functions/detail/bessel_j1.hpp (0 additions, 32 deletions)

@@ -32,41 +32,9 @@

namespace boost { namespace math{ namespace detail{

template <typename T>
BOOST_MATH_GPU_ENABLED T bessel_j1(T x);

template <class T>
struct bessel_j1_initializer
{
struct init
{
BOOST_MATH_GPU_ENABLED init()
{
do_init();
}
BOOST_MATH_GPU_ENABLED static void do_init()
{
bessel_j1(T(1));
}
BOOST_MATH_GPU_ENABLED void force_instantiate()const{}
};
BOOST_MATH_STATIC const init initializer;
BOOST_MATH_GPU_ENABLED static void force_instantiate()
{
#ifndef BOOST_MATH_HAS_GPU_SUPPORT
initializer.force_instantiate();
#endif
}
};

template <class T>
const typename bessel_j1_initializer<T>::init bessel_j1_initializer<T>::initializer;

template <typename T>
BOOST_MATH_GPU_ENABLED T bessel_j1(T x)
{
bessel_j1_initializer<T>::force_instantiate();

BOOST_MATH_STATIC const T P1[] = {
static_cast<T>(BOOST_MATH_BIG_CONSTANT(T, 64, -1.4258509801366645672e+11)),
static_cast<T>(BOOST_MATH_BIG_CONSTANT(T, 64, 6.6781041261492395835e+09)),
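The block deleted above, and the matching ones removed from bessel_k0.hpp and bessel_k1.hpp below, are instances of a single force-instantiation idiom: a namespace-scope static whose constructor evaluates the function once during static initialization, so that any lazily built internal constants exist before main() starts and cannot be raced on. A minimal sketch of the idiom with hypothetical names (f, f_initializer), not the Boost.Math sources:

#include <cmath>

// Stand-in for a function that builds internal constant tables on first call:
template <class T>
T f(T x) { return std::exp(x); }

template <class T>
struct f_initializer
{
   struct init
   {
      init() { f(T(1)); }               // one dummy evaluation at static-init time
      void force_instantiate() const {} // hook that ODR-uses the object
   };
   static const init initializer;       // constructed before main() runs
   static void force_instantiate() { initializer.force_instantiate(); }
};
template <class T>
const typename f_initializer<T>::init f_initializer<T>::initializer;

With the coefficient tables in these headers now plain static arrays of compile-time constants there is nothing left to pre-build, which is presumably why the scaffolding (and its permanently uncoverable lines) can go.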
include/boost/math/special_functions/detail/bessel_k0.hpp (0 additions, 40 deletions)

@@ -47,45 +47,6 @@

namespace boost { namespace math { namespace detail{

template <typename T>
BOOST_MATH_GPU_ENABLED T bessel_k0(const T& x);

template <class T, class tag>
struct bessel_k0_initializer
{
struct init
{
BOOST_MATH_GPU_ENABLED init()
{
do_init(tag());
}
BOOST_MATH_GPU_ENABLED static void do_init(const boost::math::integral_constant<int, 113>&)
{
bessel_k0(T(0.5));
bessel_k0(T(1.5));
}
BOOST_MATH_GPU_ENABLED static void do_init(const boost::math::integral_constant<int, 64>&)
{
bessel_k0(T(0.5));
bessel_k0(T(1.5));
}
template <class U>
BOOST_MATH_GPU_ENABLED static void do_init(const U&){}
BOOST_MATH_GPU_ENABLED void force_instantiate()const{}
};
BOOST_MATH_STATIC const init initializer;
BOOST_MATH_GPU_ENABLED static void force_instantiate()
{
#ifndef BOOST_MATH_HAS_GPU_SUPPORT
initializer.force_instantiate();
#endif
}
};

template <class T, class tag>
const typename bessel_k0_initializer<T, tag>::init bessel_k0_initializer<T, tag>::initializer;


template <typename T, int N>
BOOST_MATH_GPU_ENABLED T bessel_k0_imp(const T&, const boost::math::integral_constant<int, N>&)
{
@@ -511,7 +472,6 @@ BOOST_MATH_GPU_ENABLED inline T bessel_k0(const T& x)
113 : -1
> tag_type;

bessel_k0_initializer<T, tag_type>::force_instantiate();
return bessel_k0_imp(x, tag_type());
}

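The surviving tag_type logic above is a compile-time precision dispatch: overload resolution on integral_constant<int, N> selects the polynomial tables sized for T's mantissa. A simplified, self-contained sketch of the pattern, using std:: facilities in place of the boost::math shims and placeholder bodies:

#include <limits>
#include <type_traits>

// Generic fallback (a series evaluation would live here):
template <class T, int N>
T k0_imp(const T&, const std::integral_constant<int, N>&) { return T(0); }

// Rational approximations tuned for 53-bit (double) precision would live here:
template <class T>
T k0_imp(const T&, const std::integral_constant<int, 53>&) { return T(1); }

template <class T>
T k0(const T& x)
{
   typedef std::integral_constant<int,
      std::numeric_limits<T>::digits <= 24  ? 24 :
      std::numeric_limits<T>::digits <= 53  ? 53 :
      std::numeric_limits<T>::digits <= 64  ? 64 :
      std::numeric_limits<T>::digits <= 113 ? 113 : -1
   > tag_type;
   return k0_imp(x, tag_type()); // T = double selects the 53-bit overload
}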
include/boost/math/special_functions/detail/bessel_k1.hpp (0 additions, 41 deletions)

@@ -47,46 +47,6 @@

namespace boost { namespace math { namespace detail{

template <typename T>
BOOST_MATH_GPU_ENABLED T bessel_k1(const T&);

template <class T, class tag>
struct bessel_k1_initializer
{
struct init
{
BOOST_MATH_GPU_ENABLED init()
{
do_init(tag());
}
BOOST_MATH_GPU_ENABLED static void do_init(const boost::math::integral_constant<int, 113>&)
{
bessel_k1(T(0.5));
bessel_k1(T(2));
bessel_k1(T(6));
}
BOOST_MATH_GPU_ENABLED static void do_init(const boost::math::integral_constant<int, 64>&)
{
bessel_k1(T(0.5));
bessel_k1(T(6));
}
template <class U>
BOOST_MATH_GPU_ENABLED static void do_init(const U&) {}
BOOST_MATH_GPU_ENABLED void force_instantiate()const {}
};
BOOST_MATH_STATIC const init initializer;
BOOST_MATH_GPU_ENABLED static void force_instantiate()
{
#ifndef BOOST_MATH_HAS_GPU_SUPPORT
initializer.force_instantiate();
#endif
}
};

template <class T, class tag>
const typename bessel_k1_initializer<T, tag>::init bessel_k1_initializer<T, tag>::initializer;


template <typename T, int N>
inline BOOST_MATH_GPU_ENABLED T bessel_k1_imp(const T&, const boost::math::integral_constant<int, N>&)
{
@@ -553,7 +513,6 @@ namespace boost { namespace math { namespace detail{
113 : -1
> tag_type;

bessel_k1_initializer<T, tag_type>::force_instantiate();
return bessel_k1_imp(x, tag_type());
}

include/boost/math/special_functions/detail/hypergeometric_0F1_bessel.hpp

@@ -20,12 +20,11 @@
{
BOOST_MATH_STD_USING

const bool is_z_nonpositive = z <= 0;
//const bool is_z_nonpositive = z <= 0;
BOOST_MATH_ASSERT(z < 0); // condition used at call site

const T sqrt_z = is_z_nonpositive ? T(sqrt(-z)) : T(sqrt(z));
const T bessel_mult = is_z_nonpositive ?
boost::math::cyl_bessel_j(b - 1, 2 * sqrt_z, pol) :
boost::math::cyl_bessel_i(b - 1, 2 * sqrt_z, pol) ;
const T sqrt_z = sqrt(-z);
const T bessel_mult = boost::math::cyl_bessel_j(b - 1, 2 * sqrt_z, pol);

if (b > boost::math::max_factorial<T>::value)
{
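The rewrite above leans on the precondition asserted at the top of the hunk (z < 0) and the classical reduction of 0F1 to a Bessel function of the first kind:

    {}_0F_1(;b;z) = \Gamma(b)\,(-z)^{(1-b)/2}\,J_{b-1}\!\bigl(2\sqrt{-z}\bigr) \quad (z < 0)
    {}_0F_1(;b;z) = \Gamma(b)\,z^{(1-b)/2}\,I_{b-1}\!\bigl(2\sqrt{z}\bigr) \qquad\; (z > 0)

Since the call site only reaches this code with z < 0, the is_z_nonpositive ternaries collapse to the J branch alone.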
include/boost/math/special_functions/detail/hypergeometric_1F1_bessel.hpp

@@ -74,8 +74,12 @@
{
// We get very limited precision due to rapid denormalisation/underflow of the Bessel values, raise an exception and try something else:
policies::raise_evaluation_error("hypergeometric_1F1_AS_13_3_7_tricomi_series<%1%>", "Underflow in Bessel functions", bessel_cache[cache_size - 1], pol);
// Exceptions are off if we get here, just fill the cache with NaN's and we'll let this method fail and fallback later:
std::fill(bessel_cache.begin(), bessel_cache.end(), std::numeric_limits<T>::quiet_NaN());
cache_offset = -cache_size;
return;
}
if ((term * bessel_cache[cache_size - 1] < tools::min_value<T>() / (tools::epsilon<T>() * tools::epsilon<T>())) || !(boost::math::isfinite)(term) || (!std::numeric_limits<T>::has_infinity && (fabs(term) > tools::max_value<T>())))
if ((fabs(term * bessel_cache[cache_size - 1]) < tools::min_value<T>() / (tools::epsilon<T>() * tools::epsilon<T>())) || !(boost::math::isfinite)(term) || (!std::numeric_limits<T>::has_infinity && (fabs(term) > tools::max_value<T>())))
{
term = -log(fabs(bessel_arg)) * b_minus_1_plus_n / 2;
log_scale = lltrunc(term);
@@ -88,15 +92,27 @@
if constexpr (std::numeric_limits<T>::has_infinity)
{
if (!(boost::math::isfinite)(bessel_cache[cache_size - 1]))
{
policies::raise_evaluation_error("hypergeometric_1F1_AS_13_3_7_tricomi_series<%1%>", "Expected finite Bessel function result but got %1%", bessel_cache[cache_size - 1], pol);
// Exceptions are off if we get here, just fill the cache with NaN's and we'll let this method fail and fallback later:
std::fill(bessel_cache.begin(), bessel_cache.end(), std::numeric_limits<T>::quiet_NaN());
}
}
else
if ((boost::math::isnan)(bessel_cache[cache_size - 1]) || (fabs(bessel_cache[cache_size - 1]) >= tools::max_value<T>()))
{
policies::raise_evaluation_error("hypergeometric_1F1_AS_13_3_7_tricomi_series<%1%>", "Expected finite Bessel function result but got %1%", bessel_cache[cache_size - 1], pol);
// Exceptions are off if we get here, just fill the cache with NaN's and we'll let this method fail and fallback later:
std::fill(bessel_cache.begin(), bessel_cache.end(), std::numeric_limits<T>::quiet_NaN());
}
#else
if ((std::numeric_limits<T>::has_infinity && !(boost::math::isfinite)(bessel_cache[cache_size - 1]))
|| (!std::numeric_limits<T>::has_infinity && ((boost::math::isnan)(bessel_cache[cache_size - 1]) || (fabs(bessel_cache[cache_size - 1]) >= tools::max_value<T>()))))
{
policies::raise_evaluation_error("hypergeometric_1F1_AS_13_3_7_tricomi_series<%1%>", "Expected finite Bessel function result but got %1%", bessel_cache[cache_size - 1], pol);
// Exceptions are off if we get here, just fill the cache with NaN's and we'll let this method fail and fallback later:
std::fill(bessel_cache.begin(), bessel_cache.end(), std::numeric_limits<T>::quiet_NaN());
}
#endif
cache_offset = -cache_size;
refill_cache();
@@ -108,8 +124,13 @@
// very small (or zero) when b == 2a:
//
BOOST_MATH_STD_USING
//
// Except in the multiprecision case, we have probably eliminated anything
// that would need more than the default 64 Bessel functions. Anything more
// than that risks becoming a divergent series anyway...
//
if(n - 2 - cache_offset >= cache_size)
refill_cache();
refill_cache(); // LCOV_EXCL_LINE
T result = A_minus_2 * term * bessel_cache[n - 2 - cache_offset];
term *= mult;
++n;
@@ -122,7 +143,7 @@
if (A_minus_2 != 0)
{
if (n - 2 - cache_offset >= cache_size)
refill_cache();
refill_cache(); // LCOV_EXCL_LINE
result += A_minus_2 * term * bessel_cache[n - 2 - cache_offset];
}
term *= mult;
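A note on the pattern added in the hunks above: policies::raise_evaluation_error only throws under the default error-handling policy; with a user-supplied policy such as errno_on_error it returns instead, so the Bessel cache must still be left in a well-defined (NaN-filled) state for the later fallback. A hedged usage sketch showing how a caller selects such a non-throwing policy (the function choice and arguments are purely illustrative):

#include <boost/math/special_functions/hypergeometric_1F1.hpp>
#include <boost/math/policies/policy.hpp>
#include <cerrno>
#include <iostream>

int main()
{
   namespace pol = boost::math::policies;
   // With errno_on_error, evaluation errors set errno and return a value
   // (typically a NaN) rather than throwing boost::math::evaluation_error:
   typedef pol::policy<pol::evaluation_error<pol::errno_on_error> > no_throw;

   errno = 0;
   double r = boost::math::hypergeometric_1F1(-50.5, 2.5, 100.25, no_throw());
   if (errno)
      std::cout << "evaluation error reported via errno; result = " << r << '\n';
   else
      std::cout << "result = " << r << '\n';
}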
include/boost/math/special_functions/detail/igamma_large.hpp (9 additions, 0 deletions)

@@ -95,6 +95,7 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b

T workspace[13];

// LCOV_EXCL_START
BOOST_MATH_STATIC const T C0[] = {
BOOST_MATH_BIG_CONSTANT(T, 64, -0.333333333333333333333),
BOOST_MATH_BIG_CONSTANT(T, 64, 0.0833333333333333333333),
@@ -272,6 +273,8 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b
BOOST_MATH_BIG_CONSTANT(T, 64, 0.00640336283380806979482),
BOOST_MATH_BIG_CONSTANT(T, 64, -0.00404101610816766177474),
};
// LCOV_EXCL_END

workspace[12] = tools::evaluate_polynomial(C12, z);

T result = tools::evaluate_polynomial<13, T, T>(workspace, 1/a);
@@ -303,6 +306,7 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b

T workspace[10];

// LCOV_EXCL_START
BOOST_MATH_STATIC const T C0[] = {
static_cast<T>(-0.33333333333333333L),
static_cast<T>(0.083333333333333333L),
@@ -416,6 +420,7 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b
static_cast<T>(0.00083949872067208728L),
static_cast<T>(-0.00043829709854172101L),
};
// LCOV_EXCL_END
workspace[8] = tools::evaluate_polynomial(C8, z);
workspace[9] = static_cast<T>(-0.00059676129019274625L);

@@ -456,6 +461,7 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b

T workspace[3];

// LCOV_EXCL_START
BOOST_MATH_STATIC const T C0[] = {
static_cast<T>(-0.333333333L),
static_cast<T>(0.0833333333L),
@@ -482,6 +488,7 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b
static_cast<T>(0.000771604938L),
};
workspace[2] = tools::evaluate_polynomial(C2, z);
// LCOV_EXCL_END

T result = tools::evaluate_polynomial(workspace, 1/a);
result *= exp(-y) / sqrt(2 * constants::pi<T>() * a);
@@ -510,6 +517,7 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b
// It's use for a < 200 is not recommended, that would
// require many more terms in the polynomials.
//
// LCOV_EXCL_START: 128-bit floats are deliberately not tested in our coverage tests (takes too long)
#ifndef BOOST_MATH_HAS_GPU_SUPPORT

template <class T, class Policy>
@@ -802,6 +810,7 @@ BOOST_MATH_GPU_ENABLED T igamma_temme_large(T a, T x, const Policy& pol, const b

return result;
}
// LCOV_EXCL_END

#endif

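The LCOV_EXCL_* markers added throughout this file are plain comments consumed by the lcov tool-chain when it post-processes gcov output. Stock lcov spells the block terminator LCOV_EXCL_STOP, so the LCOV_EXCL_END used here presumably relies on the project's coverage configuration accepting that spelling. A minimal illustration, not Boost.Math code:

double poly_eval(double z)
{
   // LCOV_EXCL_START: constant table, nothing useful to cover here
   static const double C[] = { -0.333333333, 0.0833333333, -0.0148148148 };
   // LCOV_EXCL_STOP
   double result = 0;
   for (int i = 2; i >= 0; --i)
      result = result * z + C[i]; // Horner evaluation
   return result;                 // appending "// LCOV_EXCL_LINE" excludes a single line
}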