// gauss_example.cpp
/*
 * Copyright John Maddock, 2017
 * Use, modification and distribution are subject to the
 * Boost Software License, Version 1.0. (See accompanying file
 * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
 *
 * This example illustrates numerical integration via Gauss and Gauss-Kronrod quadrature.
 */
#include <iostream>
#include <iomanip>
#include <cmath>
#include <limits>
#include <boost/math/quadrature/gauss.hpp>
#include <boost/math/quadrature/gauss_kronrod.hpp>
#include <boost/math/constants/constants.hpp>
#include <boost/math/special_functions/relative_difference.hpp>
#include <boost/multiprecision/cpp_bin_float.hpp>
  17. void gauss_examples()
  18. {
  19. //[gauss_example
  20. /*`
  21. We'll begin by integrating t[super 2] atan(t) over (0,1) using a 7 term Gauss-Legendre rule,
  22. and begin by defining the function to integrate as a C++ lambda expression:
  23. */
  24. using namespace boost::math::quadrature;
  25. auto f = [](const double& t) { return t * t * std::atan(t); };
  26. /*`
  27. Integration is simply a matter of calling the `gauss<double, 7>::integrate` method:
  28. */
  29. double Q = gauss<double, 7>::integrate(f, 0, 1);
  30. /*`
  31. Which yields a value 0.2106572512 accurate to 1e-10.
  32. For more accurate evaluations, we'll move to a multiprecision type and use a 20-point integration scheme:
  33. */
  34. using boost::multiprecision::cpp_bin_float_quad;
  35. auto f2 = [](const cpp_bin_float_quad& t) { return t * t * atan(t); };
  36. cpp_bin_float_quad Q2 = gauss<cpp_bin_float_quad, 20>::integrate(f2, 0, 1);
  37. /*`
  38. Which yields 0.2106572512258069881080923020669, which is accurate to 5e-28.
  39. */
  40. //]
  41. std::cout << std::setprecision(18) << Q << std::endl;
  42. std::cout << boost::math::relative_difference(Q, (boost::math::constants::pi<double>() - 2 + 2 * boost::math::constants::ln_two<double>()) / 12) << std::endl;
  43. std::cout << std::setprecision(34) << Q2 << std::endl;
  44. std::cout << boost::math::relative_difference(Q2, (boost::math::constants::pi<cpp_bin_float_quad>() - 2 + 2 * boost::math::constants::ln_two<cpp_bin_float_quad>()) / 12) << std::endl;
  45. }
  46. void gauss_kronrod_examples()
  47. {
  48. //[gauss_kronrod_example
  49. /*`
  50. We'll begin by integrating exp(-t[super 2]/2) over (0,+[infin]) using a 7 term Gauss rule
  51. and 15 term Kronrod rule,
  52. and begin by defining the function to integrate as a C++ lambda expression:
  53. */
  54. using namespace boost::math::quadrature;
  55. auto f1 = [](double t) { return std::exp(-t*t / 2); };
  56. //<-
  57. double Q_expected = sqrt(boost::math::constants::half_pi<double>());
  58. //->
  59. /*`
  60. We'll start off with a one shot (ie non-adaptive)
  61. integration, and keep track of the estimated error:
  62. */
  63. double error;
  64. double Q = gauss_kronrod<double, 15>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 0, 0, &error);
  65. /*`
  66. This yields Q = 1.25348207361, which has an absolute error of 1e-4 compared to the estimated error
  67. of 5e-3: this is fairly typical, with the difference between Gauss and Gauss-Kronrod schemes being
  68. much higher than the actual error. Before moving on to adaptive quadrature, lets try again
  69. with more points, in fact with the largest Gauss-Kronrod scheme we have cached (30/61):
  70. */
  71. //<-
  72. std::cout << std::setprecision(16) << Q << std::endl;
  73. std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
  74. std::cout << fabs(Q - Q_expected) << std::endl;
  75. std::cout << error << std::endl;
  76. //->
  77. Q = gauss_kronrod<double, 61>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 0, 0, &error);
  78. //<-
  79. std::cout << std::setprecision(16) << Q << std::endl;
  80. std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
  81. std::cout << fabs(Q - Q_expected) << std::endl;
  82. std::cout << error << std::endl;
  83. //->
  84. /*`
  85. This yields an absolute error of 3e-15 against an estimate of 1e-8, which is about as good as we're going to get
  86. at double precision
  87. However, instead of continuing with ever more points, lets switch to adaptive integration, and set the desired relative
  88. error to 1e-14 against a maximum depth of 5:
  89. */
  90. Q = gauss_kronrod<double, 15>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 5, 1e-14, &error);
  91. //<-
  92. std::cout << std::setprecision(16) << Q << std::endl;
  93. std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
  94. std::cout << fabs(Q - Q_expected) << std::endl;
  95. std::cout << error << std::endl;
  96. //->
  97. /*`
  98. This yields an actual error of zero, against an estimate of 4e-15. In fact in this case the requested tolerance was almost
  99. certainly set too low: as we've seen above, for smooth functions, the precision achieved is often double
  100. that of the estimate, so if we integrate with a tolerance of 1e-9:
  101. */
  102. Q = gauss_kronrod<double, 15>::integrate(f1, 0, std::numeric_limits<double>::infinity(), 5, 1e-9, &error);
  103. //<-
  104. std::cout << std::setprecision(16) << Q << std::endl;
  105. std::cout << boost::math::relative_difference(Q, Q_expected) << std::endl;
  106. std::cout << fabs(Q - Q_expected) << std::endl;
  107. std::cout << error << std::endl;
  108. //->
  109. /*`
  110. We still achieve 1e-15 precision, with an error estimate of 1e-10.
  111. */
  112. //]
  113. }
  114. int main()
  115. {
  116. gauss_examples();
  117. gauss_kronrod_examples();
  118. return 0;
  119. }