3 * Copyright (C) 1996, 1997, 1998, 1999, 2000 Gerard Jungman
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 3 of the License, or (at
8 * your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 /* Author: G. Jungman */
23 #include <gsl/gsl_math.h>
24 #include <gsl/gsl_errno.h>
25 #include <gsl/gsl_sf_exp.h>
26 #include <gsl/gsl_sf_gamma.h>
27 #include <gsl/gsl_sf_bessel.h>
28 #include <gsl/gsl_sf_laguerre.h>
29 #include <gsl/gsl_sf_pow_int.h>
30 #include <gsl/gsl_sf_hyperg.h>
/* Tolerance for deciding whether a floating-point parameter is an integer. */
35 #define INT_THRESHOLD (1000.0*GSL_DBL_EPSILON)
/* Heuristic window in (a,b,x) where direct series summation of U converges
 * acceptably fast. */
37 #define SERIES_EVAL_OK(a,b,x) ((fabs(a) < 5 && b < 5 && x < 2.0) || (fabs(a) < 10 && b < 10 && x < 1.0))
/* Condition for the large-x asymptotic (2F0) expansion to be usable:
 * x must dominate both |a| and |1+a-b|. */
39 #define ASYMP_EVAL_OK(a,b,x) (GSL_MAX_DBL(fabs(a),1.0)*GSL_MAX_DBL(fabs(1.0+a-b),1.0) < 0.99*fabs(x))
/* ln[U(a,2a,x)] via the modified Bessel function K_nu representation
43 * [Abramowitz+stegun, 13.6.21]
44 * Assumes x > 0, a > 1/2.
 * (NOTE(review): the function head, the lnK declaration and the return
 * statement are elided in this extract.)
 */
48 hyperg_lnU_beq2a(const double a, const double x, gsl_sf_result * result)
/* nu = a - 1/2 is the Bessel order; lnpre is the log of the elementary
 * prefactor exp(x/2) / (sqrt(pi) * x^nu). */
50 const double lx = log(x);
51 const double nu = a - 0.5;
52 const double lnpre = 0.5*(x - M_LNPI) - nu*lx;
/* ln K_nu(x/2); its error is folded into result->err below. */
54 gsl_sf_bessel_lnKnu_e(nu, 0.5*x, &lnK);
55 result->val = lnpre + lnK.val;
/* Error: roundoff from assembling the prefactor, plus the Bessel error,
 * plus relative roundoff on the final sum. */
56 result->err = 2.0 * GSL_DBL_EPSILON * (fabs(0.5*x) + 0.5*M_LNPI + fabs(nu*lx));
57 result->err += lnK.err;
58 result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val);
63 /* Evaluate u_{N+1}/u_N by Steed's continued fraction method.
65 * u_N := Gamma[a+N]/Gamma[a] U(a + N, b, x)
67 * u_{N+1}/u_N = (a+N) U(a+N+1,b,x)/U(a+N,b,x)
 *
 * On success *result holds the converged ratio.  NOTE(review): *count
 * appears to record the number of convergent rescalings -- its exact
 * meaning at call sites should be confirmed; several declaration and
 * loop-framing lines are elided in this extract.
 */
71 hyperg_U_CF1(const double a, const double b, const int N, const double x,
72 double * result, int * count)
/* Wallis-method convergents are rescaled when they exceed RECUR_BIG
 * to avoid floating-point overflow. */
74 const double RECUR_BIG = GSL_SQRT_DBL_MAX;
75 const int maxiter = 20000;
/* First-order convergents A1/B1 of the continued fraction. */
82 double b1 = (b - 2.0*a - x - 2.0*(N+1));
83 double An = b1*Anm1 + a1*Anm2;
84 double Bn = b1*Bnm1 + a1*Bnm2;
/* General term: a_n, b_n are the CF coefficients at step n. */
96 an = -(a + N + n - b)*(a + N + n - 1.0);
97 bn = (b - 2.0*a - x - 2.0*(N+n));
98 An = bn*Anm1 + an*Anm2;
99 Bn = bn*Bnm1 + an*Bnm2;
/* Rescale the convergents when they grow too large. */
101 if(fabs(An) > RECUR_BIG || fabs(Bn) > RECUR_BIG) {
/* del is the ratio of successive approximants; converged when it is
 * 1 to within a few ulps. */
114 if(fabs(del - 1.0) < 10.0*GSL_DBL_EPSILON) break;
/* Loop exhausted maxiter iterations without converging. */
121 GSL_ERROR ("error", GSL_EMAXITER);
127 /* Large x asymptotic for x^a U(a,b,x)
128 * Based on SLATEC D9CHU() [W. Fullerton]
130 * Uses a rational approximation due to Luke.
131 * See [Luke, Algorithms for the Computation of Special Functions, p. 252]
132 * [Luke, Utilitas Math. (1977)]
134 * z^a U(a,b,z) ~ 2F0(a,1+a-b,-1/z)
136 * This assumes that a is not a negative integer and
137 * that 1+a-b is not a negative integer. If one of them
138 * is, then the 2F0 actually terminates, the above
139 * relation is an equality, and the sum should be
140 * evaluated directly [see below].
 * (NOTE(review): the aa[]/bb[] declarations and some recurrence
 * bookkeeping lines are elided in this extract.)
 */
144 d9chu(const double a, const double b, const double x, gsl_sf_result * result)
146 const double EPS = 8.0 * GSL_DBL_EPSILON; /* EPS = 4.0D0*D1MACH(4) */
147 const int maxiter = 500;
151 double bp = 1.0 + a - b;
153 double ct2 = 2.0 * (x - ab);
156 double ct3 = sab + 1.0 + ab;
157 double anbn = ct3 + sab + 3.0;
158 double ct1 = 1.0 + 2.0*x/anbn;
/* aa[] / bb[] hold successive numerator/denominator polynomials of
 * Luke's rational approximation; aa/bb converges to the 2F0 value. */
163 bb[1] = 1.0 + 2.0*x/ct3;
164 aa[1] = 1.0 + ct2/ct3;
166 bb[2] = 1.0 + 6.0*ct1*x/ct3;
167 aa[2] = 1.0 + 6.0*ab/anbn + 3.0*ct1*ct2/ct3;
/* Three-term recurrence for the next approximant, after SLATEC D9CHU. */
169 for(i=4; i<maxiter; i++) {
174 double x2i1 = 2*i - 3;
175 ct1 = x2i1/(x2i1-2.0);
177 ct2 = (x2i1 - 1.0)/anbn;
179 d1z = 2.0*x2i1*x/anbn;
182 g1 = d1z + ct1*(c2+ct3);
184 g3 = ct1*(1.0 - ct3 - 2.0*ct2);
186 bb[3] = g1*bb[2] + g2*bb[1] + g3*bb[0];
187 aa[3] = g1*aa[2] + g2*aa[1] + g3*aa[0];
/* Converged when the cross-difference of successive approximants is
 * small relative to the denominators. */
189 if(fabs(aa[3]*bb[0]-aa[0]*bb[3]) < EPS*fabs(bb[3]*bb[0])) break;
/* Final approximant and a fixed relative-roundoff error estimate. */
197 result->val = aa[3]/bb[3];
198 result->err = 8.0 * GSL_DBL_EPSILON * fabs(result->val);
/* Recurrence used all maxiter terms without meeting tolerance. */
201 GSL_ERROR ("error", GSL_EMAXITER);
209 /* Evaluate asymptotic for z^a U(a,b,z) ~ 2F0(a,1+a-b,-1/z)
210 * We check for termination of the 2F0 as a special case.
212 * Also assumes a,b are not too large compared to x.
 * (NOTE(review): the declaration of ap and parts of the summation
 * loop are elided in this extract.)
 */
216 hyperg_zaU_asymp(const double a, const double b, const double x, gsl_sf_result *result)
/* ap = a and bp = 1+a-b are the 2F0 parameters; if either is a
 * negative integer the series terminates and is summed exactly. */
219 const double bp = 1.0 + a - b;
220 const double rintap = floor(ap + 0.5);
221 const double rintbp = floor(bp + 0.5);
222 const int ap_neg_int = ( ap < 0.0 && fabs(ap - rintap) < INT_THRESHOLD );
223 const int bp_neg_int = ( bp < 0.0 && fabs(bp - rintbp) < INT_THRESHOLD );
225 if(ap_neg_int || bp_neg_int) {
226 /* Evaluate 2F0 polynomial.
 */
/* nmax = number of terms in the terminating series. */
229 double nmax = -(int)(GSL_MIN(ap,bp) - 0.1);
233 double sum_err = 0.0;
/* Term recurrence: t_n = t_{n-1} * (ap+n-1)(bp+n-1) * (-1/x) / n. */
235 double apn = (ap+n-1.0);
236 double bpn = (bp+n-1.0);
237 tn *= ((apn/n)*mxi)*bpn;
239 sum_err += 2.0 * GSL_DBL_EPSILON * fabs(tn);
243 result->err = sum_err;
244 result->err += 2.0 * GSL_DBL_EPSILON * (fabs(nmax)+1.0) * fabs(sum);
/* Non-terminating case: Luke's rational approximation (D9CHU). */
248 return d9chu(a,b,x,result);
253 /* Evaluate finite sum which appears below.
 * (NOTE(review): variable declarations, branch framing, and loop-body
 * accumulation lines are elided in this extract.)
 */
257 hyperg_U_finite_sum(int N, double a, double b, double x, double xeps,
258 gsl_sf_result * result)
/* Case N <= 0: sum -N terms, term ratio (a+i-1) x / ((b+i-1) i). */
272 for(i=1; i<= -N; i++) {
273 const double xi1 = i - 1;
274 const double mult = (a+xi1)*x/((b+xi1)*(xi1+1.0));
/* Accumulate term error: propagated ratio error plus fresh roundoff. */
276 t_err += fabs(mult) * t_err + fabs(t_val) * 8.0 * 2.0 * GSL_DBL_EPSILON;
/* Pochhammer prefactor (1+a-b)_{-a}. */
281 stat_poch = gsl_sf_poch_e(1.0+a-b, -a, &poch);
283 result->val = sum_val * poch.val;
284 result->err = fabs(sum_val) * poch.err + sum_err * fabs(poch.val);
285 result->err += fabs(poch.val) * (fabs(N) + 2.0) * GSL_DBL_EPSILON * fabs(sum_val);
286 result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val);
287 result->err *= 2.0; /* FIXME: fudge factor... why is the error estimate too small? */
/* Case N > 0: M-term sum with term ratio (a-b+i) x / ((1-b+i) i). */
307 for(i=1; i<=M; i++) {
308 const double mult = (a-b+i)*x/((1.0-b+i)*i);
310 t_err += t_err * fabs(mult) + fabs(t_val) * 8.0 * 2.0 * GSL_DBL_EPSILON;
/* Prefactor: Gamma(b-1) / Gamma(a) * x^(1-N) * x^(-eps). */
315 stat_gbm1 = gsl_sf_gamma_e(b-1.0, &gbm1);
316 stat_gamr = gsl_sf_gammainv_e(a, &gamr);
318 if(stat_gbm1 == GSL_SUCCESS) {
319 gsl_sf_result powx1N;
320 int stat_p = gsl_sf_pow_int_e(x, 1-N, &powx1N);
321 double pe_val = powx1N.val * xeps;
322 double pe_err = powx1N.err * fabs(xeps) + 2.0 * GSL_DBL_EPSILON * fabs(pe_val);
323 double coeff_val = gbm1.val * gamr.val * pe_val;
/* First-order error propagation through the triple product. */
324 double coeff_err = gbm1.err * fabs(gamr.val * pe_val)
325 + gamr.err * fabs(gbm1.val * pe_val)
326 + fabs(gbm1.val * gamr.val) * pe_err
327 + 2.0 * GSL_DBL_EPSILON * fabs(coeff_val);
329 result->val = sum_val * coeff_val;
330 result->err = fabs(sum_val) * coeff_err + sum_err * fabs(coeff_val);
331 result->err += 2.0 * GSL_DBL_EPSILON * (M+2.0) * fabs(result->val);
332 result->err *= 2.0; /* FIXME: fudge factor... why is the error estimate too small? */
345 /* Based on SLATEC DCHU() [W. Fullerton]
347 * This is just a series summation method, and
348 * it is not good for large a.
350 * I patched up the window for 1+a-b near zero. [GJ]
 * (NOTE(review): many declarations -- N, lnx, sum, xi, xn, i, t_val,
 * t_err, braces -- are elided in this extract.)
 */
354 hyperg_U_series(const double a, const double b, const double x, gsl_sf_result * result)
356 const double EPS = 2.0 * GSL_DBL_EPSILON; /* EPS = D1MACH(3) */
357 const double SQRT_EPS = M_SQRT2 * GSL_SQRT_DBL_EPSILON;
/* Degenerate window: 1+a-b ~ 0, where the general algorithm is unstable. */
359 if(fabs(1.0 + a - b) < SQRT_EPS) {
360 /* Original Comment: ALGORITHM IS BAD WHEN 1+A-B IS NEAR ZERO FOR SMALL X
 */
362 /* We can however do the following:
363 * U(a,b,x) = U(a,a+1,x) when 1+a-b=0
364 * and U(a,a+1,x) = x^(-a).
 */
366 double lnr = -a * log(x);
367 int stat_e = gsl_sf_exp_e(lnr, result);
368 result->err += 2.0 * SQRT_EPS * fabs(result->val);
/* aintb = nearest integer to b; beps = b - aintb is the fractional part,
 * and xeps = x^(-beps). */
372 double aintb = ( b < 0.0 ? ceil(b-0.5) : floor(b+0.5) );
373 double beps = b - aintb;
377 double xeps = exp(-beps*lnx);
379 /* Evaluate finite sum.
 */
382 int stat_sum = hyperg_U_finite_sum(N, a, b, x, xeps, &sum);
385 /* Evaluate infinite sum. */
387 int istrt = ( N < 1 ? 1-N : 0 );
/* Overall factor: (-1)^N * (pi beps / sin(pi beps)) / Gamma(1+a-b) * x^istrt. */
392 int stat_gamr = gsl_sf_gammainv_e(1.0+a-b, &gamr);
393 int stat_powx = gsl_sf_pow_int_e(x, istrt, &powx);
394 double sarg = beps*M_PI;
395 double sfact = ( sarg != 0.0 ? sarg/sin(sarg) : 1.0 );
396 double factor_val = sfact * ( GSL_IS_ODD(N) ? -1.0 : 1.0 ) * gamr.val * powx.val;
397 double factor_err = fabs(gamr.val) * powx.err + fabs(powx.val) * gamr.err
398 + 2.0 * GSL_DBL_EPSILON * fabs(factor_val);
400 gsl_sf_result pochai;
401 gsl_sf_result gamri1;
402 gsl_sf_result gamrni;
403 int stat_pochai = gsl_sf_poch_e(a, xi, &pochai);
404 int stat_gamri1 = gsl_sf_gammainv_e(xi + 1.0, &gamri1);
405 int stat_gamrni = gsl_sf_gammainv_e(aintb + xi, &gamrni);
406 int stat_gam123 = GSL_ERROR_SELECT_3(stat_gamr, stat_gamri1, stat_gamrni);
407 int stat_gamall = GSL_ERROR_SELECT_4(stat_sum, stat_gam123, stat_pochai, stat_powx);
409 gsl_sf_result pochaxibeps;
410 gsl_sf_result gamrxi1beps;
411 int stat_pochaxibeps = gsl_sf_poch_e(a, xi-beps, &pochaxibeps);
412 int stat_gamrxi1beps = gsl_sf_gammainv_e(xi + 1.0 - beps, &gamrxi1beps);
414 int stat_all = GSL_ERROR_SELECT_3(stat_gamall, stat_pochaxibeps, stat_gamrxi1beps);
/* b0 = factor * (a)_{xi-beps} / (Gamma(aintb+xi) Gamma(xi+1-beps)),
 * with first-order error propagation through the product. */
416 double b0_val = factor_val * pochaxibeps.val * gamrni.val * gamrxi1beps.val;
417 double b0_err = fabs(factor_val * pochaxibeps.val * gamrni.val) * gamrxi1beps.err
418 + fabs(factor_val * pochaxibeps.val * gamrxi1beps.val) * gamrni.err
419 + fabs(factor_val * gamrni.val * gamrxi1beps.val) * pochaxibeps.err
420 + fabs(pochaxibeps.val * gamrni.val * gamrxi1beps.val) * factor_err
421 + 2.0 * GSL_DBL_EPSILON * fabs(b0_val);
/* Branch 1: x^(-beps) close to 1; differences must be taken carefully
 * using the relative Pochhammer function pochrel. */
423 if(fabs(xeps-1.0) < 0.5) {
/*
425 C X**(-BEPS) IS CLOSE TO 1.0D0, SO WE MUST BE
426 C CAREFUL IN EVALUATING THE DIFFERENCES.
 */
429 gsl_sf_result pch1ai;
431 gsl_sf_result poch1bxibeps;
432 int stat_pch1ai = gsl_sf_pochrel_e(a + xi, -beps, &pch1ai);
433 int stat_pch1i = gsl_sf_pochrel_e(xi + 1.0 - beps, beps, &pch1i);
434 int stat_poch1bxibeps = gsl_sf_pochrel_e(b+xi, -beps, &poch1bxibeps);
435 double c0_t1_val = beps*pch1ai.val*pch1i.val;
436 double c0_t1_err = fabs(beps) * fabs(pch1ai.val) * pch1i.err
437 + fabs(beps) * fabs(pch1i.val) * pch1ai.err
438 + 2.0 * GSL_DBL_EPSILON * fabs(c0_t1_val);
439 double c0_t2_val = -poch1bxibeps.val + pch1ai.val - pch1i.val + c0_t1_val;
440 double c0_t2_err = poch1bxibeps.err + pch1ai.err + pch1i.err + c0_t1_err
441 + 2.0 * GSL_DBL_EPSILON * fabs(c0_t2_val);
442 double c0_val = factor_val * pochai.val * gamrni.val * gamri1.val * c0_t2_val;
443 double c0_err = fabs(factor_val * pochai.val * gamrni.val * gamri1.val) * c0_t2_err
444 + fabs(factor_val * pochai.val * gamrni.val * c0_t2_val) * gamri1.err
445 + fabs(factor_val * pochai.val * gamri1.val * c0_t2_val) * gamrni.err
446 + fabs(factor_val * gamrni.val * gamri1.val * c0_t2_val) * pochai.err
447 + fabs(pochai.val * gamrni.val * gamri1.val * c0_t2_val) * factor_err
448 + 2.0 * GSL_DBL_EPSILON * fabs(c0_val);
/*
450 C XEPS1 = (1.0 - X**(-BEPS))/BEPS = (X**(-BEPS) - 1.0)/(-BEPS)
 */
/* xeps1 computed stably via the relative exponential exprel. */
452 gsl_sf_result dexprl;
453 int stat_dexprl = gsl_sf_exprel_e(-beps*lnx, &dexprl);
454 double xeps1_val = lnx * dexprl.val;
455 double xeps1_err = 2.0 * GSL_DBL_EPSILON * (1.0 + fabs(beps*lnx)) * fabs(dexprl.val)
456 + fabs(lnx) * dexprl.err
457 + 2.0 * GSL_DBL_EPSILON * fabs(xeps1_val);
458 double dchu_val = sum.val + c0_val + xeps1_val*b0_val;
459 double dchu_err = sum.err + c0_err
460 + fabs(xeps1_val)*b0_err + xeps1_err * fabs(b0_val)
461 + fabs(b0_val*lnx)*dexprl.err
462 + 2.0 * GSL_DBL_EPSILON * (fabs(sum.val) + fabs(c0_val) + fabs(xeps1_val*b0_val));
467 stat_all = GSL_ERROR_SELECT_5(stat_all, stat_dexprl, stat_poch1bxibeps, stat_pch1i, stat_pch1ai);
/* Sum the remaining terms by updating b0 and c0 via their term ratios. */
469 for(i=1; i<2000; i++) {
470 const double xi = istrt + i;
471 const double xi1 = istrt + i - 1;
472 const double tmp = (a-1.0)*(xn+2.0*xi-1.0) + xi*(xi-beps);
473 const double b0_multiplier = (a+xi1-beps)*x/((xn+xi1)*(xi-beps));
474 const double c0_multiplier_1 = (a+xi1)*x/((b+xi1)*xi);
475 const double c0_multiplier_2 = tmp / (xi*(b+xi1)*(a+xi1-beps));
476 b0_val *= b0_multiplier;
477 b0_err += fabs(b0_multiplier) * b0_err + fabs(b0_val) * 8.0 * 2.0 * GSL_DBL_EPSILON;
478 c0_val = c0_multiplier_1 * c0_val - c0_multiplier_2 * b0_val;
479 c0_err = fabs(c0_multiplier_1) * c0_err
480 + fabs(c0_multiplier_2) * b0_err
481 + fabs(c0_val) * 8.0 * 2.0 * GSL_DBL_EPSILON
482 + fabs(b0_val * c0_multiplier_2) * 16.0 * 2.0 * GSL_DBL_EPSILON;
483 t_val = c0_val + xeps1_val*b0_val;
484 t_err = c0_err + fabs(xeps1_val)*b0_err;
485 t_err += fabs(b0_val*lnx) * dexprl.err;
486 t_err += fabs(b0_val)*xeps1_err;
/* Terms have become negligible relative to the accumulated sum. */
489 if(fabs(t_val) < EPS*fabs(dchu_val)) break;
492 result->val = dchu_val;
493 result->err = 2.0 * dchu_err;
494 result->err += 2.0 * fabs(t_val);
495 result->err += 4.0 * GSL_DBL_EPSILON * (i+2.0) * fabs(dchu_val);
496 result->err *= 2.0; /* FIXME: fudge factor */
/* Series failed to converge within 2000 terms. */
499 GSL_ERROR ("error", GSL_EMAXITER);
/* Branch 2: x^(-beps) far from 1; the straightforward two-series
 * formulation (a0 and b0 pieces) is numerically stable. */
/*
507 C X**(-BEPS) IS VERY DIFFERENT FROM 1.0, SO THE
508 C STRAIGHTFORWARD FORMULATION IS STABLE.
 */
515 gsl_sf_result dgamrbxi;
516 int stat_dgamrbxi = gsl_sf_gammainv_e(b+xi, &dgamrbxi);
517 double a0_val = factor_val * pochai.val * dgamrbxi.val * gamri1.val / beps;
518 double a0_err = fabs(factor_val * pochai.val * dgamrbxi.val / beps) * gamri1.err
519 + fabs(factor_val * pochai.val * gamri1.val / beps) * dgamrbxi.err
520 + fabs(factor_val * dgamrbxi.val * gamri1.val / beps) * pochai.err
521 + fabs(pochai.val * dgamrbxi.val * gamri1.val / beps) * factor_err
522 + 2.0 * GSL_DBL_EPSILON * fabs(a0_val);
523 stat_all = GSL_ERROR_SELECT_2(stat_all, stat_dgamrbxi);
525 b0_val = xeps * b0_val / beps;
526 b0_err = fabs(xeps / beps) * b0_err + 4.0 * GSL_DBL_EPSILON * fabs(b0_val);
527 dchu_val = sum.val + a0_val - b0_val;
528 dchu_err = sum.err + a0_err + b0_err
529 + 2.0 * GSL_DBL_EPSILON * (fabs(sum.val) + fabs(a0_val) + fabs(b0_val));
/* Term-by-term update of both series until their difference is small. */
531 for(i=1; i<2000; i++) {
532 double xi = istrt + i;
533 double xi1 = istrt + i - 1;
534 double a0_multiplier = (a+xi1)*x/((b+xi1)*xi);
535 double b0_multiplier = (a+xi1-beps)*x/((aintb+xi1)*(xi-beps));
536 a0_val *= a0_multiplier;
537 a0_err += fabs(a0_multiplier) * a0_err;
538 b0_val *= b0_multiplier;
539 b0_err += fabs(b0_multiplier) * b0_err;
540 t_val = a0_val - b0_val;
541 t_err = a0_err + b0_err;
544 if(fabs(t_val) < EPS*fabs(dchu_val)) break;
547 result->val = dchu_val;
548 result->err = 2.0 * dchu_err;
549 result->err += 2.0 * fabs(t_val);
550 result->err += 4.0 * GSL_DBL_EPSILON * (i+2.0) * fabs(dchu_val);
551 result->err *= 2.0; /* FIXME: fudge factor */
/* Series failed to converge within 2000 terms. */
554 GSL_ERROR ("error", GSL_EMAXITER);
564 /* Assumes b > 0 and x > 0.
 * U(a,b,x) for small |a| and |b|: closed forms for a = -1 and a = 0,
 * otherwise the large-x asymptotic or the direct series.
 */
568 hyperg_U_small_ab(const double a, const double b, const double x, gsl_sf_result * result)
571 /* U(-1,c+1,x) = Laguerre[c,0,x] = -b + x
 */
573 result->val = -b + x;
574 result->err = 2.0 * GSL_DBL_EPSILON * (fabs(b) + fabs(x));
575 result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val);
579 /* U(0,c+1,x) = Laguerre[c,0,x] = 1
 */
/* Large-x regime: U(a,b,x) = x^(-a) * [2F0-type asymptotic]. */
585 else if(ASYMP_EVAL_OK(a,b,x)) {
586 double p = pow(x, -a);
588 int stat_asymp = hyperg_zaU_asymp(a, b, x, &asymp);
589 result->val = asymp.val * p;
590 result->err = asymp.err * p;
/* Account for the roundoff incurred by pow(x,-a) itself. */
591 result->err += fabs(asymp.val) * GSL_DBL_EPSILON * fabs(a) * p;
592 result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val);
/* Fallback: direct series summation. */
596 return hyperg_U_series(a, b, x, result);
601 /* Assumes b > 0 and x > 0.
 * U(a,b,x) for small |a|.  On return, the true value is
 * exp(*ln_multiplier) * result->val (the multiplier is nonzero only
 * when the large-b routine is used).
 */
605 hyperg_U_small_a_bgt0(const double a, const double b, const double x,
606 gsl_sf_result * result,
607 double * ln_multiplier
613 *ln_multiplier = 0.0;
/* Large-b regime: dedicated large-b evaluation, which may report a
 * separate log-scale multiplier. */
616 else if( (b > 5000.0 && x < 0.90 * fabs(b))
617 || (b > 500.0 && x < 0.50 * fabs(b))
619 int stat = gsl_sf_hyperg_U_large_b_e(a, b, x, result, ln_multiplier);
620 if(stat == GSL_EOVRFLW)
626 /* Recurse up from b near 1.
 * Seeds at b0 = 1 + frac(b) and b0+1, then steps b upward by 1 using
 * the contiguous relation for U in b. */
628 double eps = b - floor(b);
629 double b0 = 1.0 + eps;
630 gsl_sf_result r_Ubm1;
632 int stat_0 = hyperg_U_small_ab(a, b0, x, &r_Ubm1);
633 int stat_1 = hyperg_U_small_ab(a, b0+1.0, x, &r_Ub);
634 double Ubm1 = r_Ubm1.val;
635 double Ub = r_Ub.val;
639 for(bp = b0+1.0; bp<b-0.1; bp += 1.0) {
640 Ubp1 = ((1.0+a-bp)*Ubm1 + (bp+x-1.0)*Ub)/x;
/* Relative errors of the two seed values propagate into the result. */
645 result->err = (fabs(r_Ubm1.err/r_Ubm1.val) + fabs(r_Ub.err/r_Ub.val)) * fabs(Ub);
646 result->err += 2.0 * GSL_DBL_EPSILON * (fabs(b-b0)+1.0) * fabs(Ub);
647 *ln_multiplier = 0.0;
648 return GSL_ERROR_SELECT_2(stat_0, stat_1);
/* Remaining case: evaluate directly. */
651 *ln_multiplier = 0.0;
652 return hyperg_U_small_ab(a, b, x, result);
657 /* We use this to keep track of large
658 * dynamic ranges in the recursions.
659 * This can be important because sometimes
660 * we want to calculate a very large and
661 * a very small number and the answer is
662 * the product, of order 1. This happens,
663 * for instance, when we apply a Kummer
664 * transform to make b positive and
665 * both x and b are large.
 */
/* Rescale the recurrence pair (u0,u1) by 'factor' whenever |u0| leaves
 * [1/factor, factor], adjusting 'count' so the net scaling can be
 * undone later in log space.  (Macro body partially elided here.) */
667 #define RESCALE_2(u0,u1,factor,count) \
669 double au0 = fabs(u0); \
675 else if(au0 < 1.0/factor) { \
683 /* Specialization to b >= 1, for integer parameters.
 * Chooses among closed forms, asymptotics, direct series, and
 * stable three-term recursions in a depending on (a,b,x).
 * (NOTE(review): many declarations, conditions and braces are elided
 * in this extract.)
 */
688 hyperg_U_int_bge1(const int a, const int b, const double x,
689 gsl_sf_result_e10 * result)
/* U(-1,b,x) = -b + x; cf. the identical seed used in the backward
 * recursion below (the guarding condition is elided here). */
698 result->val = -b + x;
699 result->err = 2.0 * GSL_DBL_EPSILON * (fabs(b) + fabs(x));
700 result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val);
704 else if(b == a + 1) {
705 /* U(a,a+1,x) = x^(-a)
 */
707 return gsl_sf_exp_e10_e(-a*log(x), result);
/* Large-x regime: U = x^(-a) * [2F0-type asymptotic]. */
709 else if(ASYMP_EVAL_OK(a,b,x)) {
710 const double ln_pre_val = -a*log(x);
711 const double ln_pre_err = 2.0 * GSL_DBL_EPSILON * fabs(ln_pre_val);
713 int stat_asymp = hyperg_zaU_asymp(a, b, x, &asymp);
714 int stat_e = gsl_sf_exp_mult_err_e10_e(ln_pre_val, ln_pre_err,
715 asymp.val, asymp.err,
717 return GSL_ERROR_SELECT_2(stat_e, stat_asymp);
/* Small-parameter window: direct series summation. */
719 else if(SERIES_EVAL_OK(a,b,x)) {
721 const int stat_ser = hyperg_U_series(a, b, x, &ser);
722 result->val = ser.val;
723 result->err = ser.err;
728 /* Recurse backward from a = -1,0.
 */
731 const double scale_factor = GSL_SQRT_DBL_MAX;
735 double Uap1 = 1.0; /* U(0,b,x) */
736 double Ua = -b + x; /* U(-1,b,x) */
/* Downward three-term recurrence in a, with overflow rescaling. */
740 for(ap=-1; ap>a; ap--) {
741 Uam1 = ap*(b-ap-1.0)*Uap1 + (x+2.0*ap-b)*Ua;
744 RESCALE_2(Ua,Uap1,scale_factor,scale_count);
/* Undo the accumulated rescalings in log space. */
747 lnscale = log(scale_factor);
748 lnm.val = scale_count*lnscale;
749 lnm.err = 2.0 * GSL_DBL_EPSILON * fabs(lnm.val);
751 y.err = 4.0 * GSL_DBL_EPSILON * (fabs(a)+1.0) * fabs(Ua);
752 return gsl_sf_exp_mult_err_e10_e(lnm.val, lnm.err, y.val, y.err, result);
754 else if(b >= 2.0*a + x) {
755 /* Recurse forward from a = 0,1.
 */
758 const double scale_factor = GSL_SQRT_DBL_MAX;
764 int stat_1 = hyperg_U_small_a_bgt0(1.0, b, x, &r_Ua, &lm); /* U(1,b,x) */
766 double Uam1 = 1.0; /* U(0,b,x) */
767 double Ua = r_Ua.val;
/* Upward three-term recurrence in a, with overflow rescaling. */
773 for(ap=1; ap<a; ap++) {
774 Uap1 = -(Uam1 + (b-2.0*ap-x)*Ua)/(ap*(1.0+ap-b));
777 RESCALE_2(Ua,Uam1,scale_factor,scale_count);
780 lnscale = log(scale_factor);
781 lnm.val = lm + scale_count * lnscale;
782 lnm.err = 2.0 * GSL_DBL_EPSILON * (fabs(lm) + fabs(scale_count*lnscale));
/* Propagate the relative error of the U(1,b,x) seed. */
784 y.err = fabs(r_Ua.err/r_Ua.val) * fabs(Ua);
785 y.err += 2.0 * GSL_DBL_EPSILON * (fabs(a) + 1.0) * fabs(Ua);
786 stat_e = gsl_sf_exp_mult_err_e10_e(lnm.val, lnm.err, y.val, y.err, result);
787 return GSL_ERROR_SELECT_2(stat_e, stat_1);
791 /* Recurse backward either to the b=a+1 line
792 * or to a=0, whichever we hit.
 */
794 const double scale_factor = GSL_SQRT_DBL_MAX;
/* On the b = a+1 line the target value is x^(-a_target) in closed form. */
808 lnU_target = -a_target*log(x);
/* Continued fraction supplies the ratio seeding the backward recursion. */
815 stat_CF1 = hyperg_U_CF1(a, b, 0, x, &ru, &CF1_count);
819 for(ap=a; ap>a_target; ap--) {
820 Uam1 = -((b-2.0*ap-x)*Ua + ap*(1.0+ap-b)*Uap1);
823 RESCALE_2(Ua,Uap1,scale_factor,scale_count);
/* A vanishing Ua would make the normalization 1/Ua singular. */
830 GSL_ERROR ("error", GSL_EZERODIV);
833 double lnscl = -scale_count*log(scale_factor);
834 double lnpre_val = lnU_target + lnscl;
835 double lnpre_err = 2.0 * GSL_DBL_EPSILON * (fabs(lnU_target) + fabs(lnscl));
836 double oUa_err = 2.0 * (fabs(a_target-a) + CF1_count + 1.0) * GSL_DBL_EPSILON * fabs(1.0/Ua);
837 int stat_e = gsl_sf_exp_mult_err_e10_e(lnpre_val, lnpre_err,
840 return GSL_ERROR_SELECT_2(stat_e, stat_CF1);
844 /* Recurse backward to near the b=2a+x line, then
845 * determine normalization by either direct evaluation
846 * or by a forward recursion. The direct evaluation
847 * is needed when x is small (which is precisely
848 * when it is easy to do).
 */
850 const double scale_factor = GSL_SQRT_DBL_MAX;
851 int scale_count_for = 0;
852 int scale_count_bck = 0;
/* a1 is the matching point chosen near the b = 2a + x line. */
854 int a1 = a0 + ceil(0.5*(b-x) - a0);
861 gsl_sf_result lm_for;
864 /* Recurse back to determine U(a1,b), sans normalization.
 */
868 int stat_CF1 = hyperg_U_CF1(a, b, 0, x, &ru, &CF1_count);
870 double Uap1 = ru/a * Ua;
873 for(ap=a; ap>a1; ap--) {
874 Uam1 = -((b-2.0*ap-x)*Ua + ap*(1.0+ap-b)*Uap1);
877 RESCALE_2(Ua,Uap1,scale_factor,scale_count_bck);
880 Ua1_bck_err = 2.0 * GSL_DBL_EPSILON * (fabs(a1-a)+CF1_count+1.0) * fabs(Ua);
/* Direct normalization on the b = 2*a1 line via the Bessel-K log form. */
884 if(b == 2*a1 && a1 > 1) {
885 /* This can happen when x is small, which is
886 * precisely when we need to be careful with
 * the normalization (further lines elided in this extract). */
889 hyperg_lnU_beq2a((double)a1, x, &lm_for);
892 stat_for = GSL_SUCCESS;
894 else if(b == 2*a1 - 1 && a1 > 1) {
895 /* Similar to the above. Happens when x is small.
897 * U(a,2a-1) = (x U(a,2a) - U(a-1,2(a-1))) / (2a - 2)
 */
899 gsl_sf_result lnU00, lnU12;
900 gsl_sf_result U00, U12;
901 hyperg_lnU_beq2a(a1-1.0, x, &lnU00);
902 hyperg_lnU_beq2a(a1, x, &lnU12);
/* Factor out the larger log so only a non-positive argument is
 * exponentiated (avoids overflow). */
903 if(lnU00.val > lnU12.val) {
904 lm_for.val = lnU00.val;
905 lm_for.err = lnU00.err;
908 gsl_sf_exp_err_e(lnU12.val - lm_for.val, lnU12.err + lm_for.err, &U12);
911 lm_for.val = lnU12.val;
912 lm_for.err = lnU12.err;
915 gsl_sf_exp_err_e(lnU00.val - lm_for.val, lnU00.err + lm_for.err, &U00);
917 Ua1_for_val = (x * U12.val - U00.val) / (2.0*a1 - 2.0);
918 Ua1_for_err = (fabs(x)*U12.err + U00.err) / fabs(2.0*a1 - 2.0);
919 Ua1_for_err += 2.0 * GSL_DBL_EPSILON * fabs(Ua1_for_val);
920 stat_for = GSL_SUCCESS;
923 /* Recurse forward to determine U(a1,b) with
924 * absolute normalization.
 */
927 double Uam1 = 1.0; /* U(a0-1,b,x) = U(0,b,x) */
932 stat_for = hyperg_U_small_a_bgt0(a0, b, x, &r_Ua, &lm_for_local); /* U(1,b,x) */
/* Bring the seed pair onto the common log scale lm_for_local. */
934 Uam1 *= exp(-lm_for_local);
935 lm_for.val = lm_for_local;
938 for(ap=a0; ap<a1; ap++) {
939 Uap1 = -(Uam1 + (b-2.0*ap-x)*Ua)/(ap*(1.0+ap-b));
942 RESCALE_2(Ua,Uam1,scale_factor,scale_count_for);
945 Ua1_for_err = fabs(Ua) * fabs(r_Ua.err/r_Ua.val);
946 Ua1_for_err += 2.0 * GSL_DBL_EPSILON * (fabs(a1-a0)+1.0) * fabs(Ua1_for_val);
949 /* Now do the matching to produce the final result.
 */
951 if(Ua1_bck_val == 0.0) {
955 GSL_ERROR ("error", GSL_EZERODIV);
957 else if(Ua1_for_val == 0.0) {
958 /* Should never happen. */
959 UNDERFLOW_ERROR_E10(result);
/* Combine forward and backward pieces in log space:
 * result = sgn * exp(lm_for + ln|for| - ln|bck| + net rescaling). */
962 double lns = (scale_count_for - scale_count_bck)*log(scale_factor);
963 double ln_for_val = log(fabs(Ua1_for_val));
964 double ln_for_err = GSL_DBL_EPSILON + fabs(Ua1_for_err/Ua1_for_val);
965 double ln_bck_val = log(fabs(Ua1_bck_val));
966 double ln_bck_err = GSL_DBL_EPSILON + fabs(Ua1_bck_err/Ua1_bck_val);
967 double lnr_val = lm_for.val + ln_for_val - ln_bck_val + lns;
968 double lnr_err = lm_for.err + ln_for_err + ln_bck_err
969 + 2.0 * GSL_DBL_EPSILON * (fabs(lm_for.val) + fabs(ln_for_val) + fabs(ln_bck_val) + fabs(lns));
970 double sgn = GSL_SIGN(Ua1_for_val) * GSL_SIGN(Ua1_bck_val);
971 int stat_e = gsl_sf_exp_err_e10_e(lnr_val, lnr_err, result);
973 return GSL_ERROR_SELECT_3(stat_e, stat_bck, stat_for);
980 /* Handle b >= 1 for generic a,b values.
 * Mirrors the integer-parameter routine: closed forms, asymptotics,
 * series, and stable recursions chosen by region of (a,b,x).
 * (NOTE(review): many declarations, conditions and braces are elided
 * in this extract.)
 */
984 hyperg_U_bge1(const double a, const double b, const double x,
985 gsl_sf_result_e10 * result)
987 const double rinta = floor(a+0.5);
988 const int a_neg_integer = (a < 0.0 && fabs(a - rinta) < INT_THRESHOLD);
/* Terminating case: a a negative integer -> Laguerre polynomial. */
996 else if(a_neg_integer && fabs(rinta) < INT_MAX) {
997 /* U(-n,b,x) = (-1)^n n! Laguerre[n,b-1,x]
 */
999 const int n = -(int)rinta;
1000 const double sgn = (GSL_IS_ODD(n) ? -1.0 : 1.0);
1001 gsl_sf_result lnfact;
1003 const int stat_L = gsl_sf_laguerre_n_e(n, b-1.0, x, &L);
1004 gsl_sf_lnfact_e(n, &lnfact);
1006 const int stat_e = gsl_sf_exp_mult_err_e10_e(lnfact.val, lnfact.err,
1009 return GSL_ERROR_SELECT_2(stat_e, stat_L);
/* Large-x regime: U = x^(-a) * [2F0-type asymptotic]. */
1012 else if(ASYMP_EVAL_OK(a,b,x)) {
1013 const double ln_pre_val = -a*log(x);
1014 const double ln_pre_err = 2.0 * GSL_DBL_EPSILON * fabs(ln_pre_val);
1015 gsl_sf_result asymp;
1016 int stat_asymp = hyperg_zaU_asymp(a, b, x, &asymp);
1017 int stat_e = gsl_sf_exp_mult_err_e10_e(ln_pre_val, ln_pre_err,
1018 asymp.val, asymp.err,
1020 return GSL_ERROR_SELECT_2(stat_e, stat_asymp);
/* |a| <= 1: delegate to the small-a evaluator and undo its
 * log-multiplier via exp_mult. */
1022 else if(fabs(a) <= 1.0) {
1024 double ln_multiplier;
1025 int stat_U = hyperg_U_small_a_bgt0(a, b, x, &rU, &ln_multiplier);
1026 int stat_e = gsl_sf_exp_mult_err_e10_e(ln_multiplier, 2.0*GSL_DBL_EPSILON*fabs(ln_multiplier), rU.val, rU.err, result);
1027 return GSL_ERROR_SELECT_2(stat_U, stat_e);
/* Small-parameter window: direct series summation. */
1029 else if(SERIES_EVAL_OK(a,b,x)) {
1031 const int stat_ser = hyperg_U_series(a, b, x, &ser);
1032 result->val = ser.val;
1033 result->err = ser.err;
1038 /* Recurse backward on a and then upward on b.
 */
1040 const double scale_factor = GSL_SQRT_DBL_MAX;
/* Seed at fractional offsets a0 in (-1,0], b0 in [1,2). */
1041 const double a0 = a - floor(a) - 1.0;
1042 const double b0 = b - floor(b) + 1.0;
1043 int scale_count = 0;
1046 gsl_sf_result r_Uap1;
1048 int stat_0 = hyperg_U_small_a_bgt0(a0+1.0, b0, x, &r_Uap1, &lm_0);
1049 int stat_1 = hyperg_U_small_a_bgt0(a0, b0, x, &r_Ua, &lm_1);
1051 double Uap1 = r_Uap1.val;
1052 double Ua = r_Ua.val;
/* Put both seeds on the common log scale lm_max. */
1055 lm_max = GSL_MAX(lm_0, lm_1);
1056 Uap1 *= exp(lm_0-lm_max);
1057 Ua *= exp(lm_1-lm_max);
1059 /* Downward recursion on a.
 */
1061 for(ap=a0; ap>a+0.1; ap -= 1.0) {
1062 Uam1 = ap*(b0-ap-1.0)*Uap1 + (x+2.0*ap-b0)*Ua;
1065 RESCALE_2(Ua,Uap1,scale_factor,scale_count);
1069 /* b == b0, so no recursion necessary
 */
1071 const double lnscale = log(scale_factor);
1074 lnm.val = lm_max + scale_count * lnscale;
1075 lnm.err = 2.0 * GSL_DBL_EPSILON * (fabs(lm_max) + scale_count * fabs(lnscale));
/* Propagate the relative errors of both seeds. */
1077 y.err = fabs(r_Uap1.err/r_Uap1.val) * fabs(Ua);
1078 y.err += fabs(r_Ua.err/r_Ua.val) * fabs(Ua);
1079 y.err += 2.0 * GSL_DBL_EPSILON * (fabs(a-a0) + 1.0) * fabs(Ua);
1080 y.err *= fabs(lm_0-lm_max) + 1.0;
1081 y.err *= fabs(lm_1-lm_max) + 1.0;
1082 stat_e = gsl_sf_exp_mult_err_e10_e(lnm.val, lnm.err, y.val, y.err, result);
1085 /* Upward recursion on b.
 */
1087 const double err_mult = fabs(b-b0) + fabs(a-a0) + 1.0;
1088 const double lnscale = log(scale_factor);
1092 double Ubm1 = Ua; /* U(a,b0) */
1093 double Ub = (a*(b0-a-1.0)*Uap1 + (a+x)*Ua)/x; /* U(a,b0+1) */
1096 for(bp=b0+1.0; bp<b-0.1; bp += 1.0) {
1097 Ubp1 = ((1.0+a-bp)*Ubm1 + (bp+x-1.0)*Ub)/x;
1100 RESCALE_2(Ub,Ubm1,scale_factor,scale_count);
1103 lnm.val = lm_max + scale_count * lnscale;
1104 lnm.err = 2.0 * GSL_DBL_EPSILON * (fabs(lm_max) + fabs(scale_count * lnscale));
1106 y.err = 2.0 * err_mult * fabs(r_Uap1.err/r_Uap1.val) * fabs(Ub);
1107 y.err += 2.0 * err_mult * fabs(r_Ua.err/r_Ua.val) * fabs(Ub);
1108 y.err += 2.0 * GSL_DBL_EPSILON * err_mult * fabs(Ub);
1109 y.err *= fabs(lm_0-lm_max) + 1.0;
1110 y.err *= fabs(lm_1-lm_max) + 1.0;
1111 stat_e = gsl_sf_exp_mult_err_e10_e(lnm.val, lnm.err, y.val, y.err, result);
1113 return GSL_ERROR_SELECT_3(stat_e, stat_0, stat_1);
1115 else if(b >= 2*a + x) {
1116 /* Recurse forward from a near zero.
1117 * Note that we cannot cross the singularity at
1118 * the line b=a+1, because the only way we could
1119 * be in that little wedge is if a < 1. But we
1120 * have already dealt with the small a case.
 */
1122 int scale_count = 0;
1123 const double a0 = a - floor(a);
1124 const double scale_factor = GSL_SQRT_DBL_MAX;
1126 double lm_0, lm_1, lm_max;
1127 gsl_sf_result r_Uam1;
1129 int stat_0 = hyperg_U_small_a_bgt0(a0-1.0, b, x, &r_Uam1, &lm_0);
1130 int stat_1 = hyperg_U_small_a_bgt0(a0, b, x, &r_Ua, &lm_1);
1134 double Uam1 = r_Uam1.val;
1135 double Ua = r_Ua.val;
/* Put the seeds on the common log scale lm_max. */
1138 lm_max = GSL_MAX(lm_0, lm_1);
1139 Uam1 *= exp(lm_0-lm_max);
1140 Ua *= exp(lm_1-lm_max);
/* Upward three-term recurrence in a, with overflow rescaling. */
1142 for(ap=a0; ap<a-0.1; ap += 1.0) {
1143 Uap1 = -(Uam1 + (b-2.0*ap-x)*Ua)/(ap*(1.0+ap-b));
1146 RESCALE_2(Ua,Uam1,scale_factor,scale_count);
1149 lnscale = log(scale_factor);
1150 lnm.val = lm_max + scale_count * lnscale;
1151 lnm.err = 2.0 * GSL_DBL_EPSILON * (fabs(lm_max) + fabs(scale_count * lnscale));
1153 y.err = fabs(r_Uam1.err/r_Uam1.val) * fabs(Ua);
1154 y.err += fabs(r_Ua.err/r_Ua.val) * fabs(Ua);
1155 y.err += 2.0 * GSL_DBL_EPSILON * (fabs(a-a0) + 1.0) * fabs(Ua);
1156 y.err *= fabs(lm_0-lm_max) + 1.0;
1157 y.err *= fabs(lm_1-lm_max) + 1.0;
1158 stat_e = gsl_sf_exp_mult_err_e10_e(lnm.val, lnm.err, y.val, y.err, result);
1159 return GSL_ERROR_SELECT_3(stat_e, stat_0, stat_1);
1163 /* Recurse backward to a near zero.
 */
1165 const double a0 = a - floor(a);
1166 const double scale_factor = GSL_SQRT_DBL_MAX;
1167 int scale_count = 0;
/* CF1 gives the ratio seeding the downward recursion; Ua is seeded at
 * SQRT_DBL_MIN so the un-normalized recursion cannot overflow. */
1180 int stat_CF1 = hyperg_U_CF1(a, b, 0, x, &ru, &CF1_count);
1184 Ua = GSL_SQRT_DBL_MIN;
1186 for(ap=a; ap>a0+0.1; ap -= 1.0) {
1187 Uam1 = -((b-2.0*ap-x)*Ua + ap*(1.0+ap-b)*Uap1);
1190 RESCALE_2(Ua,Uap1,scale_factor,scale_count);
/* Absolute normalization from the small-a value at a0. */
1193 stat_U0 = hyperg_U_small_a_bgt0(a0, b, x, &U0, &lm_0);
1195 lnscale = log(scale_factor);
1196 lnm.val = lm_0 - scale_count * lnscale;
1197 lnm.err = 2.0 * GSL_DBL_EPSILON * (fabs(lm_0) + fabs(scale_count * lnscale));
1198 y.val = GSL_SQRT_DBL_MIN*(U0.val/Ua);
1199 y.err = GSL_SQRT_DBL_MIN*(U0.err/fabs(Ua));
1200 y.err += 2.0 * GSL_DBL_EPSILON * (fabs(a0-a) + CF1_count + 1.0) * fabs(y.val);
1201 stat_e = gsl_sf_exp_mult_err_e10_e(lnm.val, lnm.err, y.val, y.err, result);
1202 return GSL_ERROR_SELECT_3(stat_e, stat_U0, stat_CF1);
1205 /* Recurse backward to near the b=2a+x line, then
1206 * forward from a near zero to get the normalization.
 */
1208 int scale_count_for = 0;
1209 int scale_count_bck = 0;
1210 const double scale_factor = GSL_SQRT_DBL_MAX;
/* a0 is the fractional start (forced to 1 if a is an exact integer);
 * a1 is the matching point near the b = 2a + x line. */
1211 const double eps = a - floor(a);
1212 const double a0 = ( eps == 0.0 ? 1.0 : eps );
1213 const double a1 = a0 + ceil(0.5*(b-x) - a0);
1226 /* Recurse back to determine U(a1,b), sans normalization.
 */
1234 int stat_CF1 = hyperg_U_CF1(a, b, 0, x, &ru, &CF1_count);
1236 Ua = GSL_SQRT_DBL_MIN;
1238 for(ap=a; ap>a1+0.1; ap -= 1.0) {
1239 Uam1 = -((b-2.0*ap-x)*Ua + ap*(1.0+ap-b)*Uap1);
1242 RESCALE_2(Ua,Uap1,scale_factor,scale_count_bck);
1245 stat_bck = stat_CF1;
1248 /* Recurse forward to determine U(a1,b) with
1249 * absolute normalization.
 */
1251 gsl_sf_result r_Uam1;
1254 int stat_0 = hyperg_U_small_a_bgt0(a0-1.0, b, x, &r_Uam1, &lm_0);
1255 int stat_1 = hyperg_U_small_a_bgt0(a0, b, x, &r_Ua, &lm_1);
1256 double Uam1 = r_Uam1.val;
1257 double Ua = r_Ua.val;
/* Put the seeds on the common log scale lm_for. */
1261 lm_for = GSL_MAX(lm_0, lm_1);
1262 Uam1 *= exp(lm_0 - lm_for);
1263 Ua *= exp(lm_1 - lm_for);
1265 for(ap=a0; ap<a1-0.1; ap += 1.0) {
1266 Uap1 = -(Uam1 + (b-2.0*ap-x)*Ua)/(ap*(1.0+ap-b));
1269 RESCALE_2(Ua,Uam1,scale_factor,scale_count_for);
1272 stat_for = GSL_ERROR_SELECT_2(stat_0, stat_1);
/* Match forward and backward pieces at a1 in log space. */
1275 lnscale = log(scale_factor);
1276 lnm.val = lm_for + (scale_count_for - scale_count_bck)*lnscale;
1277 lnm.err = 2.0 * GSL_DBL_EPSILON * (fabs(lm_for) + fabs(scale_count_for - scale_count_bck)*fabs(lnscale));
1278 y.val = GSL_SQRT_DBL_MIN*Ua1_for/Ua1_bck;
1279 y.err = 2.0 * GSL_DBL_EPSILON * (fabs(a-a0) + CF1_count + 1.0) * fabs(y.val);
1280 stat_e = gsl_sf_exp_mult_err_e10_e(lnm.val, lnm.err, y.val, y.err, result);
1281 return GSL_ERROR_SELECT_3(stat_e, stat_bck, stat_for);
1287 /*-*-*-*-*-*-*-*-*-*-*-* Functions with Error Codes *-*-*-*-*-*-*-*-*-*-*-*/
/* U(a,b,x) for integer a,b, returned in extended-range (e10) form.
 * b >= 1 is handled directly; smaller b goes through the Kummer
 * reflection formula.  (NOTE(review): the branch conditions are
 * elided in this extract.)
 */
1291 gsl_sf_hyperg_U_int_e10_e(const int a, const int b, const double x,
1292 gsl_sf_result_e10 * result)
1294 /* CHECK_POINTER(result) */
/* Domain error on invalid x (the guarding condition is elided here). */
1297 DOMAIN_ERROR_E10(result);
1301 return hyperg_U_int_bge1(a, b, x, result);
1304 /* Use the reflection formula
1305 * U(a,b,x) = x^(1-b) U(1+a-b,2-b,x)
 */
1307 gsl_sf_result_e10 U;
1308 double ln_x = log(x);
1312 int stat_U = hyperg_U_int_bge1(ap, bp, x, &U);
1313 double ln_pre_val = (1.0-b)*ln_x;
1314 double ln_pre_err = 2.0 * GSL_DBL_EPSILON * (fabs(b)+1.0) * fabs(ln_x);
1315 ln_pre_err += 2.0 * GSL_DBL_EPSILON * fabs(1.0-b); /* error in log(x) */
/* Fold U's e10 exponent into the log prefactor before combining. */
1316 stat_e = gsl_sf_exp_mult_err_e10_e(ln_pre_val + U.e10*M_LN10, ln_pre_err,
1319 return GSL_ERROR_SELECT_2(stat_e, stat_U);
/* U(a,b,x) for general real a,b, returned in extended-range (e10) form.
 * Integer parameters are routed to the integer specialization; b >= 1
 * is handled directly; smaller b uses the Kummer reflection formula.
 * (NOTE(review): some branch conditions are elided in this extract.)
 */
1326 gsl_sf_hyperg_U_e10_e(const double a, const double b, const double x,
1327 gsl_sf_result_e10 * result)
/* Detect near-integer parameters within INT_THRESHOLD. */
1329 const double rinta = floor(a + 0.5);
1330 const double rintb = floor(b + 0.5);
1331 const int a_integer = ( fabs(a - rinta) < INT_THRESHOLD );
1332 const int b_integer = ( fabs(b - rintb) < INT_THRESHOLD );
1334 /* CHECK_POINTER(result) */
/* Domain error on invalid x (the guarding condition is elided here). */
1337 DOMAIN_ERROR_E10(result);
1345 else if(a_integer && b_integer) {
1346 return gsl_sf_hyperg_U_int_e10_e(rinta, rintb, x, result);
1350 /* Use b >= 1 function.
 */
1352 return hyperg_U_bge1(a, b, x, result);
1355 /* Use the reflection formula
1356 * U(a,b,x) = x^(1-b) U(1+a-b,2-b,x)
 */
1358 const double lnx = log(x);
1359 const double ln_pre_val = (1.0-b)*lnx;
1360 const double ln_pre_err = fabs(lnx) * 2.0 * GSL_DBL_EPSILON * (1.0 + fabs(b));
1361 const double ap = 1.0 + a - b;
1362 const double bp = 2.0 - b;
1363 gsl_sf_result_e10 U;
1364 int stat_U = hyperg_U_bge1(ap, bp, x, &U);
/* Fold U's e10 exponent into the log prefactor before combining. */
1365 int stat_e = gsl_sf_exp_mult_err_e10_e(ln_pre_val + U.e10*M_LN10, ln_pre_err,
1368 return GSL_ERROR_SELECT_2(stat_e, stat_U);
/* Integer-parameter U(a,b,x): evaluate in extended-range e10 form,
 * then smash the extra exponent back into an ordinary gsl_sf_result. */
1375 gsl_sf_hyperg_U_int_e(const int a, const int b, const double x, gsl_sf_result * result)
1377 gsl_sf_result_e10 re;
1378 int stat_U = gsl_sf_hyperg_U_int_e10_e(a, b, x, &re);
1379 int stat_c = gsl_sf_result_smash_e(&re, result);
1380 return GSL_ERROR_SELECT_2(stat_c, stat_U);
/* General U(a,b,x): evaluate in extended-range e10 form, then smash
 * the extra exponent back into an ordinary gsl_sf_result. */
1385 gsl_sf_hyperg_U_e(const double a, const double b, const double x, gsl_sf_result * result)
1387 gsl_sf_result_e10 re;
1388 int stat_U = gsl_sf_hyperg_U_e10_e(a, b, x, &re);
1389 int stat_c = gsl_sf_result_smash_e(&re, result);
1390 return GSL_ERROR_SELECT_2(stat_c, stat_U);
1394 /*-*-*-*-*-*-*-*-*-* Functions w/ Natural Prototypes *-*-*-*-*-*-*-*-*-*-*/
/* Natural-prototype wrapper: returns the value directly, invoking the
 * GSL error handler via EVAL_RESULT on failure. */
1398 double gsl_sf_hyperg_U_int(const int a, const int b, const double x)
1400 EVAL_RESULT(gsl_sf_hyperg_U_int_e(a, b, x, &result));
/* Natural-prototype wrapper: returns the value directly, invoking the
 * GSL error handler via EVAL_RESULT on failure. */
1403 double gsl_sf_hyperg_U(const double a, const double b, const double x)
1405 EVAL_RESULT(gsl_sf_hyperg_U_e(a, b, x, &result));