3 * Copyright (C) 1996, 1997, 1998, 1999, 2000 Michael Booth
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 3 of the License, or (at
8 * your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
/* MISER.  Based on the algorithm described in the following article,

   W.H. Press, G.R. Farrar, "Recursive Stratified Sampling for
   Multidimensional Monte Carlo Integration", Computers in Physics,
   v4 (1990), pp 190-195.  */
29 /* Modified by Brian Gough 12/2000 */
#include <math.h>
#include <stdlib.h>

#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_monte.h>
#include <gsl/gsl_monte_miser.h>
42 estimate_corrmc (gsl_monte_function * f,
43 const double xl[], const double xu[],
44 size_t dim, size_t calls,
46 gsl_monte_miser_state * state,
47 double *result, double *abserr,
48 const double xmid[], double sigma_l[], double sigma_r[]);
52 gsl_monte_miser_integrate (gsl_monte_function * f,
53 const double xl[], const double xu[],
54 size_t dim, size_t calls,
56 gsl_monte_miser_state * state,
57 double *result, double *abserr)
59 size_t n, estimate_calls, calls_l, calls_r;
60 const size_t min_calls = state->min_calls;
65 double res_est = 0, err_est = 0;
66 double res_r = 0, err_r = 0, res_l = 0, err_l = 0;
67 double xbi_l, xbi_m, xbi_r, s;
70 double weight_l, weight_r;
73 double *xmid = state->xmid;
74 double *sigma_l = state->sigma_l, *sigma_r = state->sigma_r;
76 if (dim != state->dim)
78 GSL_ERROR ("number of dimensions must match allocated size", GSL_EINVAL);
81 for (i = 0; i < dim; i++)
85 GSL_ERROR ("xu must be greater than xl", GSL_EINVAL);
88 if (xu[i] - xl[i] > GSL_DBL_MAX)
90 GSL_ERROR ("Range of integration is too large, please rescale",
97 GSL_ERROR ("alpha must be non-negative", GSL_EINVAL);
104 for (i = 0; i < dim; i++)
106 vol *= xu[i] - xl[i];
109 if (calls < state->min_calls_per_bisection)
111 double m = 0.0, q = 0.0;
115 GSL_ERROR ("insufficient calls for subvolume", GSL_EFAILED);
118 for (n = 0; n < calls; n++)
120 /* Choose a random point in the integration region */
122 for (i = 0; i < dim; i++)
124 x[i] = xl[i] + gsl_rng_uniform_pos (r) * (xu[i] - xl[i]);
128 double fval = GSL_MONTE_FN_EVAL (f, x);
130 /* recurrence for mean and variance */
134 q += d * d * (n / (n + 1.0));
140 *abserr = vol * sqrt (q / (calls * (calls - 1.0)));
145 estimate_calls = GSL_MAX (min_calls, calls * (state->estimate_frac));
147 if (estimate_calls < 4 * dim)
149 GSL_ERROR ("insufficient calls to sample all halfspaces", GSL_ESANITY);
152 /* Flip coins to bisect the integration region with some fuzz */
154 for (i = 0; i < dim; i++)
156 s = (gsl_rng_uniform (r) - 0.5) >= 0.0 ? state->dither : -state->dither;
157 state->xmid[i] = (0.5 + s) * xl[i] + (0.5 - s) * xu[i];
160 /* The idea is to chose the direction to bisect based on which will
161 give the smallest total variance. We could (and may do so later)
162 use MC to compute these variances. But the NR guys simply estimate
163 the variances by finding the min and max function values
164 for each half-region for each bisection. */
166 estimate_corrmc (f, xl, xu, dim, estimate_calls,
167 r, state, &res_est, &err_est, xmid, sigma_l, sigma_r);
169 /* We have now used up some calls for the estimation */
171 calls -= estimate_calls;
173 /* Now find direction with the smallest total "variance" */
176 double best_var = GSL_DBL_MAX;
177 double beta = 2.0 / (1.0 + state->alpha);
180 weight_l = weight_r = 1.0;
182 for (i = 0; i < dim; i++)
184 if (sigma_l[i] >= 0 && sigma_r[i] >= 0)
186 /* estimates are okay */
187 double var = pow (sigma_l[i], beta) + pow (sigma_r[i], beta);
194 weight_l = pow (sigma_l[i], beta);
195 weight_r = pow (sigma_r[i], beta);
202 GSL_ERROR ("no points in left-half space!", GSL_ESANITY);
206 GSL_ERROR ("no points in right-half space!", GSL_ESANITY);
214 /* All estimates were the same, so chose a direction at random */
216 i_bisect = gsl_rng_uniform_int (r, dim);
219 xbi_l = xl[i_bisect];
220 xbi_m = xmid[i_bisect];
221 xbi_r = xu[i_bisect];
223 /* Get the actual fractional sizes of the two "halves", and
224 distribute the remaining calls among them */
227 double fraction_l = fabs ((xbi_m - xbi_l) / (xbi_r - xbi_l));
228 double fraction_r = 1 - fraction_l;
230 double a = fraction_l * weight_l;
231 double b = fraction_r * weight_r;
233 calls_l = min_calls + (calls - 2 * min_calls) * a / (a + b);
234 calls_r = min_calls + (calls - 2 * min_calls) * b / (a + b);
237 /* Compute the integral for the left hand side of the bisection */
239 /* Due to the recursive nature of the algorithm we must allocate
240 some new memory for each recursive call */
245 double *xu_tmp = (double *) malloc (dim * sizeof (double));
249 GSL_ERROR_VAL ("out of memory for left workspace", GSL_ENOMEM, 0);
252 for (i = 0; i < dim; i++)
257 xu_tmp[i_bisect] = xbi_m;
259 status = gsl_monte_miser_integrate (f, xl, xu_tmp,
260 dim, calls_l, r, state,
264 if (status != GSL_SUCCESS)
270 /* Compute the integral for the right hand side of the bisection */
275 double *xl_tmp = (double *) malloc (dim * sizeof (double));
279 GSL_ERROR_VAL ("out of memory for right workspace", GSL_ENOMEM, 0);
282 for (i = 0; i < dim; i++)
287 xl_tmp[i_bisect] = xbi_m;
289 status = gsl_monte_miser_integrate (f, xl_tmp, xu,
290 dim, calls_r, r, state,
294 if (status != GSL_SUCCESS)
300 *result = res_l + res_r;
301 *abserr = sqrt (err_l * err_l + err_r * err_r);
306 gsl_monte_miser_state *
307 gsl_monte_miser_alloc (size_t dim)
309 gsl_monte_miser_state *s =
310 (gsl_monte_miser_state *) malloc (sizeof (gsl_monte_miser_state));
314 GSL_ERROR_VAL ("failed to allocate space for miser state struct",
318 s->x = (double *) malloc (dim * sizeof (double));
323 GSL_ERROR_VAL ("failed to allocate space for x", GSL_ENOMEM, 0);
326 s->xmid = (double *) malloc (dim * sizeof (double));
332 GSL_ERROR_VAL ("failed to allocate space for xmid", GSL_ENOMEM, 0);
335 s->sigma_l = (double *) malloc (dim * sizeof (double));
342 GSL_ERROR_VAL ("failed to allocate space for sigma_l", GSL_ENOMEM, 0);
345 s->sigma_r = (double *) malloc (dim * sizeof (double));
353 GSL_ERROR_VAL ("failed to allocate space for sigma_r", GSL_ENOMEM, 0);
356 s->fmax_l = (double *) malloc (dim * sizeof (double));
365 GSL_ERROR_VAL ("failed to allocate space for fmax_l", GSL_ENOMEM, 0);
368 s->fmax_r = (double *) malloc (dim * sizeof (double));
378 GSL_ERROR_VAL ("failed to allocate space for fmax_r", GSL_ENOMEM, 0);
381 s->fmin_l = (double *) malloc (dim * sizeof (double));
392 GSL_ERROR_VAL ("failed to allocate space for fmin_l", GSL_ENOMEM, 0);
395 s->fmin_r = (double *) malloc (dim * sizeof (double));
407 GSL_ERROR_VAL ("failed to allocate space for fmin_r", GSL_ENOMEM, 0);
410 s->fsum_l = (double *) malloc (dim * sizeof (double));
423 GSL_ERROR_VAL ("failed to allocate space for fsum_l", GSL_ENOMEM, 0);
426 s->fsum_r = (double *) malloc (dim * sizeof (double));
440 GSL_ERROR_VAL ("failed to allocate space for fsum_r", GSL_ENOMEM, 0);
443 s->fsum2_l = (double *) malloc (dim * sizeof (double));
458 GSL_ERROR_VAL ("failed to allocate space for fsum2_l", GSL_ENOMEM, 0);
461 s->fsum2_r = (double *) malloc (dim * sizeof (double));
477 GSL_ERROR_VAL ("failed to allocate space for fsum2_r", GSL_ENOMEM, 0);
481 s->hits_r = (size_t *) malloc (dim * sizeof (size_t));
498 GSL_ERROR_VAL ("failed to allocate space for fsum2_r", GSL_ENOMEM, 0);
501 s->hits_l = (size_t *) malloc (dim * sizeof (size_t));
519 GSL_ERROR_VAL ("failed to allocate space for fsum2_r", GSL_ENOMEM, 0);
524 gsl_monte_miser_init (s);
530 gsl_monte_miser_init (gsl_monte_miser_state * s)
532 /* We use 8 points in each halfspace to estimate the variance. There are
533 2*dim halfspaces. A variance estimate requires a minimum of 2 points. */
534 s->min_calls = 16 * s->dim;
535 s->min_calls_per_bisection = 32 * s->min_calls;
536 s->estimate_frac = 0.1;
544 gsl_monte_miser_free (gsl_monte_miser_state * s)
564 estimate_corrmc (gsl_monte_function * f,
565 const double xl[], const double xu[],
566 size_t dim, size_t calls,
568 gsl_monte_miser_state * state,
569 double *result, double *abserr,
570 const double xmid[], double sigma_l[], double sigma_r[])
574 double *x = state->x;
575 double *fsum_l = state->fsum_l;
576 double *fsum_r = state->fsum_r;
577 double *fsum2_l = state->fsum2_l;
578 double *fsum2_r = state->fsum2_r;
579 size_t *hits_l = state->hits_l;
580 size_t *hits_r = state->hits_r;
582 double m = 0.0, q = 0.0;
585 for (i = 0; i < dim; i++)
587 vol *= xu[i] - xl[i];
588 hits_l[i] = hits_r[i] = 0;
589 fsum_l[i] = fsum_r[i] = 0.0;
590 fsum2_l[i] = fsum2_r[i] = 0.0;
591 sigma_l[i] = sigma_r[i] = -1;
594 for (n = 0; n < calls; n++)
598 unsigned int j = (n/2) % dim;
599 unsigned int side = (n % 2);
601 for (i = 0; i < dim; i++)
603 double z = gsl_rng_uniform_pos (r) ;
607 x[i] = xl[i] + z * (xu[i] - xl[i]);
613 x[i] = xmid[i] + z * (xu[i] - xmid[i]);
617 x[i] = xl[i] + z * (xmid[i] - xl[i]);
622 fval = GSL_MONTE_FN_EVAL (f, x);
624 /* recurrence for mean and variance */
628 q += d * d * (n / (n + 1.0));
631 /* compute the variances on each side of the bisection */
632 for (i = 0; i < dim; i++)
637 fsum2_l[i] += fval * fval;
643 fsum2_r[i] += fval * fval;
649 for (i = 0; i < dim; i++)
651 double fraction_l = (xmid[i] - xl[i]) / (xu[i] - xl[i]);
655 fsum_l[i] /= hits_l[i];
656 sigma_l[i] = sqrt (fsum2_l[i] - fsum_l[i] * fsum_l[i] / hits_l[i]);
657 sigma_l[i] *= fraction_l * vol / hits_l[i];
662 fsum_r[i] /= hits_r[i];
663 sigma_r[i] = sqrt (fsum2_r[i] - fsum_r[i] * fsum_r[i] / hits_r[i]);
664 sigma_r[i] *= (1 - fraction_l) * vol / hits_r[i];
672 *abserr = GSL_POSINF;
676 *abserr = vol * sqrt (q / (calls * (calls - 1.0)));