comparison PsiCLASS-1.0.2/samtools-0.1.19/bcftools/kmin.c @ 0:903fc43d6227 draft default tip

Uploaded
author: lsong10
date:   Fri, 26 Mar 2021 16:52:45 +0000
/* The MIT License

   Copyright (c) 2008, 2010 by Attractive Chaos <attractor@live.co.uk>

   Permission is hereby granted, free of charge, to any person obtaining
   a copy of this software and associated documentation files (the
   "Software"), to deal in the Software without restriction, including
   without limitation the rights to use, copy, modify, merge, publish,
   distribute, sublicense, and/or sell copies of the Software, and to
   permit persons to whom the Software is furnished to do so, subject to
   the following conditions:

   The above copyright notice and this permission notice shall be
   included in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   SOFTWARE.
*/

/* Hooke-Jeeves algorithm for nonlinear minimization

   Based on the pseudocode by Bell and Pike (CACM 9(9):684-685) and the
   revision by Tomlin and Smith (CACM 12(11):637-638). Both papers are
   comments on Kaupe's Algorithm 178 "Direct Search" (CACM 6(6):313-314).
   The original algorithm was designed by Hooke and Jeeves (JACM
   8:212-229). This program is further revised according to Johnson's
   implementation at Netlib (opt/hooke.c).

   The Hooke-Jeeves algorithm is very simple and works quite well on a
   few examples; however, it may fail to converge because of its
   heuristic nature. A possible improvement, as suggested by Johnson, is
   to choose a small r at the beginning to approach the minimum quickly,
   and a larger r at later steps to hit the minimum.
*/

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "kmin.h"

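/* Exploratory move used by kmin_hj(): for each coordinate k, try a step of
 * dx[k]; if that does not improve on fx1, negate dx[k] and try the opposite
 * step; if neither step helps, restore x1[k]. Returns the best value found
 * and updates the function-call counter. */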
static double __kmin_hj_aux(kmin_f func, int n, double *x1, void *data, double fx1, double *dx, int *n_calls)
{
	int k, j = *n_calls;
	double ftmp;
	for (k = 0; k != n; ++k) {
		x1[k] += dx[k];
		ftmp = func(n, x1, data); ++j;
		if (ftmp < fx1) fx1 = ftmp;
		else { /* search the opposite direction */
			dx[k] = 0.0 - dx[k];
			x1[k] += dx[k] + dx[k];
			ftmp = func(n, x1, data); ++j;
			if (ftmp < fx1) fx1 = ftmp;
			else x1[k] -= dx[k]; /* back to the original x[k] */
		}
	}
	*n_calls = j;
	return fx1; /* here: fx1=f(n,x1) */
}

double kmin_hj(kmin_f func, int n, double *x, void *data, double r, double eps, int max_calls)
{
	double fx, fx1, *x1, *dx, radius;
	int k, n_calls = 0;
	x1 = (double*)calloc(n, sizeof(double));
	dx = (double*)calloc(n, sizeof(double));
	for (k = 0; k != n; ++k) { /* initial directions, based on MGJ */
		dx[k] = fabs(x[k]) * r;
		if (dx[k] == 0) dx[k] = r;
	}
	radius = r;
	fx1 = fx = func(n, x, data); ++n_calls;
	for (;;) {
		memcpy(x1, x, n * sizeof(double)); /* x1 = x */
		fx1 = __kmin_hj_aux(func, n, x1, data, fx, dx, &n_calls);
		while (fx1 < fx) {
			for (k = 0; k != n; ++k) {
				double t = x[k];
				dx[k] = x1[k] > x[k]? fabs(dx[k]) : 0.0 - fabs(dx[k]);
				x[k] = x1[k];
				x1[k] = x1[k] + x1[k] - t;
			}
			fx = fx1;
			if (n_calls >= max_calls) break;
			fx1 = func(n, x1, data); ++n_calls;
			fx1 = __kmin_hj_aux(func, n, x1, data, fx1, dx, &n_calls);
			if (fx1 >= fx) break;
			for (k = 0; k != n; ++k)
				if (fabs(x1[k] - x[k]) > .5 * fabs(dx[k])) break;
			if (k == n) break;
		}
		if (radius >= eps) {
			if (n_calls >= max_calls) break;
			radius *= r;
			for (k = 0; k != n; ++k) dx[k] *= r;
		} else break; /* converge */
	}
	free(x1); free(dx);
	return fx1;
}
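
/* A minimal usage sketch (not part of the original file). It assumes the
 * kmin_f type declared in kmin.h has the form
 *     double (*)(int n, double *x, void *data),
 * which matches how func() is invoked above. The guard macro
 * KMIN_HJ_EXAMPLE and the objective rosenbrock() are illustrative names
 * only; compile with -DKMIN_HJ_EXAMPLE to build this example. */
#ifdef KMIN_HJ_EXAMPLE
#include <stdio.h>

/* 2-D Rosenbrock function: minimum value 0 at (1,1) */
static double rosenbrock(int n, double *x, void *data)
{
	(void)n; (void)data;
	return 100.0 * (x[1] - x[0] * x[0]) * (x[1] - x[0] * x[0]) + (1.0 - x[0]) * (1.0 - x[0]);
}

int main(void)
{
	double x[2] = { -1.2, 1.0 }; /* starting point; refined in place */
	/* r is both the initial relative step and the per-round shrink factor;
	 * iteration stops once the radius drops below eps or max_calls is hit */
	double fmin = kmin_hj(rosenbrock, 2, x, NULL, 0.5, 1e-7, 50000);
	printf("min f = %g at (%g, %g)\n", fmin, x[0], x[1]);
	return 0;
}
#endif /* KMIN_HJ_EXAMPLE */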

// I copied this function from somewhere several years ago, with some of my own modifications, but I have forgotten the source.
double kmin_brent(kmin1_f func, double a, double b, void *data, double tol, double *xmin)
{
	double bound, u, r, q, fu, tmp, fa, fb, fc, c;
	const double gold1 = 1.6180339887;
	const double gold2 = 0.3819660113;
	const double tiny = 1e-20;
	const int max_iter = 100;

	double e, d, w, v, mid, tol1, tol2, p, eold, fv, fw;
	int iter;

	fa = func(a, data); fb = func(b, data);
	if (fb > fa) { // swap, such that f(a) > f(b)
		tmp = a; a = b; b = tmp;
		tmp = fa; fa = fb; fb = tmp;
	}
	c = b + gold1 * (b - a), fc = func(c, data); // golden section extrapolation
	while (fb > fc) {
		bound = b + 100.0 * (c - b); // the farthest point where we want to go
		r = (b - a) * (fb - fc);
		q = (b - c) * (fb - fa);
		if (fabs(q - r) < tiny) { // avoid 0 denominator
			tmp = q > r? tiny : 0.0 - tiny;
		} else tmp = q - r;
		u = b - ((b - c) * q - (b - a) * r) / (2.0 * tmp); // u is the parabolic extrapolation point
		if ((b > u && u > c) || (b < u && u < c)) { // u lies between b and c
			fu = func(u, data);
			if (fu < fc) { // (b,u,c) bracket the minimum
				a = b; b = u; fa = fb; fb = fu;
				break;
			} else if (fu > fb) { // (a,b,u) bracket the minimum
				c = u; fc = fu;
				break;
			}
			u = c + gold1 * (c - b); fu = func(u, data); // golden section extrapolation
		} else if ((c > u && u > bound) || (c < u && u < bound)) { // u lies between c and bound
			fu = func(u, data);
			if (fu < fc) { // fb > fc > fu
				b = c; c = u; u = c + gold1 * (c - b);
				fb = fc; fc = fu; fu = func(u, data);
			} else { // (b,c,u) bracket the minimum
				a = b; b = c; c = u;
				fa = fb; fb = fc; fc = fu;
				break;
			}
		} else if ((u > bound && bound > c) || (u < bound && bound < c)) { // u goes beyond the bound
			u = bound; fu = func(u, data);
		} else { // u goes the other way around; use golden section extrapolation
			u = c + gold1 * (c - b); fu = func(u, data);
		}
		a = b; b = c; c = u;
		fa = fb; fb = fc; fc = fu;
	}
	if (a > c) u = a, a = c, c = u; // swap

	// now, a<b<c, fa>fb and fb<fc; move on to Brent's algorithm
	e = d = 0.0;
	w = v = b; fv = fw = fb;
	for (iter = 0; iter != max_iter; ++iter) {
		mid = 0.5 * (a + c);
		tol2 = 2.0 * (tol1 = tol * fabs(b) + tiny);
		if (fabs(b - mid) <= (tol2 - 0.5 * (c - a))) {
			*xmin = b; return fb; // found
		}
		if (fabs(e) > tol1) {
			// related to parabolic interpolation
			r = (b - w) * (fb - fv);
			q = (b - v) * (fb - fw);
			p = (b - v) * q - (b - w) * r;
			q = 2.0 * (q - r);
			if (q > 0.0) p = 0.0 - p;
			else q = 0.0 - q;
			eold = e; e = d;
			if (fabs(p) >= fabs(0.5 * q * eold) || p <= q * (a - b) || p >= q * (c - b)) {
				d = gold2 * (e = (b >= mid ? a - b : c - b));
			} else {
				d = p / q; u = b + d; // actual parabolic interpolation happens here
				if (u - a < tol2 || c - u < tol2)
					d = (mid > b)? tol1 : 0.0 - tol1;
			}
		} else d = gold2 * (e = (b >= mid ? a - b : c - b)); // golden section interpolation
		u = fabs(d) >= tol1 ? b + d : b + (d > 0.0? tol1 : -tol1);
		fu = func(u, data);
		if (fu <= fb) { // u is the minimum point so far
			if (u >= b) a = b;
			else c = b;
			v = w; w = b; b = u; fv = fw; fw = fb; fb = fu;
		} else { // adjust (a,c) and (u,v,w)
			if (u < b) a = u;
			else c = u;
			if (fu <= fw || w == b) {
				v = w; w = u;
				fv = fw; fw = fu;
			} else if (fu <= fv || v == b || v == w) {
				v = u; fv = fu;
			}
		}
	}
	*xmin = b;
	return fb;
}
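
/* A minimal usage sketch (not part of the original file). It assumes the
 * kmin1_f type declared in kmin.h has the form
 *     double (*)(double x, void *data),
 * matching the calls above. KMIN_BRENT_EXAMPLE and quad() are illustrative
 * names only; compile with -DKMIN_BRENT_EXAMPLE to build this example. Note
 * that kmin_brent() brackets the minimum itself, so a and b only need to be
 * two distinct starting abscissae, not a bracketing interval. */
#ifdef KMIN_BRENT_EXAMPLE
#include <stdio.h>

/* simple 1-D objective with its minimum at x = 2 */
static double quad(double x, void *data)
{
	(void)data;
	return (x - 2.0) * (x - 2.0) + 1.0;
}

int main(void)
{
	double xmin;
	/* tol is a relative tolerance on the abscissa (tol1 = tol*|b| + tiny) */
	double fmin = kmin_brent(quad, 0.0, 1.0, NULL, 1e-7, &xmin);
	printf("min f = %g at x = %g\n", fmin, xmin);
	return 0;
}
#endif /* KMIN_BRENT_EXAMPLE */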