// New and Improved (~20% faster)
// The inefficient inner copy loop has been eliminated; see the sections
// marked "vectorised replacement" below for the changes.
// This version 18 Oct. 1999
//--------------------------- anneal.ox ----------------------------------
//
// This is a quick and dirty translation and modification of Goffe et al.'s
// simulated annealing algorithm. The source was Gauss code obtained from
//
//   http://gurukul.ucc.american.edu/econ/gaussres/GAUSSIDX.HTM
//
// You will find proper credits and references there.
//
// The structure of the program has been modified in minor ways. In particular it
// only maximizes, so objective functions should be written accordingly.
//
// The Gauss code is by E.G. Tsionas, who deserves all credit.
// Translated to Ox by Michael Creel mcreel@volcano.uab.es
/*
Example format of objective function for maximization

obj(const theta)
{
	< your calculations here >
	return(value_of_obj);
}
*/
/*
Example of how to call the routine: the following need to be defined.
Then the obj. function is maximized. See the Goffe et al. documentation
for details.
// SA controls t=1000; rt=.25; ns=20; nt=5; neps=5; eps=1e-5; maxeval=1e10; iprint=1; lb=-2*ones(rows(theta),1); ub=-lb; c=ub; vm=ub./ub; error_code=anneal(obj,&theta, rt, eps, ns, nt, neps, maxeval, lb, ub, c, iprint, t, vm); */ //-------------- The annealing algorithm -------------- anneal(const func, const aParam, const rt, const eps, const ns, const nt, const neps, const maxevl, const lb, const ub, const c, const iprint, t, vm) { decl n, x, xopt, xp, nacp, nacc, nobds, nfuncev, ier; decl fstar, f, fopt, nup, nrej, nnew, ndown, lnobds; decl m, j, h, i, fp, p, pp, ratio, converge; decl test; x=aParam[0]; n = rows(x); xopt = zeros(n, 1); xp = zeros(n, 1); nacp = zeros(n, 1); // Set initial values nacc = 0; nobds = 0; nfuncev = 0; ier = 99; xopt = x; nacp = zeros(n, 1); fstar = 1e20 * ones(neps, 1); // If the initial temperature is not positive, notify the user and abort if(t <= 0.0) { print(" The initial temperature is not positive. Reset the variable t"); return 4; } /** If the initial value is out of bounds, notify the user and abort. **/ if((sumc(x .> ub) + sumc(x .< lb) > 0)) { print("initial condition out of bounds"); return 3; } /** Evaluate the function with input param and return value as f. **/ f = func(x); nfuncev = nfuncev + 1; fopt = f; fstar[0][] = f; if(iprint > 1) { print(""); print("initial x", x'); print("initial f", f); } /** Start the main loop. Note that it terminates if (i) the algorithm successfully optimizes the function or (ii) there are too many function evaluations (more than MAXEVL). 
**/ converge=0; while(converge==0) { nup = 0; nrej = 0; nnew = 0; ndown = 0; lnobds = 0; for(m = 0;m < nt;++m) { for(j = 0;j < ns;++j) { for(h = 0;h < n;++h) { // Creel's code to replace the commented loop below xp = x; xp[h][] = x[h][] + (2*ranu(1, 1) - 1) * vm[h][]; if((xp[h][] < lb[h][]) || (xp[h][] > ub[h][])) { xp[h][] = lb[h][] + (ub[h][] - lb[h][]) * ranu(1, 1); lnobds = lnobds + 1; nobds = nobds + 1; if ((iprint >= 3)) { print(""); print("\ncurrent x");print(x', "\n"); print("current f", f); print("trial x", xp'); print("point rejected since out of bounds"); } } // End of Creel's code // This is the replaced code // /** Generate xp, the trial value of param. Note use of vm to choose xp. **/ // for(i = 0;i < n;++i) // { // if(i == h) // { // xp[i][] = x[i][] + (2*ranu(1, 1) - 1) * vm[i][]; // } // else // { // xp[i][] = x[i][]; // } // // /** If xp is out of bounds, select a point in bounds for the trial. **/ // if((xp[i][] < lb[i][]) || (xp[i][] > ub[i][])) // { // xp[i][] = lb[i][] + (ub[i][] - lb[i][]) * ranu(1, 1); // lnobds = lnobds + 1; // nobds = nobds + 1; // if ((iprint >= 3)) // { // print(""); // print("current x");print(x', "\n"); // print("current f", f); // print("trial x", xp'); // print("point rejected since out of bounds"); // } // } // } /** Evaluate the function with the trial point xp and return as fp. **/ fp = func(xp); nfuncev = nfuncev + 1; if ((iprint >= 3)) { print(""); print("\ncurrent x", x'); print("current f", f); print("trial x", xp'); print("resulting f", fp); } /** If too many function evaluations occur, terminate the algorithm. **/ if(nfuncev >= maxevl) { print("Too many function evaluations; consider"); print("increasing maxevl or eps, or decreasing"); print("nt or rt. These results are likely to be poor"); return 2; } /** Accept the new point if the function value increases. 
**/ if(fp >= f) { if ((iprint >= 3)) { print("point accepted"); } x = xp; f = fp; nacc = nacc + 1; nacp[h][] = nacp[h][] + 1; nup = nup + 1; /** If greater than any other point, record as new optimum. **/ if(fp > fopt) { if(iprint >= 3) { print("\nnew optimum"); } xopt = xp; fopt = fp; nnew = nnew + 1; } } /** If the point is lower, use the Metropolis criteria to decide on acceptance or rejection. **/ else { p = exp((fp - f) / t); pp = ranu(1, 1); if(pp < p) { if(iprint >= 3) { print("though lower, point accepted"); } x = xp; f = fp; nacc = nacc + 1; nacp[h][] = nacp[h][] + 1; ndown = ndown + 1; } else { nrej = nrej + 1; if(iprint >= 3) { print("lower point rejected"); } } } } } /** Adjust vm so that approximately half of all evaluations are accepted. **/ // Creel's code to replace the commented loop below ratio = nacp / ns; test = ratio .> 0.6; vm = (1 - test) .* vm + test .* vm .* (1. + c .* (ratio - 0.6) / 0.4); test = ratio .< 0.4; vm = (1 - test) .* vm + test .* vm ./ (1. + c .* (0.4 - ratio) / 0.4); test = vm .> (ub - lb); vm = (1 - test) .* vm + test .* (ub - lb); // end of Creel's code // this is the replaced code // for(i = 0;i < n;++i) // { // ratio = nacp[i][] / ns; // if(ratio > .6) // { // vm[i][] = vm[i][] * (1. + c[i][] * (ratio - .6) / .4); // } // else if(ratio < .4) // { // vm[i][] = vm[i][] / (1. 
+ c[i][] * ((.4 - ratio) / .4)); // } // if(vm[i][] > (ub[i][] - lb[i][])) // { // vm[i][] = ub[i][] - lb[i][]; // } // } if(iprint >= 2) { print("intermediate results after step length adjustment", "\n"); print("new step length (vm)", vm', "\n"); print("current optimal x", "\n"); print(xopt', "\n"); print("current x", "\n"); print(x', "\n"); print(""); } nacp = zeros(n, 1); } if(iprint >= 1) { print("Intermediate results before next temperature reduction\n"); print("\ncurrent temperature ", t); print("\nmax function value so far", fopt); print("\ntotal moves ", nup + ndown + nrej); print("\nuphill ", nup); print("\naccepted downhill ", ndown); print("\nrejected downhill ", nrej); print("\nout of bounds trials ", lnobds); print("\nnew maxima this temperature ", nnew); print("\ncurrent optimal x", "\n"); print(xopt', "\n"); print("\nstrength (vm)", "\n"); print(vm', "\n"); print(""); } /** Check termination criteria. **/ fstar[0][] = f; if( ((fopt - fstar[0][]) <= eps) && (max(fabs(f - fstar) .> eps) == 0) ) { converge = 1; } /** Terminate SA if appropriate. **/ if(converge) { aParam[0] = xopt; ier=1; if(iprint >= 1) print("\nSuccessful normal termination\n"); print("function value\n",fopt); print("\ntotal accepted function evaluations\n",nacc); print("\ntotal trial function evaluation\n",nfuncev); print("\nnumber of out-of-bounds arguments\n",nobds); print("\nfinal temperature\n",t); return ier; // return breaks out of the program } /** If termination criteria are not met, prepare for another loop. **/ t = rt * t; fstar[1:neps-1][] = fstar[0:neps-2][]; f = fopt; x = xopt; } } /* Now that we're at the end, the temperature is low, so here's a joke from a cold climate: One polar bear to another, discussing igloos: "Man, I love these things - crunchy on the outside, chewy on the inside" */