/*
    Copyright 2008-2024
        Matthias Ehmann,
        Carsten Miller,
        Reinhard Oldenburg,
        Andreas Walter,
        Alfred Wassermann

    This file is part of JSXGraph.

    JSXGraph is free software dual licensed under the GNU LGPL or MIT License.

    You can redistribute it and/or modify it under the terms of the

      * GNU Lesser General Public License as published by
        the Free Software Foundation, either version 3 of the License, or
        (at your option) any later version
      OR
      * MIT License: https://github.com/jsxgraph/jsxgraph/blob/master/LICENSE.MIT

    JSXGraph is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License and
    the MIT License along with JSXGraph. If not, see <https://www.gnu.org/licenses/>
    and <https://opensource.org/licenses/MIT/>.

    This is a port of jcobyla

    - to JavaScript by Reinhard Oldenburg and
    - to JSXGraph by Alfred Wassermann
    - optimized by Andreas Walter
 */
/*
 * jcobyla
 *
 * The MIT License
 *
 * Copyright (c) 2012 Anders Gustafsson, Cureos AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
 * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Remarks:
 *
 * The original Fortran 77 version of this code was by Michael Powell (M.J.D.Powell @ damtp.cam.ac.uk).
 * The Fortran 90 version was by Alan Miller (Alan.Miller @ vic.cmis.csiro.au). Latest revision - 30 October 1998.
 */

/**
 * Constrained Optimization BY Linear Approximation in Java.
 *
 * COBYLA2 is an implementation of Powell's nonlinear derivative-free constrained optimization method that uses
 * a linear approximation approach. The algorithm is a sequential trust-region algorithm that employs linear
 * approximations to the objective and constraint functions, where the approximations are formed by linear
 * interpolation at n + 1 points in the space of the variables, and tries to maintain a regularly shaped simplex
 * over the iterations.
 *
 * It solves nonsmooth NLPs with a moderate number of variables (about 100), with inequality constraints only.
 *
 * The initial point X is taken as one vertex of the initial simplex with zero being another, so X should
 * not be entered as the zero vector.
 *
 * @author Anders Gustafsson, Cureos AB.
 * Translation to JavaScript by Reinhard Oldenburg, Goethe-University
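 *
 * A minimal usage sketch (the problem and all parameter values below are chosen purely for
 * illustration): minimize f(x) = x[0]^2 + x[1]^2 subject to the single inequality constraint
 * x[0] + x[1] - 1 >= 0. The callback writes the constraint values into CON (they must become
 * nonnegative at a feasible point) and returns the objective value.
 *
 *   var x = [3.0, 1.0],   // initial guess; must not be the zero vector
 *       status = JXG.Math.Nlp.FindMinimum(
 *           function (n, m, xv, con) {
 *               con[0] = xv[0] + xv[1] - 1;            // inequality constraint, >= 0 when satisfied
 *               return xv[0] * xv[0] + xv[1] * xv[1];  // objective function value
 *           },
 *           2,       // n: number of variables
 *           1,       // m: number of constraints
 *           x,       // overwritten with the optimal variables on return
 *           0.5,     // rhobeg: initial simplex size
 *           1.0e-6,  // rhoend: final simplex size (required accuracy)
 *           0,       // iprint: no console output
 *           200      // maxfun: maximum number of function evaluations
 *       );
 *   // status === JXG.Math.Nlp.Normal indicates a regular return; x should now be close to [0.5, 0.5].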
 */

/*global JXG: true, define: true*/
/*jslint nomen: true, plusplus: true, continue: true*/

import JXG from "../jxg.js";
import Mat from "./math.js";
// import Type from "../utils/type.js";

/**
 * The JXG.Math.Nlp namespace holds numerical algorithms for non-linear optimization.
 * @name JXG.Math.Nlp
 * @namespace
 *
 */
JXG.Math.Nlp = {
    arr: function (n) {
        // Is 0 initialized
        return new Float64Array(n);
    },

    arr2: function (n, m) {
        var i = 0,
            a = new Array(n);

        while (i < n) {
            a[i] = this.arr(m);
            i++;
        }
        return a;
    },

    arraycopy: function (x, a, iox, b, n) {
        var i = 0;
        while (i < n) {
            iox[i + b] = x[i + a];
            i++;
        }
    },

    lastNumberOfEvaluations: 0,
    GetLastNumberOfEvaluations: function () {
        return this.lastNumberOfEvaluations;
    },
    // Status variables
    Normal: 0,
    MaxIterationsReached: 1,
    DivergingRoundingErrors: 2,

    /**
     * Minimizes the objective function F with respect to a set of inequality constraints CON,
     * and returns the optimal variable array. F and CON may be non-linear, and should preferably be smooth.
     * Calls {@link JXG.Math.Nlp#cobylb}.
     *
     * @param calcfc Interface implementation for calculating objective function and constraints.
     * @param n Number of variables.
     * @param m Number of constraints.
     * @param x On input initial values of the variables (zero-based array). On output
     * optimal values of the variables obtained in the COBYLA minimization.
     * @param rhobeg Initial size of the simplex.
     * @param rhoend Final size of the simplex, i.e. the required accuracy in the variables.
     * @param iprint Print level, 0 <= iprint <= 3, where 0 provides no output and
     * 3 provides full output to the console.
     * @param maxfun Maximum number of function evaluations before terminating.
     * @param [testForRoundingErrors=false]
     * @returns {Number} Exit status of the COBYLA2 optimization.
     */
    FindMinimum: function (calcfc, n, m, x, rhobeg, rhoend, iprint, maxfun, testForRoundingErrors) {
        // CobylaExitStatus FindMinimum(final Calcfc calcfc, int n, int m, double[] x, double rhobeg, double rhoend, int iprint, int maxfun)
        // This subroutine minimizes an objective function F(X) subject to M
        // inequality constraints on X, where X is a vector of variables that has
        // N components. The algorithm employs linear approximations to the
        // objective and constraint functions, the approximations being formed by
        // linear interpolation at N+1 points in the space of the variables.
        // We regard these interpolation points as vertices of a simplex. The
        // parameter RHO controls the size of the simplex and it is reduced
        // automatically from RHOBEG to RHOEND. For each RHO the subroutine tries
        // to achieve a good vector of variables for the current size, and then
        // RHO is reduced until the value RHOEND is reached. Therefore RHOBEG and
        // RHOEND should be set to reasonable initial changes to and the required
        // accuracy in the variables respectively, but this accuracy should be
        // viewed as a subject for experimentation because it is not guaranteed.
        // The subroutine has an advantage over many of its competitors, however,
        // which is that it treats each constraint individually when calculating
        // a change to the variables, instead of lumping the constraints together
        // into a single penalty function.
        // The name of the subroutine is derived
        // from the phrase Constrained Optimization BY Linear Approximations.

        // The user must set the values of N, M, RHOBEG and RHOEND, and must
        // provide an initial vector of variables in X. Further, the value of
        // IPRINT should be set to 0, 1, 2 or 3, which controls the amount of
        // printing during the calculation. Specifically, there is no output if
        // IPRINT=0 and there is output only at the end of the calculation if
        // IPRINT=1. Otherwise each new value of RHO and SIGMA is printed.
        // Further, the vector of variables and some function information are
        // given either when RHO is reduced or when each new value of F(X) is
        // computed in the cases IPRINT=2 or IPRINT=3 respectively. Here SIGMA
        // is a penalty parameter, it being assumed that a change to X is an
        // improvement if it reduces the merit function
        //     F(X) + SIGMA * MAX(0.0, -C1(X), -C2(X), ..., -CM(X)),
        // where C1, C2, ..., CM denote the constraint functions that should become
        // nonnegative eventually, at least to the precision of RHOEND. In the
        // printed output the displayed term that is multiplied by SIGMA is
        // called MAXCV, which stands for 'MAXimum Constraint Violation'. The
        // argument ITERS is an integer variable that must be set by the user to a
        // limit on the number of calls of CALCFC, the purpose of this routine being
        // given below. The value of ITERS will be altered to the number of calls
        // of CALCFC that are made.
        // In order to define the objective and constraint functions, we require
        // a subroutine that has the name and arguments
        //     SUBROUTINE CALCFC (N,M,X,F,CON)
        //     DIMENSION X(:),CON(:)
        // The values of N and M are fixed and have been defined already, while
        // X is now the current vector of variables. The subroutine should return
        // the objective and constraint functions at X in F and CON(1), CON(2),
        // ..., CON(M). Note that we are trying to adjust X so that F(X) is as
        // small as possible subject to the constraint functions being nonnegative.

        // Local variables
        var mpp = m + 2,
            status,
            // Internal base-1 X array
            iox = this.arr(n + 1),
            that = this,
            fcalcfc;

        this.lastNumberOfEvaluations = 0;

        if (testForRoundingErrors) {
            console.log("Experimental feature 'testForRoundingErrors' is activated.");
        }

        iox[0] = 0.0;
        this.arraycopy(x, 0, iox, 1, n);

        // Internal representation of the objective and constraints calculation method,
        // accounting for the fact that the X and CON arrays in the cobylb method are base-1 arrays.
        fcalcfc = function (n, m, thisx, con) {
            // int n, int m, double[] x, double[] con
            var ix = that.arr(n),
                ocon, f;

            that.lastNumberOfEvaluations = that.lastNumberOfEvaluations + 1;
            that.arraycopy(thisx, 1, ix, 0, n);
            ocon = that.arr(m);
            f = calcfc(n, m, ix, ocon);
            that.arraycopy(ocon, 0, con, 1, m);
            return f;
        };

        status = this.cobylb(fcalcfc, n, m, mpp, iox, rhobeg, rhoend, iprint, maxfun, testForRoundingErrors);
        this.arraycopy(iox, 1, x, 0, n);

        return status;
    },

    // private static CobylaExitStatus cobylb(Calcfc calcfc, int n, int m, int mpp, double[] x,
    //     double rhobeg, double rhoend, int iprint, int maxfun)
    /**
     * JavaScript implementation of the non-linear optimization method COBYLA.
     * @param {Function} calcfc
     * @param {Number} n
     * @param {Number} m
     * @param {Number} mpp
     * @param {Float64Array} x
     * @param {Number} rhobeg
     * @param {Number} rhoend
     * @param {Number} iprint
     * @param {Number} maxfun
     * @param {Boolean} [testForRoundingErrors=false]
     * @returns {Number} Exit status of the COBYLA2 optimization
     */
    cobylb: function (calcfc, n, m, mpp, x, rhobeg, rhoend, iprint, maxfun, testForRoundingErrors) {
        // calcfc is the function that is called as calcfc(n, m, ix, ocon)
        // N.B. Arguments CON, SIM, SIMI, DATMAT, A, VSIG, VETA, SIGBAR, DX, W & IACT
        // have been removed.

        // Set the initial values of some parameters. The last column of SIM holds
        // the optimal vertex of the current simplex, and the preceding N columns
        // hold the displacements from the optimal vertex to the other vertices.
        // Further, SIMI holds the inverse of the matrix that is contained in the
        // first N columns of SIM.

        // Local variables
        var status = -1,
            alpha = 0.25,
            beta = 2.1,
            gamma = 0.5,
            delta = 1.1,
            f = 0.0,
            resmax = 0.0,
            total,
            np = n + 1,
            mp = m + 1,
            rho = rhobeg,
            parmu = 0.0,
            iflag = false,
            ifull = false,
            parsig = 0.0,
            prerec = 0.0,
            prerem = 0.0,
            con = this.arr(1 + mpp),
            sim = this.arr2(1 + n, 1 + np),
            simi = this.arr2(1 + n, 1 + n),
            datmat = this.arr2(1 + mpp, 1 + np),
            a = this.arr2(1 + n, 1 + mp),
            vsig = this.arr(1 + n),
            veta = this.arr(1 + n),
            sigbar = this.arr(1 + n),
            dx = this.arr(1 + n),
            w = this.arr(1 + n),
            i, j, k, l,
            temp, tempa,
            nfvals, jdrop, ibrnch, skipVertexIdent,
            phimin, nbest, error, pareta, wsig, weta,
            cvmaxp, cvmaxm, dxsign, resnew, barmu,
            phi, vmold, vmnew, trured, ratio, edgmax,
            cmin, cmax, denom,
            endless = true;

        if (iprint >= 2) {
            console.log("The initial value of RHO is " + rho + " and PARMU is set to zero.");
        }

        nfvals = 0;
        temp = 1.0 / rho;

        for (i = 1; i <= n; ++i) {
            sim[i][np] = x[i];
            sim[i][i] = rho;
            simi[i][i] = temp;
        }

        jdrop = np;
        ibrnch = false;

        // Make the next call of the user-supplied subroutine CALCFC. These
        // instructions are also used for calling CALCFC during the iterations of
        // the algorithm.
        //alert("Iteration "+nfvals+" x="+x);
        L_40: do {
            if (nfvals >= maxfun && nfvals > 0) {
                status = this.MaxIterationsReached;
                break L_40;
            }

            ++nfvals;
            f = calcfc(n, m, x, con);
            resmax = 0.0;
            for (k = 1; k <= m; ++k) {
                resmax = Math.max(resmax, -con[k]);
            }
            //alert( " f="+f+" resmax="+resmax);

            if (nfvals === iprint - 1 || iprint === 3) {
                this.PrintIterationResult(nfvals, f, resmax, x, n, iprint);
            }

            con[mp] = f;
            con[mpp] = resmax;

            // Set the recently calculated function values in a column of DATMAT. This
            // array has a column for each vertex of the current simplex, the entries of
            // each column being the values of the constraint functions (if any)
            // followed by the objective function and the greatest constraint violation
            // at the vertex.
            skipVertexIdent = true;
            if (!ibrnch) {
                skipVertexIdent = false;

                for (i = 1; i <= mpp; ++i) {
                    datmat[i][jdrop] = con[i];
                }

                if (nfvals <= np) {
                    // Exchange the new vertex of the initial simplex with the optimal vertex if
                    // necessary.
                    // Then, if the initial simplex is not complete, pick its next
                    // vertex and calculate the function values there.

                    if (jdrop <= n) {
                        if (datmat[mp][np] <= f) {
                            x[jdrop] = sim[jdrop][np];
                        } else {
                            sim[jdrop][np] = x[jdrop];
                            for (k = 1; k <= mpp; ++k) {
                                datmat[k][jdrop] = datmat[k][np];
                                datmat[k][np] = con[k];
                            }
                            for (k = 1; k <= jdrop; ++k) {
                                sim[jdrop][k] = -rho;
                                temp = 0.0;
                                for (i = k; i <= jdrop; ++i) {
                                    temp -= simi[i][k];
                                }
                                simi[jdrop][k] = temp;
                            }
                        }
                    }
                    if (nfvals <= n) {
                        jdrop = nfvals;
                        x[jdrop] += rho;
                        continue L_40;
                    }
                }
                ibrnch = true;
            }

            L_140: do {
                L_550: do {
                    if (!skipVertexIdent) {
                        // Identify the optimal vertex of the current simplex.
                        phimin = datmat[mp][np] + parmu * datmat[mpp][np];
                        nbest = np;

                        for (j = 1; j <= n; ++j) {
                            temp = datmat[mp][j] + parmu * datmat[mpp][j];
                            if (temp < phimin) {
                                nbest = j;
                                phimin = temp;
                            } else if (temp === phimin && parmu === 0.0 && datmat[mpp][j] < datmat[mpp][nbest]) {
                                nbest = j;
                            }
                        }

                        // Switch the best vertex into pole position if it is not there already,
                        // and also update SIM, SIMI and DATMAT.
                        if (nbest <= n) {
                            for (i = 1; i <= mpp; ++i) {
                                temp = datmat[i][np];
                                datmat[i][np] = datmat[i][nbest];
                                datmat[i][nbest] = temp;
                            }
                            for (i = 1; i <= n; ++i) {
                                temp = sim[i][nbest];
                                sim[i][nbest] = 0.0;
                                sim[i][np] += temp;

                                tempa = 0.0;
                                for (k = 1; k <= n; ++k) {
                                    sim[i][k] -= temp;
                                    tempa -= simi[k][i];
                                }
                                simi[nbest][i] = tempa;
                            }
                        }

                        // Make an error return if SIMI is a poor approximation to the inverse of
                        // the leading N by N submatrix of SIM.
                        error = 0.0;
                        if (testForRoundingErrors) {
                            for (i = 1; i <= n; ++i) {
                                for (j = 1; j <= n; ++j) {
                                    temp = this.DOT_PRODUCT_ROW_COL(simi, i, sim, j, 1, n) - (i === j ? 1.0 : 0.0);
                                    // temp = this.DOT_PRODUCT(
                                    //     this.PART(this.ROW(simi, i), 1, n),
                                    //     this.PART(this.COL(sim, j), 1, n)
                                    // ) - (i === j ? 1.0 : 0.0);

                                    error = Math.max(error, Math.abs(temp));
                                }
                            }
                        }
                        if (error > 0.1) {
                            status = this.DivergingRoundingErrors;
                            break L_40;
                        }

                        // Calculate the coefficients of the linear approximations to the objective
                        // and constraint functions, placing minus the objective function gradient
                        // after the constraint gradients in the array A. The vector W is used for
                        // working space.
                        for (k = 1; k <= mp; ++k) {
                            con[k] = -datmat[k][np];
                            for (j = 1; j <= n; ++j) {
                                w[j] = datmat[k][j] + con[k];
                            }

                            for (i = 1; i <= n; ++i) {
                                a[i][k] = (k === mp ? -1.0 : 1.0) * this.DOT_PRODUCT_ROW_COL(w, -1, simi, i, 1, n);
                                // this.DOT_PRODUCT(this.PART(w, 1, n), this.PART(this.COL(simi, i), 1, n));
                            }
                        }

                        // Calculate the values of sigma and eta, and set IFLAG = 0 if the current
                        // simplex is not acceptable.
                        iflag = true;
                        parsig = alpha * rho;
                        pareta = beta * rho;

                        for (j = 1; j <= n; ++j) {
                            wsig = 0.0;
                            weta = 0.0;
                            for (k = 1; k <= n; ++k) {
                                wsig += simi[j][k] * simi[j][k];
                                weta += sim[k][j] * sim[k][j];
                            }
                            vsig[j] = 1.0 / Math.sqrt(wsig);
                            veta[j] = Math.sqrt(weta);
                            if (vsig[j] < parsig || veta[j] > pareta) {
                                iflag = false;
                            }
                        }

                        // If a new vertex is needed to improve acceptability, then decide which
                        // vertex to drop from the simplex.
                        if (!ibrnch && !iflag) {
                            jdrop = 0;
                            temp = pareta;
                            for (j = 1; j <= n; ++j) {
                                if (veta[j] > temp) {
                                    jdrop = j;
                                    temp = veta[j];
                                }
                            }
                            if (jdrop === 0) {
                                for (j = 1; j <= n; ++j) {
                                    if (vsig[j] < temp) {
                                        jdrop = j;
                                        temp = vsig[j];
                                    }
                                }
                            }

                            // Calculate the step to the new vertex and its sign.
                            temp = gamma * rho * vsig[jdrop];
                            for (k = 1; k <= n; ++k) {
                                dx[k] = temp * simi[jdrop][k];
                            }
                            cvmaxp = 0.0;
                            cvmaxm = 0.0;
                            total = 0.0;
                            for (k = 1; k <= mp; ++k) {
                                // total = this.DOT_PRODUCT(this.PART(this.COL(a, k), 1, n), this.PART(dx, 1, n));
                                total = this.DOT_PRODUCT_ROW_COL(dx, -1, a, k, 1, n);
                                if (k < mp) {
                                    temp = datmat[k][np];
                                    cvmaxp = Math.max(cvmaxp, -total - temp);
                                    cvmaxm = Math.max(cvmaxm, total - temp);
                                }
                            }
                            dxsign = parmu * (cvmaxp - cvmaxm) > 2.0 * total ? -1.0 : 1.0;

                            // Update the elements of SIM and SIMI, and set the next X.
                            temp = 0.0;
                            for (i = 1; i <= n; ++i) {
                                dx[i] = dxsign * dx[i];
                                sim[i][jdrop] = dx[i];
                                temp += simi[jdrop][i] * dx[i];
                            }
                            for (k = 1; k <= n; ++k) {
                                simi[jdrop][k] /= temp;
                            }

                            for (j = 1; j <= n; ++j) {
                                if (j !== jdrop) {
                                    // temp = this.DOT_PRODUCT(this.PART(this.ROW(simi, j), 1, n), this.PART(dx, 1, n));
                                    temp = this.DOT_PRODUCT_ROW_COL(simi, j, dx, -1, 1, n);
                                    for (k = 1; k <= n; ++k) {
                                        simi[j][k] -= temp * simi[jdrop][k];
                                    }
                                }
                                x[j] = sim[j][np] + dx[j];
                            }
                            continue L_40;
                        }

                        // Calculate DX = x(*)-x(0).
                        // Branch if the length of DX is less than 0.5*RHO.
                        ifull = this.trstlp(n, m, a, con, rho, dx);
                        if (!ifull) {
                            temp = 0.0;
                            for (k = 1; k <= n; ++k) {
                                temp += dx[k] * dx[k];
                            }
                            if (temp < 0.25 * rho * rho) {
                                ibrnch = true;
                                break L_550;
                            }
                        }

                        // Predict the change to F and the new maximum constraint violation if the
                        // variables are altered from x(0) to x(0) + DX.
                        total = 0.0;
                        resnew = 0.0;
                        con[mp] = 0.0;
                        for (k = 1; k <= mp; ++k) {
                            // total = con[k] - this.DOT_PRODUCT(this.PART(this.COL(a, k), 1, n), this.PART(dx, 1, n));
                            total = con[k] - this.DOT_PRODUCT_ROW_COL(dx, -1, a, k, 1, n);
                            if (k < mp) {
                                resnew = Math.max(resnew, total);
                            }
                        }

                        // Increase PARMU if necessary and branch back if this change alters the
                        // optimal vertex. Otherwise PREREM and PREREC will be set to the predicted
                        // reductions in the merit function and the maximum constraint violation
                        // respectively.
                        prerec = datmat[mpp][np] - resnew;
                        barmu = prerec > 0.0 ? total / prerec : 0.0;
                        if (parmu < 1.5 * barmu) {
                            parmu = 2.0 * barmu;
                            if (iprint >= 2) {
                                console.log("Increase in PARMU to " + parmu);
                            }
                            phi = datmat[mp][np] + parmu * datmat[mpp][np];
                            for (j = 1; j <= n; ++j) {
                                temp = datmat[mp][j] + parmu * datmat[mpp][j];
                                if (temp < phi || (temp === phi && parmu === 0.0 && datmat[mpp][j] < datmat[mpp][np])) {
                                    continue L_140;
                                }
                            }
                        }
                        prerem = parmu * prerec - total;

                        // Calculate the constraint and objective functions at x(*).
                        // Then find the actual reduction in the merit function.
                        for (k = 1; k <= n; ++k) {
                            x[k] = sim[k][np] + dx[k];
                        }
                        ibrnch = true;
                        continue L_40;
                    }

                    skipVertexIdent = false;
                    vmold = datmat[mp][np] + parmu * datmat[mpp][np];
                    vmnew = f + parmu * resmax;
                    trured = vmold - vmnew;
                    if (parmu === 0.0 && f === datmat[mp][np]) {
                        prerem = prerec;
                        trured = datmat[mpp][np] - resmax;
                    }

                    // Begin the operations that decide whether x(*) should replace one of the
                    // vertices of the current simplex, the change being mandatory if TRURED is
                    // positive. Firstly, JDROP is set to the index of the vertex that is to be
                    // replaced.
                    ratio = trured <= 0.0 ? 1.0 : 0.0;
                    jdrop = 0;
                    for (j = 1; j <= n; ++j) {
                        // temp = Math.abs(this.DOT_PRODUCT(this.PART(this.ROW(simi, j), 1, n), this.PART(dx, 1, n)));
                        temp = Math.abs(this.DOT_PRODUCT_ROW_COL(simi, j, dx, -1, 1, n));
                        if (temp > ratio) {
                            jdrop = j;
                            ratio = temp;
                        }
                        sigbar[j] = temp * vsig[j];
                    }

                    // Calculate the value of ell.
                    edgmax = delta * rho;
                    l = 0;
                    for (j = 1; j <= n; ++j) {
                        if (sigbar[j] >= parsig || sigbar[j] >= vsig[j]) {
                            temp = veta[j];
                            if (trured > 0.0) {
                                temp = 0.0;
                                for (k = 1; k <= n; ++k) {
                                    temp += Math.pow(dx[k] - sim[k][j], 2.0);
                                }
                                temp = Math.sqrt(temp);
                            }
                            if (temp > edgmax) {
                                l = j;
                                edgmax = temp;
                            }
                        }
                    }
                    if (l > 0) {
                        jdrop = l;
                    }

                    if (jdrop !== 0) {
                        // Revise the simplex by updating the elements of SIM, SIMI and DATMAT.
                        temp = 0.0;
                        for (i = 1; i <= n; ++i) {
                            sim[i][jdrop] = dx[i];
                            temp += simi[jdrop][i] * dx[i];
                        }
                        for (k = 1; k <= n; ++k) {
                            simi[jdrop][k] /= temp;
                        }
                        for (j = 1; j <= n; ++j) {
                            if (j !== jdrop) {
                                // temp = this.DOT_PRODUCT(this.PART(this.ROW(simi, j), 1, n), this.PART(dx, 1, n));
                                temp = this.DOT_PRODUCT_ROW_COL(simi, j, dx, -1, 1, n);
                                for (k = 1; k <= n; ++k) {
                                    simi[j][k] -= temp * simi[jdrop][k];
                                }
                            }
                        }
                        for (k = 1; k <= mpp; ++k) {
                            datmat[k][jdrop] = con[k];
                        }

                        // Branch back for further iterations with the current RHO.
                        if (trured > 0.0 && trured >= 0.1 * prerem) {
                            continue L_140;
                        }
                    }
                    // If we end up here, we drop out.
                } while (!endless);

                if (!iflag) {
                    ibrnch = false;
                    continue L_140;
                }

                if (rho <= rhoend) {
                    status = this.Normal;
                    break L_40;
                }

                // Otherwise reduce RHO if it is not at its least value and reset PARMU.
                cmin = 0.0;
                cmax = 0.0;
                rho *= 0.5;
                if (rho <= 1.5 * rhoend) {
                    rho = rhoend;
                }
                if (parmu > 0.0) {
                    denom = 0.0;
                    for (k = 1; k <= mp; ++k) {
                        cmin = datmat[k][np];
                        cmax = cmin;
                        for (i = 1; i <= n; ++i) {
                            cmin = Math.min(cmin, datmat[k][i]);
                            cmax = Math.max(cmax, datmat[k][i]);
                        }
                        if (k <= m && cmin < 0.5 * cmax) {
                            temp = Math.max(cmax, 0.0) - cmin;
                            denom = denom <= 0.0 ?
                                temp : Math.min(denom, temp);
                        }
                    }
                    if (denom === 0.0) {
                        parmu = 0.0;
                    } else if (cmax - cmin < parmu * denom) {
                        parmu = (cmax - cmin) / denom;
                    }
                }
                if (iprint >= 2) {
                    console.log("Reduction in RHO to " + rho + " and PARMU = " + parmu);
                }
                if (iprint === 2) {
                    this.PrintIterationResult(nfvals, datmat[mp][np], datmat[mpp][np], this.COL(sim, np), n, iprint);
                }
            } while (endless);
        } while (endless);

        switch (status) {
            case this.Normal:
                if (iprint >= 1) {
                    console.log("\nNormal return from subroutine COBYLA\n");
                }
                if (ifull) {
                    if (iprint >= 1) {
                        this.PrintIterationResult(nfvals, f, resmax, x, n, iprint);
                    }
                    return status;
                }
                break;
            case this.MaxIterationsReached:
                if (iprint >= 1) {
                    console.log("\nReturn from subroutine COBYLA because the MAXFUN limit has been reached.\n");
                }
                break;
            case this.DivergingRoundingErrors:
                if (iprint >= 1) {
                    console.log("\nReturn from subroutine COBYLA because rounding errors are becoming damaging.\n");
                }
                break;
        }

        for (k = 1; k <= n; ++k) {
            x[k] = sim[k][np];
        }
        f = datmat[mp][np];
        resmax = datmat[mpp][np];
        if (iprint >= 1) {
            this.PrintIterationResult(nfvals, f, resmax, x, n, iprint);
        }

        return status;
    },

    trstlp: function (n, m, a, b, rho, dx) {
        // (int n, int m, double[][] a, double[] b, double rho, double[] dx)
        // N.B. Arguments Z, ZDOTA, VMULTC, SDIRN, DXNEW, VMULTD & IACT have been removed.

        // This subroutine calculates an N-component vector DX by applying the
        // following two stages. In the first stage, DX is set to the shortest
        // vector that minimizes the greatest violation of the constraints
        //     A(1,K)*DX(1) + A(2,K)*DX(2) + ... + A(N,K)*DX(N) .GE. B(K), K = 2,3,...,M,
        // subject to the Euclidean length of DX being at most RHO. If its length is
        // strictly less than RHO, then we use the resultant freedom in DX to
        // minimize the objective function
        //     -A(1,M+1)*DX(1) - A(2,M+1)*DX(2) - ... - A(N,M+1)*DX(N)
        // subject to no increase in any greatest constraint violation. This
        // notation allows the gradient of the objective function to be regarded as
        // the gradient of a constraint. Therefore the two stages are distinguished
        // by MCON .EQ. M and MCON .GT. M respectively. It is possible that a
        // degeneracy may prevent DX from attaining the target length RHO. Then the
        // value IFULL = 0 would be set, but usually IFULL = 1 on return.
        //
        // In general NACT is the number of constraints in the active set and
        // IACT(1),...,IACT(NACT) are their indices, while the remainder of IACT
        // contains a permutation of the remaining constraint indices. Further, Z
        // is an orthogonal matrix whose first NACT columns can be regarded as the
        // result of Gram-Schmidt applied to the active constraint gradients. For
        // J = 1,2,...,NACT, the number ZDOTA(J) is the scalar product of the J-th
        // column of Z with the gradient of the J-th active constraint. DX is the
        // current vector of variables and here the residuals of the active
        // constraints should be zero. Further, the active constraints have
        // nonnegative Lagrange multipliers that are held at the beginning of
        // VMULTC.
        // The remainder of this vector holds the residuals of the inactive
        // constraints at DX, the ordering of the components of VMULTC being in
        // agreement with the permutation of the indices of the constraints that is
        // in IACT. All these residuals are nonnegative, which is achieved by the
        // shift RESMAX that makes the least residual zero.

        // Initialize Z and some other variables. The value of RESMAX will be
        // appropriate to DX = 0, while ICON will be the index of a most violated
        // constraint if RESMAX is positive. Usually during the first stage the
        // vector SDIRN gives a search direction that reduces all the active
        // constraint violations by one simultaneously.

        // Local variables

        var temp = 0,
            nactx = 0,
            resold = 0.0,
            z = this.arr2(1 + n, 1 + n),
            zdota = this.arr(2 + m),
            vmultc = this.arr(2 + m),
            sdirn = this.arr(1 + n),
            dxnew = this.arr(1 + n),
            vmultd = this.arr(2 + m),
            iact = this.arr(2 + m),
            mcon = m,
            nact = 0,
            icon, resmax,
            i, k, first, optold, icount,
            step, stpful, optnew, ratio,
            isave, vsave, total,
            kp, kk, sp, alpha, beta, tot, spabs,
            acca, accb, zdotv, zdvabs, kw,
            dd, ss, sd,
            zdotw, zdwabs, kl, sumabs, tempa,
            endless = true;

        for (i = 1; i <= n; ++i) {
            z[i][i] = 1.0;
            dx[i] = 0.0;
        }

        icon = 0;
        resmax = 0.0;
        if (m >= 1) {
            for (k = 1; k <= m; ++k) {
                if (b[k] > resmax) {
                    resmax = b[k];
                    icon = k;
                }
            }
            for (k = 1; k <= m; ++k) {
                iact[k] = k;
                vmultc[k] = resmax - b[k];
            }
        }

        // End the current stage of the calculation if 3 consecutive iterations
        // have either failed to reduce the best calculated value of the objective
        // function or to increase the number of active constraints since the best
        // value was calculated. This strategy prevents cycling, but there is a
        // remote possibility that it will cause premature termination.

        first = true;
        do {
            L_60: do {
                if (!first || (first && resmax === 0.0)) {
                    mcon = m + 1;
                    icon = mcon;
                    iact[mcon] = mcon;
                    vmultc[mcon] = 0.0;
                }
                first = false;

                optold = 0.0;
                icount = 0;
                step = 0;
                stpful = 0;

                L_70: do {
                    // optnew = (mcon === m) ? resmax : -this.DOT_PRODUCT(this.PART(dx, 1, n), this.PART(this.COL(a, mcon), 1, n));
                    optnew = mcon === m ? resmax : -this.DOT_PRODUCT_ROW_COL(dx, -1, a, mcon, 1, n);

                    if (icount === 0 || optnew < optold) {
                        optold = optnew;
                        nactx = nact;
                        icount = 3;
                    } else if (nact > nactx) {
                        nactx = nact;
                        icount = 3;
                    } else {
                        --icount;
                    }
                    if (icount === 0) {
                        break L_60;
                    }

                    // If ICON exceeds NACT, then we add the constraint with index IACT(ICON) to
                    // the active set. Apply Givens rotations so that the last N-NACT-1 columns
                    // of Z are orthogonal to the gradient of the new constraint, a scalar
                    // product being set to zero if its nonzero value could be due to computer
                    // rounding errors. The array DXNEW is used for working space.
                    ratio = 0;
                    if (icon <= nact) {
                        if (icon < nact) {
                            // Delete the constraint that has the index IACT(ICON) from the active set.

                            isave = iact[icon];
                            vsave = vmultc[icon];
                            k = icon;
                            do {
                                kp = k + 1;
                                kk = iact[kp];
                                sp = this.DOT_PRODUCT(this.PART(this.COL(z, k), 1, n), this.PART(this.COL(a, kk), 1, n));
                                temp = Mat.hypot(sp, zdota[kp]);
                                alpha = zdota[kp] / temp;
                                beta = sp / temp;
                                zdota[kp] = alpha * zdota[k];
                                zdota[k] = temp;
                                for (i = 1; i <= n; ++i) {
                                    temp = alpha * z[i][kp] + beta * z[i][k];
                                    z[i][kp] = alpha * z[i][k] - beta * z[i][kp];
                                    z[i][k] = temp;
                                }
                                iact[k] = kk;
                                vmultc[k] = vmultc[kp];
                                k = kp;
                            } while (k < nact);

                            iact[k] = isave;
                            vmultc[k] = vsave;
                        }
                        --nact;

                        // If stage one is in progress, then set SDIRN to the direction of the next
                        // change to the current vector of variables.
                        if (mcon > m) {
                            // Pick the next search direction of stage two.
                            temp = 1.0 / zdota[nact];
                            for (k = 1; k <= n; ++k) {
                                sdirn[k] = temp * z[k][nact];
                            }
                        } else {
                            // temp = this.DOT_PRODUCT(this.PART(sdirn, 1, n), this.PART(this.COL(z, nact + 1), 1, n));
                            temp = this.DOT_PRODUCT_ROW_COL(sdirn, -1, z, nact + 1, 1, n);
                            for (k = 1; k <= n; ++k) {
                                sdirn[k] -= temp * z[k][nact + 1];
                            }
                        }
                    } else {
                        kk = iact[icon];
                        for (k = 1; k <= n; ++k) {
                            dxnew[k] = a[k][kk];
                        }
                        tot = 0.0;

                        // {
                        k = n;
                        while (k > nact) {
                            sp = 0.0;
                            spabs = 0.0;
                            for (i = 1; i <= n; ++i) {
                                temp = z[i][k] * dxnew[i];
                                sp += temp;
                                spabs += Math.abs(temp);
                            }
                            acca = spabs + 0.1 * Math.abs(sp);
                            accb = spabs + 0.2 * Math.abs(sp);
                            if (spabs >= acca || acca >= accb) {
                                sp = 0.0;
                            }
                            if (tot === 0.0) {
                                tot = sp;
                            } else {
                                kp = k + 1;
                                temp = Mat.hypot(sp, tot);
                                alpha = sp / temp;
                                beta = tot / temp;
                                tot = temp;
                                for (i = 1; i <= n; ++i) {
                                    temp = alpha * z[i][k] + beta * z[i][kp];
                                    z[i][kp] = alpha * z[i][kp] - beta * z[i][k];
                                    z[i][k] = temp;
                                }
                            }
                            --k;
                        }
                        // }

                        if (tot === 0.0) {
                            // The next instruction is reached if a deletion has to be made from the
                            // active set in order to make room for the new active constraint, because
                            // the new constraint gradient is a linear combination of the gradients of
                            // the old active constraints. Set the elements of VMULTD to the multipliers
                            // of the linear combination. Further, set IOUT to the index of the
                            // constraint to be deleted, but branch if no suitable index can be found.

                            ratio = -1.0;
                            // {
                            k = nact;
                            do {
                                zdotv = 0.0;
                                zdvabs = 0.0;

                                for (i = 1; i <= n; ++i) {
                                    temp = z[i][k] * dxnew[i];
                                    zdotv += temp;
                                    zdvabs += Math.abs(temp);
                                }
                                acca = zdvabs + 0.1 * Math.abs(zdotv);
                                accb = zdvabs + 0.2 * Math.abs(zdotv);
                                if (zdvabs < acca && acca < accb) {
                                    temp = zdotv / zdota[k];
                                    if (temp > 0.0 && iact[k] <= m) {
                                        tempa = vmultc[k] / temp;
                                        if (ratio < 0.0 || tempa < ratio) {
                                            ratio = tempa;
                                        }
                                    }

                                    if (k >= 2) {
                                        kw = iact[k];
                                        for (i = 1; i <= n; ++i) {
                                            dxnew[i] -= temp * a[i][kw];
                                        }
                                    }
                                    vmultd[k] = temp;
                                } else {
                                    vmultd[k] = 0.0;
                                }
                            } while (--k > 0);
                            // }
                            if (ratio < 0.0) {
                                break L_60;
                            }

                            // Revise the Lagrange multipliers and reorder the active constraints so
                            // that the one to be replaced is at the end of the list. Also calculate the
                            // new value of ZDOTA(NACT) and branch if it is not acceptable.

                            for (k = 1; k <= nact; ++k) {
                                vmultc[k] = Math.max(0.0, vmultc[k] - ratio * vmultd[k]);
                            }
                            if (icon < nact) {
                                isave = iact[icon];
                                vsave = vmultc[icon];
                                k = icon;
                                do {
                                    kp = k + 1;
                                    kw = iact[kp];
                                    sp = this.DOT_PRODUCT(this.PART(this.COL(z, k), 1, n), this.PART(this.COL(a, kw), 1, n));
                                    temp = Mat.hypot(sp, zdota[kp]);
                                    alpha = zdota[kp] / temp;
                                    beta = sp / temp;
                                    zdota[kp] = alpha * zdota[k];
                                    zdota[k] = temp;
                                    for (i = 1; i <= n; ++i) {
                                        temp = alpha * z[i][kp] + beta * z[i][k];
                                        z[i][kp] = alpha * z[i][k] - beta * z[i][kp];
                                        z[i][k] = temp;
                                    }
                                    iact[k] = kw;
                                    vmultc[k] = vmultc[kp];
                                    k = kp;
                                } while (k < nact);
                                iact[k] = isave;
                                vmultc[k] = vsave;
                            }
                            temp = this.DOT_PRODUCT(this.PART(this.COL(z, nact), 1, n), this.PART(this.COL(a, kk), 1, n));
                            if (temp === 0.0) {
                                break L_60;
                            }
                            zdota[nact] = temp;
                            vmultc[icon] = 0.0;
                            vmultc[nact] = ratio;
                        } else {
                            // Add the new constraint if this can be done without a deletion from the
                            // active set.

                            ++nact;
                            zdota[nact] = tot;
                            vmultc[icon] = vmultc[nact];
                            vmultc[nact] = 0.0;
                        }

                        // Update IACT and ensure that the objective function continues to be
                        // treated as the last active constraint when MCON>M.

                        iact[icon] = iact[nact];
                        iact[nact] = kk;
                        if (mcon > m && kk !== mcon) {
                            k = nact - 1;
                            sp = this.DOT_PRODUCT(this.PART(this.COL(z, k), 1, n), this.PART(this.COL(a, kk), 1, n));
                            temp = Mat.hypot(sp, zdota[nact]);
                            alpha = zdota[nact] / temp;
                            beta = sp / temp;
                            zdota[nact] = alpha * zdota[k];
                            zdota[k] = temp;
                            for (i = 1; i <= n; ++i) {
                                temp = alpha * z[i][nact] + beta * z[i][k];
                                z[i][nact] = alpha * z[i][k] - beta * z[i][nact];
                                z[i][k] = temp;
                            }
                            iact[nact] = iact[k];
                            iact[k] = kk;
                            temp = vmultc[k];
                            vmultc[k] = vmultc[nact];
                            vmultc[nact] = temp;
                        }

                        // If stage one is in progress, then set SDIRN to the direction of the next
                        // change to the current vector of variables.
                        if (mcon > m) {
                            // Pick the next search direction of stage two.
                            temp = 1.0 / zdota[nact];
                            for (k = 1; k <= n; ++k) {
                                sdirn[k] = temp * z[k][nact];
                            }
                        } else {
                            kk = iact[nact];
                            // temp = (this.DOT_PRODUCT(this.PART(sdirn, 1, n), this.PART(this.COL(a, kk), 1, n)) - 1.0) / zdota[nact];
                            temp = (this.DOT_PRODUCT_ROW_COL(sdirn, -1, a, kk, 1, n) - 1.0) / zdota[nact];
                            for (k = 1; k <= n; ++k) {
                                sdirn[k] -= temp * z[k][nact];
                            }
                        }
                    }

                    // Calculate the step to the boundary of the trust region or take the step
                    // that reduces RESMAX to zero. The two statements below that include the
                    // factor 1.0E-6 prevent some harmless underflows that occurred in a test
                    // calculation. Further, we skip the step if it could be zero within a
                    // reasonable tolerance for computer rounding errors.
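                    // (A note on the formulas below: with DD = RHO^2 - ||DX||^2, SD = DX.SDIRN and
                    // SS = ||SDIRN||^2, up to the 1.0E-6 guards mentioned above, STPFUL is the
                    // positive root t of SS*t^2 + 2*SD*t - DD = 0, i.e. the step length with
                    // ||DX + t*SDIRN|| = RHO, evaluated in the cancellation-free form
                    // t = DD / (SQRT(SS*DD + SD*SD) + SD).)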
                    dd = rho * rho;
                    sd = 0.0;
                    ss = 0.0;
                    for (i = 1; i <= n; ++i) {
                        if (Math.abs(dx[i]) >= 1.0e-6 * rho) {
                            dd -= dx[i] * dx[i];
                        }
                        sd += dx[i] * sdirn[i];
                        ss += sdirn[i] * sdirn[i];
                    }
                    if (dd <= 0.0) {
                        break L_60;
                    }
                    temp = Math.sqrt(ss * dd);
                    if (Math.abs(sd) >= 1.0e-6 * temp) {
                        temp = Math.sqrt(ss * dd + sd * sd);
                    }
                    stpful = dd / (temp + sd);
                    step = stpful;
                    if (mcon === m) {
                        acca = step + 0.1 * resmax;
                        accb = step + 0.2 * resmax;
                        if (step >= acca || acca >= accb) {
                            break L_70;
                        }
                        step = Math.min(step, resmax);
                    }

                    // Set DXNEW to the new variables if STEP is the steplength, and reduce
                    // RESMAX to the corresponding maximum residual if stage one is being done.
                    // Because DXNEW will be changed during the calculation of some Lagrange
                    // multipliers, it will be restored to the following value later.
                    for (k = 1; k <= n; ++k) {
                        dxnew[k] = dx[k] + step * sdirn[k];
                    }
                    if (mcon === m) {
                        resold = resmax;
                        resmax = 0.0;
                        for (k = 1; k <= nact; ++k) {
                            kk = iact[k];
                            // temp = b[kk] - this.DOT_PRODUCT(this.PART(this.COL(a, kk), 1, n), this.PART(dxnew, 1, n));
                            temp = b[kk] - this.DOT_PRODUCT_ROW_COL(dxnew, -1, a, kk, 1, n);
                            resmax = Math.max(resmax, temp);
                        }
                    }

                    // Set VMULTD to the VMULTC vector that would occur if DX became DXNEW. A
                    // device is included to force VMULTD(K) = 0.0 if deviations from this value
                    // can be attributed to computer rounding errors. First calculate the new
                    // Lagrange multipliers.
                    // {
                    k = nact;
                    do {
                        zdotw = 0.0;
                        zdwabs = 0.0;
                        for (i = 1; i <= n; ++i) {
                            temp = z[i][k] * dxnew[i];
                            zdotw += temp;
                            zdwabs += Math.abs(temp);
                        }
                        acca = zdwabs + 0.1 * Math.abs(zdotw);
                        accb = zdwabs + 0.2 * Math.abs(zdotw);
                        if (zdwabs >= acca || acca >= accb) {
                            zdotw = 0.0;
                        }
                        vmultd[k] = zdotw / zdota[k];
                        if (k >= 2) {
                            kk = iact[k];
                            for (i = 1; i <= n; ++i) {
                                dxnew[i] -= vmultd[k] * a[i][kk];
                            }
                        }
                    } while (k-- >= 2);
                    if (mcon > m) {
                        vmultd[nact] = Math.max(0.0, vmultd[nact]);
                    }
                    // }

                    // Complete VMULTC by finding the new constraint residuals.

                    for (k = 1; k <= n; ++k) {
                        dxnew[k] = dx[k] + step * sdirn[k];
                    }
                    if (mcon > nact) {
                        kl = nact + 1;
                        for (k = kl; k <= mcon; ++k) {
                            kk = iact[k];
                            total = resmax - b[kk];
                            sumabs = resmax + Math.abs(b[kk]);
                            for (i = 1; i <= n; ++i) {
                                temp = a[i][kk] * dxnew[i];
                                total += temp;
                                sumabs += Math.abs(temp);
                            }
                            acca = sumabs + 0.1 * Math.abs(total);
                            accb = sumabs + 0.2 * Math.abs(total);
                            if (sumabs >= acca || acca >= accb) {
                                total = 0.0;
                            }
                            vmultd[k] = total;
                        }
                    }

                    // Calculate the fraction of the step from DX to DXNEW that will be taken.

                    ratio = 1.0;
                    icon = 0;
                    for (k = 1; k <= mcon; ++k) {
                        if (vmultd[k] < 0.0) {
                            temp = vmultc[k] / (vmultc[k] - vmultd[k]);
                            if (temp < ratio) {
                                ratio = temp;
                                icon = k;
                            }
                        }
                    }

                    // Update DX, VMULTC and RESMAX.

                    temp = 1.0 - ratio;
                    for (k = 1; k <= n; ++k) {
                        dx[k] = temp * dx[k] + ratio * dxnew[k];
                    }
                    for (k = 1; k <= mcon; ++k) {
                        vmultc[k] = Math.max(0.0, temp * vmultc[k] + ratio * vmultd[k]);
                    }
                    if (mcon === m) {
                        resmax = resold + ratio * (resmax - resold);
                    }

                    // If the full step is not acceptable then begin another iteration.
                    // Otherwise switch to stage two or end the calculation.
                } while (icon > 0);

                if (step === stpful) {
                    return true;
                }
            } while (endless);

            // We employ any freedom that may be available to reduce the objective
            // function before returning a DX whose length is less than RHO.
        } while (mcon === m);

        return false;
    },

    PrintIterationResult: function (nfvals, f, resmax, x, n, iprint) {
        if (iprint > 1) {
            console.log("NFVALS = " + nfvals + " F = " + f + " MAXCV = " + resmax);
        }
        if (iprint > 1) {
            console.log("X = " + this.PART(x, 1, n));
        }
    },

    ROW: function (src, rowidx) {
        return src[rowidx].slice();
        // var col,
        //     cols = src[0].length,
        //     dest = this.arr(cols);

        // for (col = 0; col < cols; ++col) {
        //     dest[col] = src[rowidx][col];
        // }
        // return dest;
    },

    COL: function (src, colidx) {
        var row,
            rows = src.length,
            dest = []; // this.arr(rows);

        for (row = 0; row < rows; ++row) {
            dest[row] = src[row][colidx];
        }
        return dest;
    },

    PART: function (src, from, to) {
        return src.slice(from, to + 1);
        // var srcidx,
        //     dest = this.arr(to - from + 1),
        //     destidx = 0;
        // for (srcidx = from; srcidx <= to; ++srcidx, ++destidx) {
        //     dest[destidx] = src[srcidx];
        // }
        // return dest;
    },

    FORMAT: function (x) {
        return x.join(",");
        // var i, fmt = "";
        // for (i = 0; i < x.length; ++i) {
        //     fmt += ", " + x[i];
        // }
        // return fmt;
    },

    DOT_PRODUCT: function (lhs, rhs) {
        var i,
            sum = 0.0,
            len = lhs.length;
        for (i = 0; i < len; ++i) {
            sum += lhs[i] * rhs[i];
        }
        return sum;
    },

    DOT_PRODUCT_ROW_COL: function (lhs, row, rhs, col, start, end) {
        var i,
            sum = 0.0;

        if (row === -1) {
            // lhs is vector
            for (i = start; i <= end; ++i) {
                sum += lhs[i] * rhs[i][col];
            }
        } else {
            // lhs is row of matrix
            if (col === -1) {
                // rhs is vector
                for (i = start; i <= end; ++i) {
                    sum += lhs[row][i] * rhs[i];
                }
            } else {
                // rhs is column of matrix
                for (i = start; i <= end; ++i) {
                    sum += lhs[row][i] * rhs[i][col];
                }
            }
        }

        return sum;
    }
};

export default JXG.Math.Nlp;