2 * Jalview - A Sequence Alignment Editor and Viewer ($$Version-Rel$$)
3 * Copyright (C) $$Year-Rel$$ The Jalview Authors
5 * This file is part of Jalview.
7 * Jalview is free software: you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, either version 3
10 * of the License, or (at your option) any later version.
12 * Jalview is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR
15 * PURPOSE. See the GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Jalview. If not, see <http://www.gnu.org/licenses/>.
19 * The Jalview Authors are detailed in the 'AUTHORS' file.
23 * Copyright 2018-2022 Kathy Su, Kay Diederichs
25 * This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
27 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
33 * Ported from https://doi.org/10.1107/S2059798317000699 by
34 * @AUTHOR MorellThomas
37 package jalview.analysis;
39 import jalview.bin.Console;
40 import jalview.math.MatrixI;
41 import jalview.math.Matrix;
42 import jalview.math.MiscMath;
44 import java.lang.Math;
45 import java.lang.System;
46 import java.util.Arrays;
47 import java.util.ArrayList;
48 import java.util.Comparator;
49 import java.util.Map.Entry;
50 import java.util.TreeMap;
52 import org.apache.commons.math3.linear.Array2DRowRealMatrix;
53 import org.apache.commons.math3.linear.SingularValueDecomposition;
 * Performs cc_analysis: derives low-dimensional representative vectors from a
 * (possibly incomplete) symmetric matrix of pairwise correlation coefficients
58 public class ccAnalysis
60 private byte dim = 0; //dimensions
62 private MatrixI scoresOld; //input scores
64 public ccAnalysis(MatrixI scores, byte dim)
66 // round matrix to .4f to be same as in pasimap
67 for (int i = 0; i < scores.height(); i++)
69 for (int j = 0; j < scores.width(); j++)
71 if (!Double.isNaN(scores.getValue(i,j)))
73 scores.setValue(i, j, (double) Math.round(scores.getValue(i,j) * (int) 10000) / 10000);
77 this.scoresOld = scores;
82 * Initialise a distrust-score for each hypothesis (h) of hSigns
83 * distrust = conHypNum - proHypNum
85 * @param hSigns ~ hypothesis signs (+/-) for each sequence
86 * @param scores ~ input score matrix
88 * @return distrustScores
90 private int[] initialiseDistrusts(byte[] hSigns, MatrixI scores)
92 int[] distrustScores = new int[scores.width()];
94 // loop over symmetric matrix
95 for (int i = 0; i < scores.width(); i++)
97 byte hASign = hSigns[i];
101 for (int j = 0; j < scores.width(); j++)
103 double cell = scores.getRow(i)[j]; // value at [i][j] in scores
104 byte hBSign = hSigns[j];
105 if (!Double.isNaN(cell))
107 byte cellSign = (byte) Math.signum(cell); //check if sign of matrix value fits hyptohesis
108 if (cellSign == hASign * hBSign)
116 distrustScores[i] = conHypNum - proHypNum; //create distrust score for each sequence
118 return distrustScores;
122 * Optemise hypothesis concerning the sign of the hypothetical value for each hSigns by interpreting the pairwise correlation coefficients as scalar products
124 * @param hSigns ~ hypothesis signs (+/-)
125 * @param distrustScores
126 * @param scores ~ input score matrix
130 private byte[] optimiseHypothesis(byte[] hSigns, int[] distrustScores, MatrixI scores)
132 // get maximum distrust score
133 int[] maxes = MiscMath.findMax(distrustScores);
134 int maxDistrustIndex = maxes[0];
135 int maxDistrust = maxes[1];
137 // if hypothesis is not optimal yet
140 //toggle sign for hI with maximum distrust
141 hSigns[maxDistrustIndex] *= -1;
142 // update distrust at same position
143 distrustScores[maxDistrustIndex] *= -1;
145 // also update distrust scores for all hI that were not changed
146 byte hASign = hSigns[maxDistrustIndex];
147 for (int NOTmaxDistrustIndex = 0; NOTmaxDistrustIndex < distrustScores.length; NOTmaxDistrustIndex++)
149 if (NOTmaxDistrustIndex != maxDistrustIndex)
151 byte hBSign = hSigns[NOTmaxDistrustIndex];
152 double cell = scores.getValue(maxDistrustIndex, NOTmaxDistrustIndex);
154 // distrust only changed if not NaN
155 if (!Double.isNaN(cell))
157 byte cellSign = (byte) Math.signum(cell);
158 // if sign of cell matches hypothesis decrease distrust by 2 because 1 more value supporting and 1 less contradicting
159 // else increase by 2
160 if (cellSign == hASign * hBSign)
162 distrustScores[NOTmaxDistrustIndex] -= 2;
164 distrustScores[NOTmaxDistrustIndex] += 2;
169 //further optimisation necessary
170 return optimiseHypothesis(hSigns, distrustScores, scores);
178 * takes the a symmetric MatrixI as input scores which may contain Double.NaN
179 * approximate the missing values using hypothesis optimisation
183 * @param scores ~ score matrix
187 public MatrixI run () throws Exception
189 //initialse eigenMatrix and repMatrix
190 MatrixI eigenMatrix = scoresOld.copy();
191 MatrixI repMatrix = scoresOld.copy();
195 * Calculate correction factor for 2nd and higher eigenvalue(s).
196 * This correction is NOT needed for the 1st eigenvalue, because the
197 * unknown (=NaN) values of the matrix are approximated by presuming
198 * 1-dimensional vectors as the basis of the matrix interpretation as dot
202 System.out.println("Input correlation matrix:");
203 eigenMatrix.print(System.out, "%1.4f ");
205 int matrixWidth = eigenMatrix.width(); // square matrix, so width == height
206 int matrixElementsTotal = (int) Math.pow(matrixWidth, 2); //total number of elemts
208 float correctionFactor = (float) (matrixElementsTotal - eigenMatrix.countNaN()) / (float) matrixElementsTotal;
211 * Calculate hypothetical value (1-dimensional vector) h_i for each
212 * dataset by interpreting the given correlation coefficients as scalar
217 * Memory for current hypothesis concerning sign of each h_i.
218 * List of signs for all h_i in the encoding:
222 * Initial hypothesis: all signs are positive.
224 byte[] hSigns = new byte[matrixWidth];
225 Arrays.fill(hSigns, (byte) 1);
227 //Estimate signs for each h_i by refining hypothesis on signs.
228 hSigns = optimiseHypothesis(hSigns, initialiseDistrusts(hSigns, eigenMatrix), eigenMatrix);
231 //Estimate absolute values for each h_i by determining sqrt of mean of
232 //non-NaN absolute values for every row.
233 double[] hAbs = MiscMath.sqrt(eigenMatrix.absolute().meanRow());
235 //Combine estimated signs with absolute values in obtain total value for
237 double[] hValues = MiscMath.elementwiseMultiply(hSigns, hAbs);
239 /*Complement symmetric matrix by using the scalar products of estimated
240 *values of h_i to replace NaN-cells.
241 *Matrix positions that have estimated values
242 *(only for diagonal and upper off-diagonal values, due to the symmetry
243 *the positions of the lower-diagonal values can be inferred).
244 List of tuples (row_idx, column_idx).*/
246 ArrayList<int[]> estimatedPositions = new ArrayList<int[]>();
248 // for off-diagonal cells
249 for (int rowIndex = 0; rowIndex < matrixWidth - 1; rowIndex++)
251 for (int columnIndex = rowIndex + 1; columnIndex < matrixWidth; columnIndex++)
253 double cell = eigenMatrix.getValue(rowIndex, columnIndex);
254 if (Double.isNaN(cell))
256 //calculate scalar product as new cell value
257 cell = hValues[rowIndex] * hValues[columnIndex];
258 //fill in new value in cell and symmetric partner
259 eigenMatrix.setValue(rowIndex, columnIndex, cell);
260 eigenMatrix.setValue(columnIndex, rowIndex, cell);
261 //save positions of estimated values
262 estimatedPositions.add(new int[]{rowIndex, columnIndex});
267 // for diagonal cells
268 for (int diagonalIndex = 0; diagonalIndex < matrixWidth; diagonalIndex++)
270 double cell = Math.pow(hValues[diagonalIndex], 2);
271 eigenMatrix.setValue(diagonalIndex, diagonalIndex, cell);
272 estimatedPositions.add(new int[]{diagonalIndex, diagonalIndex});
275 /*Refine total values of each h_i:
276 *Initialise h_values of the hypothetical non-existant previous iteration
277 *with the correct format but with impossible values.
278 Needed for exit condition of otherwise endless loop.*/
279 System.out.print("initial values: [ ");
280 for (double h : hValues)
282 System.out.print(String.format("%1.4f, ", h));
284 System.out.println(" ]");
287 double[] hValuesOld = new double[matrixWidth];
289 int iterationCount = 0;
291 // repeat unitl values of h do not significantly change anymore
294 for (int hIndex = 0; hIndex < matrixWidth; hIndex++)
296 double newH = Arrays.stream(MiscMath.elementwiseMultiply(hValues, eigenMatrix.getRow(hIndex))).sum() / Arrays.stream(MiscMath.elementwiseMultiply(hValues, hValues)).sum();
297 hValues[hIndex] = newH;
300 System.out.print(String.format("iteration %d: [ ", iterationCount));
301 for (double h : hValues)
303 System.out.print(String.format("%1.4f, ", h));
305 System.out.println(" ]");
307 //update values of estimated positions
308 for (int[] pair : estimatedPositions) // pair ~ row, col
310 double newVal = hValues[pair[0]] * hValues[pair[1]];
311 eigenMatrix.setValue(pair[0], pair[1], newVal);
312 eigenMatrix.setValue(pair[1], pair[0], newVal);
317 //exit loop as soon as new values are similar to the last iteration
318 if (MiscMath.allClose(hValues, hValuesOld, 0d, 1e-05d, false))
323 //save hValues for comparison in the next iteration
324 System.arraycopy(hValues, 0, hValuesOld, 0, hValues.length);
327 //-----------------------------
328 //Use complemented symmetric matrix to calculate final representative
331 //Eigendecomposition.
335 System.out.println("eigenmatrix");
336 eigenMatrix.print(System.out, "%8.2f");
337 System.out.println();
338 System.out.println("uncorrected eigenvalues");
339 eigenMatrix.printD(System.out, "%2.4f ");
340 System.out.println();
342 double[] eigenVals = eigenMatrix.getD();
344 TreeMap<Double, Integer> eigenPairs = new TreeMap<>(Comparator.reverseOrder());
345 for (int i = 0; i < eigenVals.length; i++)
347 eigenPairs.put(eigenVals[i], i);
350 // matrix of representative eigenvectors (each row is a vector)
351 double[][] _repMatrix = new double[eigenVals.length][dim];
352 double[][] _oldMatrix = new double[eigenVals.length][dim];
353 double[] correctedEigenValues = new double[dim];
356 for (Entry<Double, Integer> pair : eigenPairs.entrySet())
358 double eigenValue = pair.getKey();
359 int column = pair.getValue();
360 double[] eigenVector = eigenMatrix.getColumn(column);
361 //for 2nd and higher eigenvalues
364 eigenValue /= correctionFactor;
366 correctedEigenValues[l] = eigenValue;
367 for (int j = 0; j < eigenVector.length; j++)
369 _repMatrix[j][l] = (eigenValue < 0) ? 0.0 : - Math.sqrt(eigenValue) * eigenVector[j];
370 double tmpOldScore = scoresOld.getColumn(column)[j];
371 _oldMatrix[j][dim - l - 1] = (Double.isNaN(tmpOldScore)) ? 0.0 : tmpOldScore;
380 System.out.println("correctedEigenValues");
381 MiscMath.print(correctedEigenValues, "%2.4f ");
383 repMatrix = new Matrix(_repMatrix);
384 repMatrix.setD(correctedEigenValues);
385 MatrixI oldMatrix = new Matrix(_oldMatrix);
387 MatrixI dotMatrix = repMatrix.postMultiply(repMatrix.transpose());
389 double rmsd = scoresOld.rmsd(dotMatrix);
391 System.out.println("iteration, rmsd, maxDiff, rmsdDiff");
392 System.out.println(String.format("0, %8.5f, -, -", rmsd));
393 // Refine representative vectors by minimising sum-of-squared deviates between dotMatrix and original score matrix
394 for (int iteration = 1; iteration < 21; iteration++) // arbitrarily set to 20
396 MatrixI repMatrixOLD = repMatrix.copy();
397 MatrixI dotMatrixOLD = dotMatrix.copy();
399 // for all rows/hA in the original matrix
400 for (int hAIndex = 0; hAIndex < oldMatrix.height(); hAIndex++)
402 double[] row = oldMatrix.getRow(hAIndex);
403 double[] hA = repMatrix.getRow(hAIndex);
405 //find least-squares-solution fo rdifferences between original scores and representative vectors
406 double[] hAlsm = leastSquaresOptimisation(repMatrix, scoresOld, hAIndex);
407 // update repMatrix with new hAlsm
408 for (int j = 0; j < repMatrix.width(); j++)
410 repMatrix.setValue(hAIndex, j, hAlsm[j]);
414 // dot product of representative vecotrs yields a matrix with values approximating the correlation matrix
415 dotMatrix = repMatrix.postMultiply(repMatrix.transpose());
416 // calculate rmsd between approximation and correlation matrix
417 rmsd = scoresOld.rmsd(dotMatrix);
419 // calculate maximum change of representative vectors of current iteration
420 MatrixI diff = repMatrix.subtract(repMatrixOLD).absolute();
421 double maxDiff = 0.0;
422 for (int i = 0; i < diff.height(); i++)
424 for (int j = 0; j < diff.width(); j++)
426 maxDiff = (diff.getValue(i, j) > maxDiff) ? diff.getValue(i, j) : maxDiff;
430 // calculate rmsd between current and previous estimation
431 double rmsdDiff = dotMatrix.rmsd(dotMatrixOLD);
433 System.out.println(String.format("%d, %8.5f, %8.5f, %8.5f", iteration, rmsd, maxDiff, rmsdDiff));
435 if (!(Math.abs(maxDiff) > 1e-06))
437 repMatrix = repMatrixOLD.copy();
443 } catch (Exception q)
445 Console.error("Error computing cc_analysis: " + q.getMessage());
448 System.out.println("final coordinates:");
449 repMatrix.print(System.out, "%1.8f ");
454 * Create equations system using information on originally known
455 * pairwise correlation coefficients (parsed from infile) and the
456 * representative result vectors
458 * Each equation has the format:
459 * hA * hA - pairwiseCC = 0
461 * hA: unknown variable
462 * hB: known representative vector
463 * pairwiseCC: known pairwise correlation coefficien
465 * The resulting equations system is overdetermined, if there are more
466 * equations than unknown elements
468 * @param x ~ unknown n-dimensional column-vector
469 * (needed for generating equations system, NOT to be specified by user).
470 * @param hAIndex ~ index of currently optimised representative result vector.
471 * @param h ~ matrix with row-wise listing of representative result vectors.
472 * @param originalRow ~ matrix-row of originally parsed pairwise correlation coefficients.
476 private double[] originalToEquasionSystem(double[] hA, MatrixI repMatrix, MatrixI scoresOld, int hAIndex)
478 double[] originalRow = scoresOld.getRow(hAIndex);
479 int nans = MiscMath.countNaN(originalRow);
480 double[] result = new double[originalRow.length - nans];
482 //for all pairwiseCC in originalRow
484 for (int hBIndex = 0; hBIndex < originalRow.length; hBIndex++)
486 double pairwiseCC = originalRow[hBIndex];
487 // if not NaN -> create new equation and add it to the system
488 if (!Double.isNaN(pairwiseCC))
490 double[] hB = repMatrix.getRow(hBIndex);
491 result[resultIndex++] = MiscMath.sum(MiscMath.elementwiseMultiply(hA, hB)) - pairwiseCC;
499 * returns the jacobian matrix
500 * @param repMatrix ~ matrix of representative vectors
501 * @param hAIndex ~ current row index
505 private MatrixI approximateDerivative(MatrixI repMatrix, MatrixI scoresOld, int hAIndex)
508 double[] hA = repMatrix.getRow(hAIndex);
509 double[] f0 = originalToEquasionSystem(hA, repMatrix, scoresOld, hAIndex);
510 double[] signX0 = new double[hA.length];
511 double[] xAbs = new double[hA.length];
512 for (int i = 0; i < hA.length; i++)
514 signX0[i] = (hA[i] >= 0) ? 1 : -1;
515 xAbs[i] = (Math.abs(hA[i]) >= 1.0) ? Math.abs(hA[i]) : 1.0;
517 double rstep = Math.pow(Math.ulp(1.0), 0.5);
519 double[] h = new double [hA.length];
520 for (int i = 0; i < hA.length; i++)
522 h[i] = rstep * signX0[i] * xAbs[i];
527 double[][] jTransposed = new double[n][m];
528 for (int i = 0; i < h.length; i++)
530 double[] x = new double[h.length];
531 System.arraycopy(hA, 0, x, 0, h.length);
533 double dx = x[i] - hA[i];
534 double[] df = originalToEquasionSystem(x, repMatrix, scoresOld, hAIndex);
535 for (int j = 0; j < df.length; j++)
538 jTransposed[i][j] = df[j] / dx;
541 MatrixI J = new Matrix(jTransposed).transpose();
546 * norm of regularized (by alpha) least-squares solution minus Delta
554 private double[] phiAndDerivative(double alpha, double[] suf, double[] s, double Delta)
556 double[] denom = MiscMath.elementwiseAdd(MiscMath.elementwiseMultiply(s, s), alpha);
557 double pNorm = MiscMath.norm(MiscMath.elementwiseDivide(suf, denom));
558 double phi = pNorm - Delta;
559 // - sum ( suf**2 / denom**3) / pNorm
560 double phiPrime = - MiscMath.sum(MiscMath.elementwiseDivide(MiscMath.elementwiseMultiply(suf, suf), MiscMath.elementwiseMultiply(MiscMath.elementwiseMultiply(denom, denom), denom))) / pNorm;
561 return new double[]{phi, phiPrime};
565 * class holding the result of solveLsqTrustRegion
567 private class TrustRegion
569 private double[] step;
570 private double alpha;
571 private int iteration;
573 public TrustRegion(double[] step, double alpha, int iteration)
577 this.iteration = iteration;
580 public double[] getStep()
585 public double getAlpha()
590 public int getIteration()
592 return this.iteration;
597 * solve a trust-region problem arising in least-squares optimisation
598 * @param n ~ number of variables
599 * @param m ~ number of residuals
601 * @param s ~ singular values of J
602 * @param V ~ transpose of VT
603 * @param Delta ~ radius of a trust region
604 * @param alpha ~ initial guess for alpha
608 private TrustRegion solveLsqTrustRegion(int n, int m, double[] uf, double[] s, MatrixI V, double Delta, double alpha)
610 double[] suf = MiscMath.elementwiseMultiply(s, uf);
612 //check if J has full rank and tr Gauss-Newton step
613 boolean fullRank = false;
616 double threshold = s[0] * Math.ulp(1.0) * m;
617 fullRank = s[s.length - 1] > threshold;
621 double[] p = MiscMath.elementwiseMultiply(V.sumProduct(MiscMath.elementwiseDivide(uf, s)), -1);
622 if (MiscMath.norm(p) <= Delta)
624 TrustRegion result = new TrustRegion(p, 0.0, 0);
629 double alphaUpper = MiscMath.norm(suf) / Delta;
630 double alphaLower = 0.0;
633 double[] phiAndPrime = phiAndDerivative(0.0, suf, s, Delta);
634 alphaLower = - phiAndPrime[0] / phiAndPrime[1];
637 alpha = (!fullRank && alpha == 0.0) ? alpha = Math.max(0.001 * alphaUpper, Math.pow(alphaLower * alphaUpper, 0.5)) : alpha;
640 while (iteration < 10) // 10 is default max_iter
642 alpha = (alpha < alphaLower || alpha > alphaUpper) ? alpha = Math.max(0.001 * alphaUpper, Math.pow(alphaLower * alphaUpper, 0.5)) : alpha;
643 double[] phiAndPrime = phiAndDerivative(alpha, suf, s, Delta);
644 double phi = phiAndPrime[0];
645 double phiPrime = phiAndPrime[1];
647 alphaUpper = (phi < 0) ? alpha : alphaUpper;
648 double ratio = phi / phiPrime;
649 alphaLower = Math.max(alphaLower, alpha - ratio);
650 alpha -= (phi + Delta) * ratio / Delta;
652 if (Math.abs(phi) < 0.01 * Delta) // default rtol set to 0.01
659 // p = - V.dot( suf / (s**2 + alpha))
660 double[] tmp = MiscMath.elementwiseDivide(suf, MiscMath.elementwiseAdd(MiscMath.elementwiseMultiply(s, s), alpha));
661 double[] p = MiscMath.elementwiseMultiply(V.sumProduct(tmp), -1);
663 // Make the norm of p equal to Delta, p is changed only slightly during this.
664 // It is done to prevent p lie outside of the trust region
665 p = MiscMath.elementwiseMultiply(p, Delta / MiscMath.norm(p));
667 TrustRegion result = new TrustRegion(p, alpha, iteration + 1);
672 * compute values of a quadratic function arising in least squares
673 * function: 0.5 * s.T * (J.T * J + diag) * s + g.T * s
675 * @param J ~ jacobian matrix
676 * @param g ~ gradient
677 * @param s ~ steps and rows
681 private double evaluateQuadratic(MatrixI J, double[] g, double[] s)
684 double[] Js = J.sumProduct(s);
685 double q = MiscMath.dot(Js, Js);
686 double l = MiscMath.dot(s, g);
692 * update the radius of a trust region based on the cost reduction
695 * @param actualReduction
696 * @param predictedReduction
702 private double[] updateTrustRegionRadius(double Delta, double actualReduction, double predictedReduction, double stepNorm, boolean boundHit)
705 if (predictedReduction > 0)
707 ratio = actualReduction / predictedReduction;
708 } else if (predictedReduction == 0 && actualReduction == 0) {
716 Delta = 0.25 * stepNorm;
717 } else if (ratio > 0.75 && boundHit) {
721 return new double[]{Delta, ratio};
725 * trust region reflective algorithm
726 * @param repMatrix ~ Matrix containing representative vectors
727 * @param scoresOld ~ Matrix containing initial observations
728 * @param index ~ current row index
729 * @param J ~ jacobian matrix
733 private double[] trf(MatrixI repMatrix, MatrixI scoresOld, int index, MatrixI J)
736 double[] hA = repMatrix.getRow(index);
737 double[] f0 = originalToEquasionSystem(hA, repMatrix, scoresOld, index);
741 double cost = 0.5 * MiscMath.dot(f0, f0);
742 double[] g = J.transpose().sumProduct(f0);
743 double Delta = MiscMath.norm(hA);
744 int maxNfev = hA.length * 100;
745 double alpha = 0.0; // "Levenberg-Marquardt" parameter
748 boolean terminationStatus = false;
753 gNorm = MiscMath.norm(g);
754 if (terminationStatus || nfev == maxNfev)
758 SingularValueDecomposition svd = new SingularValueDecomposition(new Array2DRowRealMatrix(J.asArray()));
759 MatrixI U = new Matrix(svd.getU().getData());
760 double[] s = svd.getSingularValues();
761 MatrixI V = new Matrix(svd.getV().getData()).transpose();
762 double[] uf = U.transpose().sumProduct(f0);
764 double actualReduction = -1;
765 double[] xNew = new double[hA.length];
766 double[] fNew = new double[f0.length];
768 double stepHnorm = 0;
770 while (actualReduction <= 0 && nfev < maxNfev)
772 TrustRegion trustRegion = solveLsqTrustRegion(n, m, uf, s, V, Delta, alpha);
773 double[] stepH = trustRegion.getStep();
774 alpha = trustRegion.getAlpha();
775 int nIterations = trustRegion.getIteration();
776 double predictedReduction = - (evaluateQuadratic(J, g, stepH));
778 xNew = MiscMath.elementwiseAdd(hA, stepH);
779 fNew = originalToEquasionSystem(xNew, repMatrix, scoresOld, index);
782 stepHnorm = MiscMath.norm(stepH);
784 if (MiscMath.countNaN(fNew) > 0)
786 Delta = 0.25 * stepHnorm;
790 // usual trust-region step quality estimation
791 costNew = 0.5 * MiscMath.dot(fNew, fNew);
792 actualReduction = cost - costNew;
794 double[] updatedTrustRegion = updateTrustRegionRadius(Delta, actualReduction, predictedReduction, stepHnorm, stepHnorm > (0.95 * Delta));
795 double DeltaNew = updatedTrustRegion[0];
796 double ratio = updatedTrustRegion[1];
798 // default ftol and xtol = 1e-8
799 boolean ftolSatisfied = actualReduction < (1e-8 * cost) && ratio > 0.25;
800 boolean xtolSatisfied = stepHnorm < (1e-8 * (1e-8 + MiscMath.norm(hA)));
801 terminationStatus = ftolSatisfied || xtolSatisfied;
802 if (terminationStatus)
807 alpha *= Delta / DeltaNew;
811 if (actualReduction > 0)
817 J = approximateDerivative(repMatrix, scoresOld, index);
819 g = J.transpose().sumProduct(f0);
831 * performs the least squares optimisation
832 * adapted from https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares
834 * @param repMatrix ~ Matrix containing representative vectors
835 * @param scoresOld ~ Matrix containing initial observations
836 * @param index ~ current row index
840 private double[] leastSquaresOptimisation(MatrixI repMatrix, MatrixI scoresOld, int index)
842 MatrixI J = approximateDerivative(repMatrix, scoresOld, index);
843 double[] result = trf(repMatrix, scoresOld, index, J);