2 * Jalview - A Sequence Alignment Editor and Viewer ($$Version-Rel$$)
3 * Copyright (C) $$Year-Rel$$ The Jalview Authors
5 * This file is part of Jalview.
7 * Jalview is free software: you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation, either version 3
10 * of the License, or (at your option) any later version.
12 * Jalview is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR
15 * PURPOSE. See the GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Jalview. If not, see <http://www.gnu.org/licenses/>.
19 * The Jalview Authors are detailed in the 'AUTHORS' file.
23 * Copyright 2018-2022 Kathy Su, Kay Diederichs
25 * This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
27 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
33 * Ported from https://doi.org/10.1107/S2059798317000699 by
34 * @AUTHOR MorellThomas
37 package jalview.analysis;
39 import jalview.bin.Console;
40 import jalview.math.MatrixI;
41 import jalview.math.Matrix;
42 import jalview.math.MiscMath;
44 import java.lang.Math;
45 import java.lang.System;
46 import java.util.Arrays;
47 import java.util.ArrayList;
48 import java.util.Comparator;
49 import java.util.Map.Entry;
50 import java.util.TreeMap;
52 import org.apache.commons.math3.linear.Array2DRowRealMatrix;
53 import org.apache.commons.math3.linear.SingularValueDecomposition;
56 * A class to model rectangular matrices of double values and operations on them
public class ccAnalysis
private byte dim = 0; // dimensions of the output coordinates (rows of the representative vectors)

private MatrixI scoresOld; // input pairwise correlation score matrix (symmetric, may contain NaN)
/**
 * Constructor: rounds the input score matrix to 4 decimal places (to match
 * the values produced by pasimap) and stores it for later analysis.
 *
 * @param scores
 *          ~ symmetric matrix of pairwise correlation scores; may contain
 *          Double.NaN for unknown pairs
 * @param dim
 *          ~ number of dimensions for the final result
 */
public ccAnalysis(MatrixI scores, byte dim)
// round matrix to .4f to be same as in pasimap
for (int i = 0; i < scores.height(); i++)
for (int j = 0; j < scores.width(); j++)
// NaN cells are left untouched; only real values are rounded
if (!Double.isNaN(scores.getValue(i, j)))
// scale up by 10^4 before rounding (scaled back down when stored)
(double) Math.round(scores.getValue(i, j) * (int) 10000)
this.scoresOld = scores;
84 * Initialise a distrust-score for each hypothesis (h) of hSigns distrust =
85 * conHypNum - proHypNum
88 * ~ hypothesis signs (+/-) for each sequence
90 * ~ input score matrix
92 * @return distrustScores
/**
 * Initialises a distrust score for each sign hypothesis h_i:
 * distrust = (number of cells contradicting the hypothesis)
 *          - (number of cells supporting the hypothesis).
 *
 * @param hSigns
 *          ~ hypothesis signs (+1/-1) for each sequence
 * @param scores
 *          ~ symmetric input score matrix (may contain NaN)
 *
 * @return distrust score per column/sequence
 */
private int[] initialiseDistrusts(byte[] hSigns, MatrixI scores)
int[] distrustScores = new int[scores.width()];
// loop over symmetric matrix
for (int i = 0; i < scores.width(); i++)
byte hASign = hSigns[i];
for (int j = 0; j < scores.width(); j++)
double cell = scores.getRow(i)[j]; // value at [i][j] in scores
byte hBSign = hSigns[j];
// NaN cells carry no information and are skipped
if (!Double.isNaN(cell))
byte cellSign = (byte) Math.signum(cell); // check if sign of matrix
// value fits hypothesis
// a cell supports the hypothesis if its sign equals the product of
// the two hypothesised signs
if (cellSign == hASign * hBSign)
distrustScores[i] = conHypNum - proHypNum; // create distrust score for
// this column (contradicting minus supporting counts)
return distrustScores;
130 * Optimise hypothesis concerning the sign of the hypothetical value for each
131 * hSigns by interpreting the pairwise correlation coefficients as scalar
135 * ~ hypothesis signs (+/-)
136 * @param distrustScores
138 * ~ input score matrix
/**
 * Recursively optimises the sign hypothesis: flips the sign of the h_i with
 * the highest (most distrusted) score, updates all affected distrust
 * scores, and recurses until the hypothesis cannot be improved further.
 *
 * @param hSigns
 *          ~ hypothesis signs (+1/-1), modified in place
 * @param distrustScores
 *          ~ current distrust score per h_i, modified in place
 * @param scores
 *          ~ symmetric input score matrix (may contain NaN)
 *
 * @return the optimised sign hypothesis
 */
private byte[] optimiseHypothesis(byte[] hSigns, int[] distrustScores,
// get maximum distrust score
int[] maxes = MiscMath.findMax(distrustScores);
int maxDistrustIndex = maxes[0];
int maxDistrust = maxes[1];
// if hypothesis is not optimal yet
// toggle sign for hI with maximum distrust
hSigns[maxDistrustIndex] *= -1;
// update distrust at same position
// (flipping the sign exactly swaps supporting and contradicting counts)
distrustScores[maxDistrustIndex] *= -1;
// also update distrust scores for all hI that were not changed
byte hASign = hSigns[maxDistrustIndex];
for (int NOTmaxDistrustIndex = 0; NOTmaxDistrustIndex < distrustScores.length; NOTmaxDistrustIndex++)
if (NOTmaxDistrustIndex != maxDistrustIndex)
byte hBSign = hSigns[NOTmaxDistrustIndex];
double cell = scores.getValue(maxDistrustIndex,
NOTmaxDistrustIndex);
// distrust only changed if not NaN
if (!Double.isNaN(cell))
byte cellSign = (byte) Math.signum(cell);
// if sign of cell matches hypothesis decrease distrust by 2 because
// 1 more value supporting and 1 less contradicting
// else increase by 2
if (cellSign == hASign * hBSign)
distrustScores[NOTmaxDistrustIndex] -= 2;
distrustScores[NOTmaxDistrustIndex] += 2;
// further optimisation necessary
return optimiseHypothesis(hSigns, distrustScores, scores);
* takes a symmetric MatrixI as input scores which may contain Double.NaN
198 * approximate the missing values using hypothesis optimisation
/**
 * Runs cc_analysis: completes the NaN cells of the symmetric input score
 * matrix via sign-hypothesis optimisation, eigendecomposes the completed
 * matrix, and then iteratively refines the representative vectors with a
 * least-squares optimisation so that their pairwise dot products
 * approximate the original correlation matrix.
 *
 * @return matrix of representative coordinate vectors (one row per dataset)
 *
 * @throws Exception
 */
public MatrixI run() throws Exception
// initialise eigenMatrix and repMatrix
MatrixI eigenMatrix = scoresOld.copy();
MatrixI repMatrix = scoresOld.copy();
/*
 * Calculate correction factor for 2nd and higher eigenvalue(s).
 * This correction is NOT needed for the 1st eigenvalue, because the
 * unknown (=NaN) values of the matrix are approximated by presuming
 * 1-dimensional vectors as the basis of the matrix interpretation as dot
 * products.
 */
System.out.println("Input correlation matrix:");
eigenMatrix.print(System.out, "%1.4f ");
int matrixWidth = eigenMatrix.width(); // square matrix, so width ==
// height
int matrixElementsTotal = (int) Math.pow(matrixWidth, 2); // total number
// of cells
// fraction of known (non-NaN) cells in the matrix
float correctionFactor = (float) (matrixElementsTotal
- eigenMatrix.countNaN()) / (float) matrixElementsTotal;
/*
 * Calculate hypothetical value (1-dimensional vector) h_i for each
 * dataset by interpreting the given correlation coefficients as scalar
 * products.
 *
 * Memory for current hypothesis concerning sign of each h_i.
 * List of signs for all h_i in the encoding:
 * Initial hypothesis: all signs are positive.
 */
byte[] hSigns = new byte[matrixWidth];
Arrays.fill(hSigns, (byte) 1);
// Estimate signs for each h_i by refining hypothesis on signs.
hSigns = optimiseHypothesis(hSigns,
initialiseDistrusts(hSigns, eigenMatrix), eigenMatrix);
// Estimate absolute values for each h_i by determining sqrt of mean of
// non-NaN absolute values for every row.
double[] hAbs = MiscMath.sqrt(eigenMatrix.absolute().meanRow());
// Combine estimated signs with absolute values to obtain total value for
// each h_i.
double[] hValues = MiscMath.elementwiseMultiply(hSigns, hAbs);
/*Complement symmetric matrix by using the scalar products of estimated
*values of h_i to replace NaN-cells.
*Matrix positions that have estimated values
*(only for diagonal and upper off-diagonal values, due to the symmetry
*the positions of the lower-diagonal values can be inferred).
List of tuples (row_idx, column_idx).*/
ArrayList<int[]> estimatedPositions = new ArrayList<int[]>();
// for off-diagonal cells
for (int rowIndex = 0; rowIndex < matrixWidth - 1; rowIndex++)
for (int columnIndex = rowIndex
+ 1; columnIndex < matrixWidth; columnIndex++)
double cell = eigenMatrix.getValue(rowIndex, columnIndex);
if (Double.isNaN(cell))
// calculate scalar product as new cell value
cell = hValues[rowIndex] * hValues[columnIndex];
// fill in new value in cell and symmetric partner
eigenMatrix.setValue(rowIndex, columnIndex, cell);
eigenMatrix.setValue(columnIndex, rowIndex, cell);
// save positions of estimated values
estimatedPositions.add(new int[] { rowIndex, columnIndex });
// for diagonal cells
for (int diagonalIndex = 0; diagonalIndex < matrixWidth; diagonalIndex++)
double cell = Math.pow(hValues[diagonalIndex], 2);
eigenMatrix.setValue(diagonalIndex, diagonalIndex, cell);
estimatedPositions.add(new int[] { diagonalIndex, diagonalIndex });
/*Refine total values of each h_i:
*Initialise h_values of the hypothetical non-existent previous iteration
*with the correct format but with impossible values.
Needed for exit condition of otherwise endless loop.*/
System.out.print("initial values: [ ");
for (double h : hValues)
System.out.print(String.format("%1.4f, ", h));
System.out.println(" ]");
double[] hValuesOld = new double[matrixWidth];
int iterationCount = 0;
// FIXME JAL-4443 - spliterators could be coded out or patched with j2s
// repeat until values of h do not significantly change anymore
for (int hIndex = 0; hIndex < matrixWidth; hIndex++)
// NOTE(review): the expression computing newH is partially elided in this
// view; it appears to combine row products with squared h values — confirm
.stream(MiscMath.elementwiseMultiply(hValues,
eigenMatrix.getRow(hIndex)))
MiscMath.elementwiseMultiply(hValues, hValues))
hValues[hIndex] = newH;
System.out.print(String.format("iteration %d: [ ", iterationCount));
for (double h : hValues)
System.out.print(String.format("%1.4f, ", h));
System.out.println(" ]");
// update values of estimated positions
for (int[] pair : estimatedPositions) // pair ~ row, col
double newVal = hValues[pair[0]] * hValues[pair[1]];
eigenMatrix.setValue(pair[0], pair[1], newVal);
eigenMatrix.setValue(pair[1], pair[0], newVal);
// exit loop as soon as new values are similar to the last iteration
if (MiscMath.allClose(hValues, hValuesOld, 0d, 1e-05d, false))
// save hValues for comparison in the next iteration
System.arraycopy(hValues, 0, hValuesOld, 0, hValues.length);
// -----------------------------
// Use complemented symmetric matrix to calculate final representative
// vectors.
// Eigendecomposition.
System.out.println("eigenmatrix");
eigenMatrix.print(System.out, "%8.2f");
System.out.println();
System.out.println("uncorrected eigenvalues");
eigenMatrix.printD(System.out, "%2.4f ");
System.out.println();
double[] eigenVals = eigenMatrix.getD();
// map eigenvalues (descending) to their original column indices
TreeMap<Double, Integer> eigenPairs = new TreeMap<>(
Comparator.reverseOrder());
for (int i = 0; i < eigenVals.length; i++)
eigenPairs.put(eigenVals[i], i);
// matrix of representative eigenvectors (each row is a vector)
double[][] _repMatrix = new double[eigenVals.length][dim];
double[][] _oldMatrix = new double[eigenVals.length][dim];
double[] correctedEigenValues = new double[dim];
for (Entry<Double, Integer> pair : eigenPairs.entrySet())
double eigenValue = pair.getKey();
int column = pair.getValue();
double[] eigenVector = eigenMatrix.getColumn(column);
// for 2nd and higher eigenvalues
// rescale by the fraction of known cells (see correctionFactor above)
eigenValue /= correctionFactor;
correctedEigenValues[l] = eigenValue;
for (int j = 0; j < eigenVector.length; j++)
// negative eigenvalues cannot be scaled by sqrt; clamp to 0
_repMatrix[j][l] = (eigenValue < 0) ? 0.0
: -Math.sqrt(eigenValue) * eigenVector[j];
double tmpOldScore = scoresOld.getColumn(column)[j];
_oldMatrix[j][dim - l - 1] = (Double.isNaN(tmpOldScore)) ? 0.0
System.out.println("correctedEigenValues");
MiscMath.print(correctedEigenValues, "%2.4f ");
repMatrix = new Matrix(_repMatrix);
repMatrix.setD(correctedEigenValues);
MatrixI oldMatrix = new Matrix(_oldMatrix);
// dot products of representative vectors approximate the score matrix
MatrixI dotMatrix = repMatrix.postMultiply(repMatrix.transpose());
double rmsd = scoresOld.rmsd(dotMatrix);
System.out.println("iteration, rmsd, maxDiff, rmsdDiff");
System.out.println(String.format("0, %8.5f, -, -", rmsd));
// Refine representative vectors by minimising sum-of-squared deviates
// between dotMatrix and original score matrix
for (int iteration = 1; iteration < 21; iteration++) // arbitrarily set to
// a maximum of 20 refinement iterations
MatrixI repMatrixOLD = repMatrix.copy();
MatrixI dotMatrixOLD = dotMatrix.copy();
// for all rows/hA in the original matrix
for (int hAIndex = 0; hAIndex < oldMatrix.height(); hAIndex++)
double[] row = oldMatrix.getRow(hAIndex);
double[] hA = repMatrix.getRow(hAIndex);
// find least-squares-solution for differences between original scores
// and representative vectors
double[] hAlsm = leastSquaresOptimisation(repMatrix, scoresOld,
// update repMatrix with new hAlsm
for (int j = 0; j < repMatrix.width(); j++)
repMatrix.setValue(hAIndex, j, hAlsm[j]);
// dot product of representative vectors yields a matrix with values
// approximating the correlation matrix
dotMatrix = repMatrix.postMultiply(repMatrix.transpose());
// calculate rmsd between approximation and correlation matrix
rmsd = scoresOld.rmsd(dotMatrix);
// calculate maximum change of representative vectors of current
// iteration versus the previous one
MatrixI diff = repMatrix.subtract(repMatrixOLD).absolute();
double maxDiff = 0.0;
for (int i = 0; i < diff.height(); i++)
for (int j = 0; j < diff.width(); j++)
maxDiff = (diff.getValue(i, j) > maxDiff) ? diff.getValue(i, j)
// calculate rmsd between current and previous estimation
double rmsdDiff = dotMatrix.rmsd(dotMatrixOLD);
System.out.println(String.format("%d, %8.5f, %8.5f, %8.5f",
iteration, rmsd, maxDiff, rmsdDiff));
// converged: keep the previous iteration's vectors and stop refining
if (!(Math.abs(maxDiff) > 1e-06))
repMatrix = repMatrixOLD.copy();
} catch (Exception q)
Console.error("Error computing cc_analysis: " + q.getMessage());
System.out.println("final coordinates:");
repMatrix.print(System.out, "%1.8f ");
495 * Create equations system using information on originally known pairwise
496 * correlation coefficients (parsed from infile) and the representative result
499 * Each equation has the format:
501 * hA * hA - pairwiseCC = 0
505 * hA: unknown variable
507 * hB: known representative vector
* pairwiseCC: known pairwise correlation coefficient
511 * The resulting equations system is overdetermined, if there are more
512 * equations than unknown elements
514 * x is the user input. Remaining parameters are needed for generating
515 * equations system, NOT to be specified by user).
518 * ~ unknown n-dimensional column-vector
520 * ~ index of currently optimised representative result vector.
522 * ~ matrix with row-wise listing of representative result vectors.
524 * ~ matrix-row of originally parsed pairwise correlation
/**
 * Builds the residuals of the equations system: for every known (non-NaN)
 * pairwise correlation coefficient in row hAIndex, one residual of the form
 * dot(hA, hB) - pairwiseCC is produced.
 *
 * @param hA
 *          ~ unknown n-dimensional vector currently being optimised
 * @param repMatrix
 *          ~ matrix with row-wise listing of representative result vectors
 * @param scoresOld
 *          ~ original pairwise correlation matrix (may contain NaN)
 * @param hAIndex
 *          ~ index of the row/vector being optimised
 *
 * @return residual per known coefficient (length = row length minus NaNs)
 */
private double[] originalToEquasionSystem(double[] hA, MatrixI repMatrix,
MatrixI scoresOld, int hAIndex)
double[] originalRow = scoresOld.getRow(hAIndex);
int nans = MiscMath.countNaN(originalRow);
// one result slot per known (non-NaN) coefficient
double[] result = new double[originalRow.length - nans];
// for all pairwiseCC in originalRow
for (int hBIndex = 0; hBIndex < originalRow.length; hBIndex++)
double pairwiseCC = originalRow[hBIndex];
// if not NaN -> create new equation and add it to the system
if (!Double.isNaN(pairwiseCC))
double[] hB = repMatrix.getRow(hBIndex);
// residual: dot(hA, hB) - pairwiseCC
result[resultIndex++] = MiscMath
.sum(MiscMath.elementwiseMultiply(hA, hB)) - pairwiseCC;
556 * returns the jacobian matrix
559 * ~ matrix of representative vectors
561 * ~ current row index
/**
 * Numerically approximates the Jacobian of the equations system at the
 * current vector hA using forward finite differences with a relative step
 * of sqrt(machine epsilon).
 *
 * @param repMatrix
 *          ~ matrix of representative vectors
 * @param scoresOld
 *          ~ original pairwise correlation matrix
 * @param hAIndex
 *          ~ current row index
 *
 * @return the Jacobian matrix J (residuals x variables)
 */
private MatrixI approximateDerivative(MatrixI repMatrix,
MatrixI scoresOld, int hAIndex)
double[] hA = repMatrix.getRow(hAIndex);
// baseline residuals f0 at the current point
double[] f0 = originalToEquasionSystem(hA, repMatrix, scoresOld,
double[] signX0 = new double[hA.length];
double[] xAbs = new double[hA.length];
for (int i = 0; i < hA.length; i++)
// sign of each component (0 treated as +1)
signX0[i] = (hA[i] >= 0) ? 1 : -1;
// magnitude clamped to at least 1.0 for a stable relative step
xAbs[i] = (Math.abs(hA[i]) >= 1.0) ? Math.abs(hA[i]) : 1.0;
// relative step size: sqrt of machine epsilon
double rstep = Math.pow(Math.ulp(1.0), 0.5);
double[] h = new double[hA.length];
for (int i = 0; i < hA.length; i++)
h[i] = rstep * signX0[i] * xAbs[i];
double[][] jTransposed = new double[n][m];
// perturb one component at a time and record the finite difference
for (int i = 0; i < h.length; i++)
double[] x = new double[h.length];
System.arraycopy(hA, 0, x, 0, h.length);
// NOTE(review): the perturbation x[i] += h[i] is elided in this view,
// otherwise dx would be 0 — confirm against full source
double dx = x[i] - hA[i];
double[] df = originalToEquasionSystem(x, repMatrix, scoresOld,
for (int j = 0; j < df.length; j++)
jTransposed[i][j] = df[j] / dx;
MatrixI J = new Matrix(jTransposed).transpose();
609 * norm of regularized (by alpha) least-squares solution minus Delta
/**
 * Computes phi = ||p(alpha)|| - Delta (the norm of the alpha-regularised
 * least-squares solution minus the trust-region radius) and its derivative
 * with respect to alpha.
 *
 * @param alpha
 *          ~ regularisation ("Levenberg-Marquardt") parameter
 * @param suf
 *          ~ singular values multiplied elementwise with U^T * f
 * @param s
 *          ~ singular values of J
 *
 * @return { phi, phi' }
 */
private double[] phiAndDerivative(double alpha, double[] suf, double[] s,
// denom = s^2 + alpha (elementwise)
double[] denom = MiscMath
.elementwiseAdd(MiscMath.elementwiseMultiply(s, s), alpha);
double pNorm = MiscMath.norm(MiscMath.elementwiseDivide(suf, denom));
double phi = pNorm - Delta;
// - sum ( suf**2 / denom**3) / pNorm
double phiPrime = -MiscMath.sum(MiscMath.elementwiseDivide(
MiscMath.elementwiseMultiply(suf, suf),
MiscMath.elementwiseMultiply(
MiscMath.elementwiseMultiply(denom, denom), denom)))
return new double[] { phi, phiPrime };
635 * class holding the result of solveLsqTrustRegion
/**
 * Immutable holder for the result of solveLsqTrustRegion: the step vector,
 * the final alpha (regularisation parameter) and the number of iterations
 * performed.
 */
private class TrustRegion
private double[] step; // solution step vector p

private double alpha; // regularisation parameter at the solution

private int iteration; // iterations used to find the solution

public TrustRegion(double[] step, double alpha, int iteration)
this.iteration = iteration;
public double[] getStep()
public double getAlpha()
public int getIteration()
return this.iteration;
669 * solve a trust-region problem arising in least-squares optimisation
672 * ~ number of variables
674 * ~ number of residuals
677 * ~ singular values of J
681 * ~ radius of a trust region
683 * ~ initial guess for alpha
/**
 * Solves the trust-region subproblem arising in least-squares optimisation:
 * finds the step p minimising the quadratic model subject to ||p|| <= Delta,
 * using the SVD of the Jacobian and a Newton iteration on alpha.
 *
 * @param n
 *          ~ number of variables
 * @param m
 *          ~ number of residuals
 * @param uf
 *          ~ U^T * f (projected residuals)
 * @param s
 *          ~ singular values of J
 * @param V
 *          ~ right singular vectors of J
 * @param Delta
 *          ~ radius of the trust region
 * @param alpha
 *          ~ initial guess for the regularisation parameter
 *
 * @return TrustRegion holding the step, the final alpha and the iterations
 */
private TrustRegion solveLsqTrustRegion(int n, int m, double[] uf,
double[] s, MatrixI V, double Delta, double alpha)
double[] suf = MiscMath.elementwiseMultiply(s, uf);
// check if J has full rank and try Gauss-Newton step
boolean fullRank = false;
// rank threshold relative to the largest singular value
double threshold = s[0] * Math.ulp(1.0) * m;
fullRank = s[s.length - 1] > threshold;
// Gauss-Newton step: p = -V * (uf / s)
double[] p = MiscMath.elementwiseMultiply(
V.sumProduct(MiscMath.elementwiseDivide(uf, s)), -1);
// if the unregularised step already lies inside the region, accept it
if (MiscMath.norm(p) <= Delta)
TrustRegion result = new TrustRegion(p, 0.0, 0);
double alphaUpper = MiscMath.norm(suf) / Delta;
double alphaLower = 0.0;
double[] phiAndPrime = phiAndDerivative(0.0, suf, s, Delta);
alphaLower = -phiAndPrime[0] / phiAndPrime[1];
// choose a safeguarded starting alpha when none was supplied
alpha = (!fullRank && alpha == 0.0)
? alpha = Math.max(0.001 * alphaUpper,
Math.pow(alphaLower * alphaUpper, 0.5))
while (iteration < 10) // 10 is default max_iter
// keep alpha inside the current bracket [alphaLower, alphaUpper]
alpha = (alpha < alphaLower || alpha > alphaUpper)
? alpha = Math.max(0.001 * alphaUpper,
Math.pow(alphaLower * alphaUpper, 0.5))
double[] phiAndPrime = phiAndDerivative(alpha, suf, s, Delta);
double phi = phiAndPrime[0];
double phiPrime = phiAndPrime[1];
// shrink the bracket based on the sign of phi
alphaUpper = (phi < 0) ? alpha : alphaUpper;
double ratio = phi / phiPrime;
alphaLower = Math.max(alphaLower, alpha - ratio);
// Newton-like update of alpha
alpha -= (phi + Delta) * ratio / Delta;
if (Math.abs(phi) < 0.01 * Delta) // default rtol set to 0.01
// p = - V.dot( suf / (s**2 + alpha))
double[] tmp = MiscMath.elementwiseDivide(suf, MiscMath
.elementwiseAdd(MiscMath.elementwiseMultiply(s, s), alpha));
double[] p = MiscMath.elementwiseMultiply(V.sumProduct(tmp), -1);
// Make the norm of p equal to Delta, p is changed only slightly during
// this rescaling.
// It is done to prevent p lying outside of the trust region
p = MiscMath.elementwiseMultiply(p, Delta / MiscMath.norm(p));
TrustRegion result = new TrustRegion(p, alpha, iteration + 1);
761 * compute values of a quadratic function arising in least squares function:
762 * 0.5 * s.T * (J.T * J + diag) * s + g.T * s
/**
 * Evaluates the quadratic model of the least-squares cost at step s:
 * 0.5 * s^T * (J^T * J) * s + g^T * s.
 *
 * @param J
 *          ~ Jacobian matrix
 * @param g
 *          ~ gradient vector
 * @param s
 *          ~ candidate step
 *
 * @return value of the quadratic model
 */
private double evaluateQuadratic(MatrixI J, double[] g, double[] s)
double[] Js = J.sumProduct(s);
// q = ||J s||^2, l = g . s  (combined as 0.5*q + l in the return, elided)
double q = MiscMath.dot(Js, Js);
double l = MiscMath.dot(s, g);
784 * update the radius of a trust region based on the cost reduction
787 * @param actualReduction
788 * @param predictedReduction
/**
 * Updates the trust-region radius from the ratio of actual to predicted
 * cost reduction: shrink the region when the model predicted poorly,
 * expand it when the prediction was good and the step hit the boundary.
 *
 * @param Delta
 *          ~ current trust-region radius
 * @param actualReduction
 *          ~ achieved decrease in cost
 * @param predictedReduction
 *          ~ decrease predicted by the quadratic model
 * @param stepNorm
 *          ~ norm of the step taken
 * @param boundHit
 *          ~ whether the step reached the trust-region boundary
 *
 * @return { new Delta, ratio }
 */
private double[] updateTrustRegionRadius(double Delta,
double actualReduction, double predictedReduction,
double stepNorm, boolean boundHit)
if (predictedReduction > 0)
ratio = actualReduction / predictedReduction;
// both zero: model and reality agree exactly (degenerate case)
else if (predictedReduction == 0 && actualReduction == 0)
// poor agreement: shrink the region around the step
Delta = 0.25 * stepNorm;
// good agreement and boundary hit: region may be expanded
else if (ratio > 0.75 && boundHit)
return new double[] { Delta, ratio };
825 * trust region reflective algorithm
828 * ~ Matrix containing representative vectors
830 * ~ Matrix containing initial observations
832 * ~ current row index
/**
 * Trust Region Reflective algorithm: minimises the sum of squared residuals
 * of the equations system for one representative vector, following the
 * structure of scipy.optimize.least_squares (method='trf').
 *
 * @param repMatrix
 *          ~ matrix containing representative vectors
 * @param scoresOld
 *          ~ matrix containing initial observations
 * @param index
 *          ~ current row index
 *
 * @return the optimised vector for this row
 */
private double[] trf(MatrixI repMatrix, MatrixI scoresOld, int index,
double[] hA = repMatrix.getRow(index);
// residuals and cost at the starting point
double[] f0 = originalToEquasionSystem(hA, repMatrix, scoresOld, index);
double cost = 0.5 * MiscMath.dot(f0, f0);
// gradient g = J^T * f
double[] g = J.transpose().sumProduct(f0);
// initial trust-region radius: norm of the starting vector
double Delta = MiscMath.norm(hA);
int maxNfev = hA.length * 100;
double alpha = 0.0; // "Levenberg-Marquardt" parameter
boolean terminationStatus = false;
gNorm = MiscMath.norm(g);
// stop when converged or the evaluation budget is exhausted
if (terminationStatus || nfev == maxNfev)
// SVD of the Jacobian, used by the trust-region subproblem solver
SingularValueDecomposition svd = new SingularValueDecomposition(
new Array2DRowRealMatrix(J.asArray()));
MatrixI U = new Matrix(svd.getU().getData());
double[] s = svd.getSingularValues();
MatrixI V = new Matrix(svd.getV().getData()).transpose();
double[] uf = U.transpose().sumProduct(f0);
double actualReduction = -1;
double[] xNew = new double[hA.length];
double[] fNew = new double[f0.length];
double stepHnorm = 0;
// inner loop: retry with smaller regions until the cost decreases
while (actualReduction <= 0 && nfev < maxNfev)
TrustRegion trustRegion = solveLsqTrustRegion(n, m, uf, s, V, Delta,
double[] stepH = trustRegion.getStep();
alpha = trustRegion.getAlpha();
int nIterations = trustRegion.getIteration();
double predictedReduction = -(evaluateQuadratic(J, g, stepH));
xNew = MiscMath.elementwiseAdd(hA, stepH);
fNew = originalToEquasionSystem(xNew, repMatrix, scoresOld, index);
stepHnorm = MiscMath.norm(stepH);
// NaNs in the residuals: shrink the region and retry
if (MiscMath.countNaN(fNew) > 0)
Delta = 0.25 * stepHnorm;
// usual trust-region step quality estimation
costNew = 0.5 * MiscMath.dot(fNew, fNew);
actualReduction = cost - costNew;
double[] updatedTrustRegion = updateTrustRegionRadius(Delta,
actualReduction, predictedReduction, stepHnorm,
stepHnorm > (0.95 * Delta));
double DeltaNew = updatedTrustRegion[0];
double ratio = updatedTrustRegion[1];
// default ftol and xtol = 1e-8
boolean ftolSatisfied = actualReduction < (1e-8 * cost)
boolean xtolSatisfied = stepHnorm < (1e-8
* (1e-8 + MiscMath.norm(hA)));
terminationStatus = ftolSatisfied || xtolSatisfied;
if (terminationStatus)
// rescale alpha proportionally to the change in radius
alpha *= Delta / DeltaNew;
// step accepted: recompute Jacobian and gradient at the new point
if (actualReduction > 0)
J = approximateDerivative(repMatrix, scoresOld, index);
g = J.transpose().sumProduct(f0);
945 * performs the least squares optimisation adapted from
946 * https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares
949 * ~ Matrix containing representative vectors
951 * ~ Matrix containing initial observations
953 * ~ current row index
957 private double[] leastSquaresOptimisation(MatrixI repMatrix,
958 MatrixI scoresOld, int index)
960 MatrixI J = approximateDerivative(repMatrix, scoresOld, index);
961 double[] result = trf(repMatrix, scoresOld, index, J);